diff --git a/[refs] b/[refs]
index d151d887c06d..1febf3ee4423 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: aad1830e6b978f5d90e5d81d071e1752f526f732
+refs/heads/master: b19f8200392db88d529576a4f8178d08b70047be
diff --git a/trunk/Documentation/DocBook/kernel-locking.tmpl b/trunk/Documentation/DocBook/kernel-locking.tmpl
index a0d479d1e1dd..084f6ad7b7a0 100644
--- a/trunk/Documentation/DocBook/kernel-locking.tmpl
+++ b/trunk/Documentation/DocBook/kernel-locking.tmpl
@@ -1922,12 +1922,9 @@ machines due to caching.
mutex_lock()
- There is a mutex_trylock() which does not
- sleep. Still, it must not be used inside interrupt context since
- its implementation is not safe for that.
+ There is a mutex_trylock() which can be
+ used inside interrupt context, as it will not sleep.
mutex_unlock() will also never sleep.
- It cannot be used in interrupt context either since a mutex
- must be released by the same task that acquired it.
@@ -1961,12 +1958,6 @@ machines due to caching.
-
- Mutex API reference
-!Iinclude/linux/mutex.h
-!Ekernel/mutex.c
-
-
Further reading
diff --git a/trunk/Documentation/DocBook/tracepoint.tmpl b/trunk/Documentation/DocBook/tracepoint.tmpl
index b57a9ede3224..e8473eae2a20 100644
--- a/trunk/Documentation/DocBook/tracepoint.tmpl
+++ b/trunk/Documentation/DocBook/tracepoint.tmpl
@@ -104,9 +104,4 @@
Block IO
!Iinclude/trace/events/block.h
-
-
- Workqueue
-!Iinclude/trace/events/workqueue.h
-
diff --git a/trunk/Documentation/block/cfq-iosched.txt b/trunk/Documentation/block/cfq-iosched.txt
deleted file mode 100644
index e578feed6d81..000000000000
--- a/trunk/Documentation/block/cfq-iosched.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-CFQ ioscheduler tunables
-========================
-
-slice_idle
-----------
-This specifies how long CFQ should idle for next request on certain cfq queues
-(for sequential workloads) and service trees (for random workloads) before
-queue is expired and CFQ selects next queue to dispatch from.
-
-By default slice_idle is a non-zero value. That means by default we idle on
-queues/service trees. This can be very helpful on highly seeky media like
-single spindle SATA/SAS disks where we can cut down on overall number of
-seeks and see improved throughput.
-
-Setting slice_idle to 0 will remove all the idling on queues/service tree
-level and one should see an overall improved throughput on faster storage
-devices like multiple SATA/SAS disks in hardware RAID configuration. The down
-side is that isolation provided from WRITES also goes down and notion of
-IO priority becomes weaker.
-
-So depending on storage and workload, it might be useful to set slice_idle=0.
-In general I think for SATA/SAS disks and software RAID of SATA/SAS disks
-keeping slice_idle enabled should be useful. For any configurations where
-there are multiple spindles behind single LUN (Host based hardware RAID
-controller or for storage arrays), setting slice_idle=0 might end up in better
-throughput and acceptable latencies.
-
-CFQ IOPS Mode for group scheduling
-===================================
-Basic CFQ design is to provide priority based time slices. Higher priority
-process gets bigger time slice and lower priority process gets smaller time
-slice. Measuring time becomes harder if storage is fast and supports NCQ and
-it would be better to dispatch multiple requests from multiple cfq queues in
-request queue at a time. In such scenario, it is not possible to measure time
-consumed by single queue accurately.
-
-What is possible though is to measure number of requests dispatched from a
-single queue and also allow dispatch from multiple cfq queue at the same time.
-This effectively becomes the fairness in terms of IOPS (IO operations per
-second).
-
-If one sets slice_idle=0 and if storage supports NCQ, CFQ internally switches
-to IOPS mode and starts providing fairness in terms of number of requests
-dispatched. Note that this mode switching takes effect only for group
-scheduling. For non-cgroup users nothing should change.
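The slice_idle and IOPS-mode behaviour documented in the file removed above is normally exercised through sysfs at run time. The following stand-alone C sketch shows one way to do that from user space; the device name "sda" and the value "0" are illustrative assumptions, and the tunable only exists while CFQ is the active scheduler for that device.

/*
 * Minimal sketch: switch CFQ toward IOPS-style fairness by clearing
 * slice_idle, as described in the text above.  "sda" is a hypothetical
 * device; adjust the path for the disk actually using CFQ.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/iosched/slice_idle";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	if (write(fd, "0", 1) != 1)	/* 0 disables idling on queues/service trees */
		perror("write");
	close(fd);
	return 0;
}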
diff --git a/trunk/Documentation/cgroups/blkio-controller.txt b/trunk/Documentation/cgroups/blkio-controller.txt
index 6919d62591d9..48e0b21b0059 100644
--- a/trunk/Documentation/cgroups/blkio-controller.txt
+++ b/trunk/Documentation/cgroups/blkio-controller.txt
@@ -217,7 +217,6 @@ Details of cgroup files
CFQ sysfs tunable
=================
/sys/block/<dev>/queue/iosched/group_isolation
------------------------------------------------
If group_isolation=1, it provides stronger isolation between groups at the
expense of throughput. By default group_isolation is 0. In general that
@@ -244,33 +243,6 @@ By default one should run with group_isolation=0. If that is not sufficient
and one wants stronger isolation between groups, then set group_isolation=1
but this will come at cost of reduced throughput.
-/sys/block/<dev>/queue/iosched/slice_idle
-------------------------------------------
-On a faster hardware CFQ can be slow, especially with sequential workload.
-This happens because CFQ idles on a single queue and single queue might not
-drive deeper request queue depths to keep the storage busy. In such scenarios
-one can try setting slice_idle=0 and that would switch CFQ to IOPS
-(IO operations per second) mode on NCQ supporting hardware.
-
-That means CFQ will not idle between cfq queues of a cfq group and hence be
-able to driver higher queue depth and achieve better throughput. That also
-means that cfq provides fairness among groups in terms of IOPS and not in
-terms of disk time.
-
-/sys/block/<dev>/queue/iosched/group_idle
-------------------------------------------
-If one disables idling on individual cfq queues and cfq service trees by
-setting slice_idle=0, group_idle kicks in. That means CFQ will still idle
-on the group in an attempt to provide fairness among groups.
-
-By default group_idle is same as slice_idle and does not do anything if
-slice_idle is enabled.
-
-One can experience an overall throughput drop if you have created multiple
-groups and put applications in that group which are not driving enough
-IO to keep disk busy. In that case set group_idle=0, and CFQ will not idle
-on individual groups and throughput should improve.
-
What works
==========
- Currently only sync IO queues are support. All the buffered writes are
diff --git a/trunk/Documentation/gpio.txt b/trunk/Documentation/gpio.txt
index 9633da01ff46..d96a6dba5748 100644
--- a/trunk/Documentation/gpio.txt
+++ b/trunk/Documentation/gpio.txt
@@ -109,19 +109,17 @@ use numbers 2000-2063 to identify GPIOs in a bank of I2C GPIO expanders.
If you want to initialize a structure with an invalid GPIO number, use
some negative number (perhaps "-EINVAL"); that will never be valid. To
-test if such number from such a structure could reference a GPIO, you
-may use this predicate:
+test if a number could reference a GPIO, you may use this predicate:
int gpio_is_valid(int number);
A number that's not valid will be rejected by calls which may request
or free GPIOs (see below). Other numbers may also be rejected; for
-example, a number might be valid but temporarily unused on a given board.
+example, a number might be valid but unused on a given board.
+
+Whether a platform supports multiple GPIO controllers is currently a
+platform-specific implementation issue.
-Whether a platform supports multiple GPIO controllers is a platform-specific
-implementation issue, as are whether that support can leave "holes" in the space
-of GPIO numbers, and whether new controllers can be added at runtime. Such issues
-can affect things including whether adjacent GPIO numbers are both valid.
Using GPIOs
-----------
@@ -482,16 +480,12 @@ To support this framework, a platform's Kconfig will "select" either
ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB
and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines
three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep().
+They may also want to provide a custom value for ARCH_NR_GPIOS.
-It may also provide a custom value for ARCH_NR_GPIOS, so that it better
-reflects the number of GPIOs in actual use on that platform, without
-wasting static table space. (It should count both built-in/SoC GPIOs and
-also ones on GPIO expanders.
-
-ARCH_REQUIRE_GPIOLIB means that the gpiolib code will always get compiled
+ARCH_REQUIRE_GPIOLIB means that the gpio-lib code will always get compiled
into the kernel on that architecture.
-ARCH_WANT_OPTIONAL_GPIOLIB means the gpiolib code defaults to off and the user
+ARCH_WANT_OPTIONAL_GPIOLIB means the gpio-lib code defaults to off and the user
can enable it and build it into the kernel optionally.
If neither of these options are selected, the platform does not support
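The gpio_is_valid() predicate discussed in the hunk above is simply a bounds check on the requested number. The snippet below is a user-space model of that check, not the gpiolib implementation itself; ARCH_NR_GPIOS is fixed at an assumed 256 here, whereas the real limit comes from the platform as the text explains.

/*
 * User-space model of the gpio_is_valid() bounds check described above.
 * ARCH_NR_GPIOS is an assumed stand-in value; negative numbers such as
 * -EINVAL can never name a GPIO.
 */
#include <errno.h>
#include <stdio.h>

#define ARCH_NR_GPIOS 256	/* hypothetical platform limit */

static int gpio_is_valid(int number)
{
	return number >= 0 && number < ARCH_NR_GPIOS;
}

int main(void)
{
	int candidates[] = { 42, -EINVAL, ARCH_NR_GPIOS };
	unsigned int i;

	for (i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++)
		printf("gpio_is_valid(%d) = %d\n", candidates[i],
		       gpio_is_valid(candidates[i]));
	return 0;
}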
diff --git a/trunk/Documentation/hwmon/f71882fg b/trunk/Documentation/hwmon/f71882fg
index a7952c2bd959..1a07fd674cd0 100644
--- a/trunk/Documentation/hwmon/f71882fg
+++ b/trunk/Documentation/hwmon/f71882fg
@@ -2,6 +2,10 @@ Kernel driver f71882fg
======================
Supported chips:
+ * Fintek F71808E
+ Prefix: 'f71808fg'
+ Addresses scanned: none, address read from Super I/O config space
+ Datasheet: Not public
* Fintek F71858FG
Prefix: 'f71858fg'
Addresses scanned: none, address read from Super I/O config space
diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt
index 8dd7248508a9..2c85c0692b01 100644
--- a/trunk/Documentation/kernel-parameters.txt
+++ b/trunk/Documentation/kernel-parameters.txt
@@ -1974,18 +1974,15 @@ and is between 256 and 4096 characters. It is defined in the file
force Enable ASPM even on devices that claim not to support it.
WARNING: Forcing ASPM on may cause system lockups.
- pcie_ports= [PCIE] PCIe ports handling:
- auto Ask the BIOS whether or not to use native PCIe services
- associated with PCIe ports (PME, hot-plug, AER). Use
- them only if that is allowed by the BIOS.
- native Use native PCIe services associated with PCIe ports
- unconditionally.
- compat Treat PCIe ports as PCI-to-PCI bridges, disable the PCIe
- ports driver.
-
pcie_pme= [PCIE,PM] Native PCIe PME signaling options:
+ Format: {auto|force}[,nomsi]
+ auto Use native PCIe PME signaling if the BIOS allows the
+ kernel to control PCIe config registers of root ports.
+ force Use native PCIe PME signaling even if the BIOS refuses
+ to allow the kernel to control the relevant PCIe config
+ registers.
nomsi Do not use MSI for native PCIe PME signaling (this makes
- all PCIe root ports use INTx for all services).
+ all PCIe root ports use INTx for everything).
pcmv= [HW,PCMCIA] BadgePAD 4
@@ -2632,10 +2629,8 @@ and is between 256 and 4096 characters. It is defined in the file
aux-ide-disks -- unplug non-primary-master IDE devices
nics -- unplug network devices
all -- unplug all emulated devices (NICs and IDE disks)
- unnecessary -- unplugging emulated devices is
- unnecessary even if the host did not respond to
- the unplug protocol
- never -- do not unplug even if version check succeeds
+ ignore -- continue loading the Xen platform PCI driver even
+ if the version check failed
xirc2ps_cs= [NET,PCMCIA]
Format:
diff --git a/trunk/Documentation/laptops/thinkpad-acpi.txt b/trunk/Documentation/laptops/thinkpad-acpi.txt
index 1565eefd6fd5..f6f80257addb 100644
--- a/trunk/Documentation/laptops/thinkpad-acpi.txt
+++ b/trunk/Documentation/laptops/thinkpad-acpi.txt
@@ -1024,10 +1024,6 @@ ThinkPad-specific interface. The driver will disable its native
backlight brightness control interface if it detects that the standard
ACPI interface is available in the ThinkPad.
-If you want to use the thinkpad-acpi backlight brightness control
-instead of the generic ACPI video backlight brightness control for some
-reason, you should use the acpi_backlight=vendor kernel parameter.
-
The brightness_enable module parameter can be used to control whether
the LCD brightness control feature will be enabled when available.
brightness_enable=0 forces it to be disabled. brightness_enable=1
diff --git a/trunk/Documentation/lguest/Makefile b/trunk/Documentation/lguest/Makefile
index bebac6b4f332..28c8cdfcafd8 100644
--- a/trunk/Documentation/lguest/Makefile
+++ b/trunk/Documentation/lguest/Makefile
@@ -1,6 +1,5 @@
# This creates the demonstration utility "lguest" which runs a Linux guest.
-# Missing headers? Add "-I../../include -I../../arch/x86/include"
-CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -U_FORTIFY_SOURCE
+CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -I../../include -I../../arch/x86/include -U_FORTIFY_SOURCE
all: lguest
diff --git a/trunk/Documentation/lguest/lguest.c b/trunk/Documentation/lguest/lguest.c
index 8a6a8c6d4980..e9ce3c554514 100644
--- a/trunk/Documentation/lguest/lguest.c
+++ b/trunk/Documentation/lguest/lguest.c
@@ -39,14 +39,14 @@
#include
#include
#include
-#include <linux/virtio_config.h>
-#include <linux/virtio_net.h>
-#include <linux/virtio_blk.h>
-#include <linux/virtio_console.h>
-#include <linux/virtio_rng.h>
-#include <linux/virtio_ring.h>
-#include <asm/bootparam.h>
-#include "../../include/linux/lguest_launcher.h"
+#include "linux/lguest_launcher.h"
+#include "linux/virtio_config.h"
+#include "linux/virtio_net.h"
+#include "linux/virtio_blk.h"
+#include "linux/virtio_console.h"
+#include "linux/virtio_rng.h"
+#include "linux/virtio_ring.h"
+#include "asm/bootparam.h"
/*L:110
* We can ignore the 42 include files we need for this program, but I do want
* to draw attention to the use of kernel-style types.
@@ -1447,15 +1447,14 @@ static void add_to_bridge(int fd, const char *if_name, const char *br_name)
static void configure_device(int fd, const char *tapif, u32 ipaddr)
{
struct ifreq ifr;
- struct sockaddr_in sin;
+ struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
memset(&ifr, 0, sizeof(ifr));
strcpy(ifr.ifr_name, tapif);
/* Don't read these incantations. Just cut & paste them like I did! */
- sin.sin_family = AF_INET;
- sin.sin_addr.s_addr = htonl(ipaddr);
- memcpy(&ifr.ifr_addr, &sin, sizeof(sin));
+ sin->sin_family = AF_INET;
+ sin->sin_addr.s_addr = htonl(ipaddr);
if (ioctl(fd, SIOCSIFADDR, &ifr) != 0)
err(1, "Setting %s interface address", tapif);
ifr.ifr_flags = IFF_UP;
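The hunk above replaces a local sockaddr_in plus memcpy() with a sockaddr_in pointer aliased directly onto ifr.ifr_addr. The stand-alone sketch below shows that aliasing pattern in isolation; the interface name "tap0" and the 192.168.1.1 address are made-up values, and the final ioctl() step is only indicated in a comment.

/*
 * Stand-alone illustration of filling ifr.ifr_addr through a
 * sockaddr_in pointer, as configure_device() does after this change.
 */
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct ifreq ifr;
	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);	/* hypothetical tap device */

	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = htonl(0xc0a80101);	/* 192.168.1.1, made up */

	/* A real caller would now issue ioctl(fd, SIOCSIFADDR, &ifr). */
	printf("%s -> %s\n", ifr.ifr_name, inet_ntoa(sin->sin_addr));
	return 0;
}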
diff --git a/trunk/Documentation/mutex-design.txt b/trunk/Documentation/mutex-design.txt
index 38c10fd7f411..c91ccc0720fa 100644
--- a/trunk/Documentation/mutex-design.txt
+++ b/trunk/Documentation/mutex-design.txt
@@ -9,7 +9,7 @@ firstly, there's nothing wrong with semaphores. But if the simpler
mutex semantics are sufficient for your code, then there are a couple
of advantages of mutexes:
- - 'struct mutex' is smaller on most architectures: E.g. on x86,
+ - 'struct mutex' is smaller on most architectures: e.g. on x86,
'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes.
A smaller structure size means less RAM footprint, and better
CPU-cache utilization.
@@ -136,4 +136,3 @@ the APIs of 'struct mutex' have been streamlined:
void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
int mutex_lock_interruptible_nested(struct mutex *lock,
unsigned int subclass);
- int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
diff --git a/trunk/Documentation/powerpc/booting-without-of.txt b/trunk/Documentation/powerpc/booting-without-of.txt
index 302db5da49b3..568fa08e82e5 100644
--- a/trunk/Documentation/powerpc/booting-without-of.txt
+++ b/trunk/Documentation/powerpc/booting-without-of.txt
@@ -49,13 +49,40 @@ Table of Contents
f) MDIO on GPIOs
g) SPI busses
- VII - Specifying interrupt information for devices
+ VII - Marvell Discovery mv64[345]6x System Controller chips
+ 1) The /system-controller node
+ 2) Child nodes of /system-controller
+ a) Marvell Discovery MDIO bus
+ b) Marvell Discovery ethernet controller
+ c) Marvell Discovery PHY nodes
+ d) Marvell Discovery SDMA nodes
+ e) Marvell Discovery BRG nodes
+ f) Marvell Discovery CUNIT nodes
+ g) Marvell Discovery MPSCROUTING nodes
+ h) Marvell Discovery MPSCINTR nodes
+ i) Marvell Discovery MPSC nodes
+ j) Marvell Discovery Watch Dog Timer nodes
+ k) Marvell Discovery I2C nodes
+ l) Marvell Discovery PIC (Programmable Interrupt Controller) nodes
+ m) Marvell Discovery MPP (Multipurpose Pins) multiplexing nodes
+ n) Marvell Discovery GPP (General Purpose Pins) nodes
+ o) Marvell Discovery PCI host bridge node
+ p) Marvell Discovery CPU Error nodes
+ q) Marvell Discovery SRAM Controller nodes
+ r) Marvell Discovery PCI Error Handler nodes
+ s) Marvell Discovery Memory Controller nodes
+
+ VIII - Specifying interrupt information for devices
1) interrupts property
2) interrupt-parent property
3) OpenPIC Interrupt Controllers
4) ISA Interrupt Controllers
- VIII - Specifying device power management information (sleep property)
+ IX - Specifying GPIO information for devices
+ 1) gpios property
+ 2) gpio-controller nodes
+
+ X - Specifying device power management information (sleep property)
Appendix A - Sample SOC node for MPC8540
diff --git a/trunk/Documentation/powerpc/hvcs.txt b/trunk/Documentation/powerpc/hvcs.txt
index 6d8be3468d7d..f93462c5db25 100644
--- a/trunk/Documentation/powerpc/hvcs.txt
+++ b/trunk/Documentation/powerpc/hvcs.txt
@@ -560,7 +560,7 @@ The proper channel for reporting bugs is either through the Linux OS
distribution company that provided your OS or by posting issues to the
PowerPC development mailing list at:
-linuxppc-dev@lists.ozlabs.org
+linuxppc-dev@ozlabs.org
This request is to provide a documented and searchable public exchange
of the problems and solutions surrounding this driver for the benefit of
diff --git a/trunk/Documentation/sound/alsa/HD-Audio-Models.txt b/trunk/Documentation/sound/alsa/HD-Audio-Models.txt
index 37c6aad5e590..ce46fa1e643e 100644
--- a/trunk/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/trunk/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -296,7 +296,6 @@ Conexant 5051
Conexant 5066
=============
laptop Basic Laptop config (default)
- hp-laptop HP laptops, e g G60
dell-laptop Dell laptops
dell-vostro Dell Vostro
olpc-xo-1_5 OLPC XO 1.5
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index 9800de5ec222..b5b8baa1d70e 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -454,20 +454,9 @@ L: linux-rdma@vger.kernel.org
S: Maintained
F: drivers/infiniband/hw/amso1100/
-ANALOG DEVICES INC ASOC DRIVERS
-L: uclinux-dist-devel@blackfin.uclinux.org
-L: alsa-devel@alsa-project.org (moderated for non-subscribers)
-W: http://blackfin.uclinux.org/
-S: Supported
-F: sound/soc/blackfin/*
-F: sound/soc/codecs/ad1*
-F: sound/soc/codecs/adau*
-F: sound/soc/codecs/adav*
-F: sound/soc/codecs/ssm*
-
AOA (Apple Onboard Audio) ALSA DRIVER
M: Johannes Berg
-L: linuxppc-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Maintained
F: sound/aoa/
@@ -1483,8 +1472,8 @@ F: include/linux/can/platform/
CELL BROADBAND ENGINE ARCHITECTURE
M: Arnd Bergmann
-L: linuxppc-dev@lists.ozlabs.org
-L: cbe-oss-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
+L: cbe-oss-dev@ozlabs.org
W: http://www.ibm.com/developerworks/power/cell/
S: Supported
F: arch/powerpc/include/asm/cell*.h
@@ -1676,7 +1665,8 @@ F: kernel/cgroup*
F: mm/*cgroup*
CORETEMP HARDWARE MONITORING DRIVER
-M: Fenghua Yu
+M: Rudolf Marek
+M: Huaxu Wan
L: lm-sensors@lm-sensors.org
S: Maintained
F: Documentation/hwmon/coretemp
@@ -2201,12 +2191,6 @@ L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/ehca/
-EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
-M: Breno Leitao
-L: netdev@vger.kernel.org
-S: Maintained
-F: drivers/net/ehea/
-
EMBEDDED LINUX
M: Paul Gortmaker
M: Matt Mackall
@@ -2302,12 +2286,6 @@ S: Maintained
F: Documentation/hwmon/f71805f
F: drivers/hwmon/f71805f.c
-FANOTIFY
-M: Eric Paris
-S: Maintained
-F: fs/notify/fanotify/
-F: include/linux/fanotify.h
-
FARSYNC SYNCHRONOUS DRIVER
M: Kevin Curtis
W: http://www.farsite.co.uk/
@@ -2393,13 +2371,13 @@ F: include/linux/fb.h
FREESCALE DMA DRIVER
M: Li Yang
M: Zhang Wei
-L: linuxppc-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
S: Maintained
F: drivers/dma/fsldma.*
FREESCALE I2C CPM DRIVER
M: Jochen Friedrich
-L: linuxppc-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-cpm.c
@@ -2415,7 +2393,7 @@ F: drivers/video/imxfb.c
FREESCALE SOC FS_ENET DRIVER
M: Pantelis Antoniou
M: Vitaly Bordug
-L: linuxppc-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/fs_enet/
@@ -2423,7 +2401,7 @@ F: include/linux/fs_enet_pd.h
FREESCALE QUICC ENGINE LIBRARY
M: Timur Tabi
-L: linuxppc-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
S: Supported
F: arch/powerpc/sysdev/qe_lib/
F: arch/powerpc/include/asm/*qe.h
@@ -2431,27 +2409,27 @@ F: arch/powerpc/include/asm/*qe.h
FREESCALE USB PERIPHERAL DRIVERS
M: Li Yang
L: linux-usb@vger.kernel.org
-L: linuxppc-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
S: Maintained
F: drivers/usb/gadget/fsl*
FREESCALE QUICC ENGINE UCC ETHERNET DRIVER
M: Li Yang
L: netdev@vger.kernel.org
-L: linuxppc-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
S: Maintained
F: drivers/net/ucc_geth*
FREESCALE QUICC ENGINE UCC UART DRIVER
M: Timur Tabi
-L: linuxppc-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
S: Supported
F: drivers/serial/ucc_uart.c
FREESCALE SOC SOUND DRIVERS
M: Timur Tabi
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
-L: linuxppc-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
S: Supported
F: sound/soc/fsl/fsl*
F: sound/soc/fsl/mpc8610_hpcd.c
@@ -2586,7 +2564,7 @@ F: mm/memory-failure.c
F: mm/hwpoison-inject.c
HYPERVISOR VIRTUAL CONSOLE DRIVER
-L: linuxppc-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
S: Odd Fixes
F: drivers/char/hvc_*
@@ -2787,6 +2765,11 @@ S: Maintained
F: arch/x86/kernel/hpet.c
F: arch/x86/include/asm/hpet.h
+HPET: ACPI
+M: Bob Picco
+S: Maintained
+F: drivers/char/hpet.c
+
HPFS FILESYSTEM
M: Mikulas Patocka
W: http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi
@@ -3399,7 +3382,7 @@ F: drivers/s390/kvm/
KEXEC
M: Eric Biederman
-W: http://kernel.org/pub/linux/utils/kernel/kexec/
+W: http://ftp.kernel.org/pub/linux/kernel/people/horms/kexec-tools/
L: kexec@lists.infradead.org
S: Maintained
F: include/linux/kexec.h
@@ -3493,9 +3476,9 @@ F: drivers/usb/misc/legousbtower.c
LGUEST
M: Rusty Russell
-L: lguest@lists.ozlabs.org
+L: lguest@ozlabs.org
W: http://lguest.ozlabs.org/
-S: Odd Fixes
+S: Maintained
F: Documentation/lguest/
F: arch/x86/lguest/
F: drivers/lguest/
@@ -3512,7 +3495,7 @@ LINUX FOR POWERPC (32-BIT AND 64-BIT)
M: Benjamin Herrenschmidt
M: Paul Mackerras
W: http://www.penguinppc.org/
-L: linuxppc-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
Q: http://patchwork.ozlabs.org/project/linuxppc-dev/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc.git
S: Supported
@@ -3522,14 +3505,14 @@ F: arch/powerpc/
LINUX FOR POWER MACINTOSH
M: Benjamin Herrenschmidt
W: http://www.penguinppc.org/
-L: linuxppc-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
S: Maintained
F: arch/powerpc/platforms/powermac/
F: drivers/macintosh/
LINUX FOR POWERPC EMBEDDED MPC5XXX
M: Grant Likely
-L: linuxppc-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
T: git git://git.secretlab.ca/git/linux-2.6.git
S: Maintained
F: arch/powerpc/platforms/512x/
@@ -3539,7 +3522,7 @@ LINUX FOR POWERPC EMBEDDED PPC4XX
M: Josh Boyer
M: Matt Porter
W: http://www.penguinppc.org/
-L: linuxppc-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jwboyer/powerpc-4xx.git
S: Maintained
F: arch/powerpc/platforms/40x/
@@ -3548,7 +3531,7 @@ F: arch/powerpc/platforms/44x/
LINUX FOR POWERPC EMBEDDED XILINX VIRTEX
M: Grant Likely
W: http://wiki.secretlab.ca/index.php/Linux_on_Xilinx_Virtex
-L: linuxppc-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
T: git git://git.secretlab.ca/git/linux-2.6.git
S: Maintained
F: arch/powerpc/*/*virtex*
@@ -3558,20 +3541,20 @@ LINUX FOR POWERPC EMBEDDED PPC8XX
M: Vitaly Bordug
M: Marcelo Tosatti
W: http://www.penguinppc.org/
-L: linuxppc-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
S: Maintained
F: arch/powerpc/platforms/8xx/
LINUX FOR POWERPC EMBEDDED PPC83XX AND PPC85XX
M: Kumar Gala
W: http://www.penguinppc.org/
-L: linuxppc-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
S: Maintained
F: arch/powerpc/platforms/83xx/
LINUX FOR POWERPC PA SEMI PWRFICIENT
M: Olof Johansson
-L: linuxppc-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
S: Maintained
F: arch/powerpc/platforms/pasemi/
F: drivers/*/*pasemi*
@@ -3924,7 +3907,8 @@ F: Documentation/sound/oss/MultiSound
F: sound/oss/msnd*
MULTITECH MULTIPORT CARD (ISICOM)
-S: Orphan
+M: Jiri Slaby
+S: Maintained
F: drivers/char/isicom.c
F: include/linux/isicom.h
@@ -4604,7 +4588,7 @@ F: include/linux/preempt.h
PRISM54 WIRELESS DRIVER
M: "Luis R. Rodriguez"
L: linux-wireless@vger.kernel.org
-W: http://wireless.kernel.org/en/users/Drivers/p54
+W: http://prism54.org
S: Obsolete
F: drivers/net/wireless/prism54/
@@ -4617,14 +4601,14 @@ F: drivers/ata/sata_promise.*
PS3 NETWORK SUPPORT
M: Geoff Levand
L: netdev@vger.kernel.org
-L: cbe-oss-dev@lists.ozlabs.org
+L: cbe-oss-dev@ozlabs.org
S: Maintained
F: drivers/net/ps3_gelic_net.*
PS3 PLATFORM SUPPORT
M: Geoff Levand
-L: linuxppc-dev@lists.ozlabs.org
-L: cbe-oss-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
+L: cbe-oss-dev@ozlabs.org
S: Maintained
F: arch/powerpc/boot/ps3*
F: arch/powerpc/include/asm/lv1call.h
@@ -4638,7 +4622,7 @@ F: sound/ppc/snd_ps3*
PS3VRAM DRIVER
M: Jim Paris
-L: cbe-oss-dev@lists.ozlabs.org
+L: cbe-oss-dev@ozlabs.org
S: Maintained
F: drivers/block/ps3vram.c
@@ -4805,7 +4789,6 @@ RCUTORTURE MODULE
M: Josh Triplett
M: "Paul E. McKenney"
S: Supported
-T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
F: Documentation/RCU/torture.txt
F: kernel/rcutorture.c
@@ -4830,7 +4813,6 @@ M: Dipankar Sarma
M: "Paul E. McKenney"
W: http://www.rdrop.com/users/paulmck/rclock/
S: Supported
-T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
F: Documentation/RCU/
F: include/linux/rcu*
F: include/linux/srcu*
@@ -4838,10 +4820,12 @@ F: kernel/rcu*
F: kernel/srcu*
X: kernel/rcutorture.c
-REAL TIME CLOCK DRIVER (LEGACY)
+REAL TIME CLOCK DRIVER
M: Paul Gortmaker
S: Maintained
-F: drivers/char/rtc.c
+F: Documentation/rtc.txt
+F: drivers/rtc/
+F: include/linux/rtc.h
REAL TIME CLOCK (RTC) SUBSYSTEM
M: Alessandro Zummo
@@ -5084,7 +5068,7 @@ F: drivers/mmc/host/sdhci.*
SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)
M: Anton Vorontsov
-L: linuxppc-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
L: linux-mmc@vger.kernel.org
S: Maintained
F: drivers/mmc/host/sdhci-of.*
@@ -5501,8 +5485,8 @@ F: drivers/net/spider_net*
SPU FILE SYSTEM
M: Jeremy Kerr
-L: linuxppc-dev@lists.ozlabs.org
-L: cbe-oss-dev@lists.ozlabs.org
+L: linuxppc-dev@ozlabs.org
+L: cbe-oss-dev@ozlabs.org
W: http://www.ibm.com/developerworks/power/cell/
S: Supported
F: Documentation/filesystems/spufs.txt
diff --git a/trunk/Makefile b/trunk/Makefile
index 4df9873f83b2..f3bdff8c8d78 100644
--- a/trunk/Makefile
+++ b/trunk/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 36
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc1
NAME = Sheep on Meth
# *DOCUMENTATION*
@@ -1408,8 +1408,8 @@ checkstack:
$(OBJDUMP) -d vmlinux $$(find . -name '*.ko') | \
$(PERL) $(src)/scripts/checkstack.pl $(CHECKSTACK_ARCH)
-kernelrelease:
- @echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
+kernelrelease: include/config/kernel.release
+ @echo $(KERNELRELEASE)
kernelversion:
@echo $(KERNELVERSION)
diff --git a/trunk/arch/alpha/include/asm/cache.h b/trunk/arch/alpha/include/asm/cache.h
index ad368a93a46a..f199e69a5d0b 100644
--- a/trunk/arch/alpha/include/asm/cache.h
+++ b/trunk/arch/alpha/include/asm/cache.h
@@ -17,6 +17,7 @@
# define L1_CACHE_SHIFT 5
#endif
+#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif
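L1_CACHE_ALIGN() added above rounds a size up to the next multiple of the cache line. Below is a quick user-space check of the arithmetic, with L1_CACHE_BYTES pinned to 32 (the L1_CACHE_SHIFT == 5 case) purely for illustration.

/*
 * Round-up behaviour of L1_CACHE_ALIGN(): 1 -> 32, 32 -> 32, 33 -> 64,
 * 100 -> 128 with a 32-byte line.
 */
#include <stdio.h>

#define L1_CACHE_BYTES 32
#define L1_CACHE_ALIGN(x) (((x) + (L1_CACHE_BYTES - 1)) & ~(L1_CACHE_BYTES - 1))

int main(void)
{
	unsigned long sizes[] = { 1, 32, 33, 100 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("L1_CACHE_ALIGN(%lu) = %lu\n", sizes[i],
		       (unsigned long)L1_CACHE_ALIGN(sizes[i]));
	return 0;
}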
diff --git a/trunk/arch/alpha/kernel/err_marvel.c b/trunk/arch/alpha/kernel/err_marvel.c
index 5c905aaaeccd..52a79dfc13c6 100644
--- a/trunk/arch/alpha/kernel/err_marvel.c
+++ b/trunk/arch/alpha/kernel/err_marvel.c
@@ -109,7 +109,7 @@ marvel_print_err_cyc(u64 err_cyc)
#define IO7__ERR_CYC__CYCLE__M (0x7)
printk("%s Packet In Error: %s\n"
- "%s Error in %s, cycle %lld%s%s\n",
+ "%s Error in %s, cycle %ld%s%s\n",
err_print_prefix,
packet_desc[EXTRACT(err_cyc, IO7__ERR_CYC__PACKET)],
err_print_prefix,
@@ -313,7 +313,7 @@ marvel_print_po7_ugbge_sym(u64 ugbge_sym)
}
printk("%s Up Hose Garbage Symptom:\n"
- "%s Source Port: %lld - Dest PID: %lld - OpCode: %s\n",
+ "%s Source Port: %ld - Dest PID: %ld - OpCode: %s\n",
err_print_prefix,
err_print_prefix,
EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_SRC_PORT),
@@ -552,7 +552,7 @@ marvel_print_pox_spl_cmplt(u64 spl_cmplt)
#define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__M (0xfff)
printk("%s Split Completion Error:\n"
- "%s Source (Bus:Dev:Func): %lld:%lld:%lld\n",
+ "%s Source (Bus:Dev:Func): %ld:%ld:%ld\n",
err_print_prefix,
err_print_prefix,
EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_BUS),
diff --git a/trunk/arch/alpha/kernel/osf_sys.c b/trunk/arch/alpha/kernel/osf_sys.c
index 5d1e6d6ce684..fb58150a7e8f 100644
--- a/trunk/arch/alpha/kernel/osf_sys.c
+++ b/trunk/arch/alpha/kernel/osf_sys.c
@@ -252,7 +252,7 @@ SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname,
retval = user_path(pathname, &path);
if (!retval) {
- retval = do_osf_statfs(&path, buffer, bufsiz);
+ retval = do_osf_statfs(&path, buffer, bufsiz);
path_put(&path);
}
return retval;
diff --git a/trunk/arch/alpha/kernel/perf_event.c b/trunk/arch/alpha/kernel/perf_event.c
index 85d8e4f58c83..51c39fa41693 100644
--- a/trunk/arch/alpha/kernel/perf_event.c
+++ b/trunk/arch/alpha/kernel/perf_event.c
@@ -241,20 +241,20 @@ static inline unsigned long alpha_read_pmc(int idx)
static int alpha_perf_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
{
- long left = local64_read(&hwc->period_left);
+ long left = atomic64_read(&hwc->period_left);
long period = hwc->sample_period;
int ret = 0;
if (unlikely(left <= -period)) {
left = period;
- local64_set(&hwc->period_left, left);
+ atomic64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (unlikely(left <= 0)) {
left += period;
- local64_set(&hwc->period_left, left);
+ atomic64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
@@ -269,7 +269,7 @@ static int alpha_perf_event_set_period(struct perf_event *event,
if (left > (long)alpha_pmu->pmc_max_period[idx])
left = alpha_pmu->pmc_max_period[idx];
- local64_set(&hwc->prev_count, (unsigned long)(-left));
+ atomic64_set(&hwc->prev_count, (unsigned long)(-left));
alpha_write_pmc(idx, (unsigned long)(-left));
@@ -300,10 +300,10 @@ static unsigned long alpha_perf_event_update(struct perf_event *event,
long delta;
again:
- prev_raw_count = local64_read(&hwc->prev_count);
+ prev_raw_count = atomic64_read(&hwc->prev_count);
new_raw_count = alpha_read_pmc(idx);
- if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
@@ -316,8 +316,8 @@ static unsigned long alpha_perf_event_update(struct perf_event *event,
delta += alpha_pmu->pmc_max_period[idx] + 1;
}
- local64_add(delta, &event->count);
- local64_sub(delta, &hwc->period_left);
+ atomic64_add(delta, &event->count);
+ atomic64_sub(delta, &hwc->period_left);
return new_raw_count;
}
@@ -636,7 +636,7 @@ static int __hw_perf_event_init(struct perf_event *event)
if (!hwc->sample_period) {
hwc->sample_period = alpha_pmu->pmc_max_period[0];
hwc->last_period = hwc->sample_period;
- local64_set(&hwc->period_left, hwc->sample_period);
+ atomic64_set(&hwc->period_left, hwc->sample_period);
}
return 0;
diff --git a/trunk/arch/alpha/kernel/process.c b/trunk/arch/alpha/kernel/process.c
index 842dba308eab..88e608aebc8c 100644
--- a/trunk/arch/alpha/kernel/process.c
+++ b/trunk/arch/alpha/kernel/process.c
@@ -387,9 +387,8 @@ EXPORT_SYMBOL(dump_elf_task_fp);
* sys_execve() executes a new program.
*/
asmlinkage int
-do_sys_execve(const char __user *ufilename,
- const char __user *const __user *argv,
- const char __user *const __user *envp, struct pt_regs *regs)
+do_sys_execve(const char __user *ufilename, char __user * __user *argv,
+ char __user * __user *envp, struct pt_regs *regs)
{
int error;
char *filename;
diff --git a/trunk/arch/alpha/kernel/proto.h b/trunk/arch/alpha/kernel/proto.h
index d3e52d3fd592..3d2627ec9860 100644
--- a/trunk/arch/alpha/kernel/proto.h
+++ b/trunk/arch/alpha/kernel/proto.h
@@ -156,6 +156,9 @@ extern void SMC669_Init(int);
/* es1888.c */
extern void es1888_init(void);
+/* ns87312.c */
+extern void ns87312_enable_ide(long ide_base);
+
/* ../lib/fpreg.c */
extern void alpha_write_fp_reg (unsigned long reg, unsigned long val);
extern unsigned long alpha_read_fp_reg (unsigned long reg);
diff --git a/trunk/arch/alpha/kernel/sys_cabriolet.c b/trunk/arch/alpha/kernel/sys_cabriolet.c
index 14c8898d19ec..affd0f3f25df 100644
--- a/trunk/arch/alpha/kernel/sys_cabriolet.c
+++ b/trunk/arch/alpha/kernel/sys_cabriolet.c
@@ -33,7 +33,7 @@
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
-#include "pc873xx.h"
+
/* Note mask bit is true for DISABLED irqs. */
static unsigned long cached_irq_mask = ~0UL;
@@ -235,31 +235,18 @@ cabriolet_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
return COMMON_TABLE_LOOKUP;
}
-static inline void __init
-cabriolet_enable_ide(void)
-{
- if (pc873xx_probe() == -1) {
- printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
- } else {
- printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
- pc873xx_get_model(), pc873xx_get_base());
-
- pc873xx_enable_ide();
- }
-}
-
static inline void __init
cabriolet_init_pci(void)
{
common_init_pci();
- cabriolet_enable_ide();
+ ns87312_enable_ide(0x398);
}
static inline void __init
cia_cab_init_pci(void)
{
cia_init_pci();
- cabriolet_enable_ide();
+ ns87312_enable_ide(0x398);
}
/*
diff --git a/trunk/arch/alpha/kernel/sys_takara.c b/trunk/arch/alpha/kernel/sys_takara.c
index 4da596b6adbb..230464885b5c 100644
--- a/trunk/arch/alpha/kernel/sys_takara.c
+++ b/trunk/arch/alpha/kernel/sys_takara.c
@@ -29,7 +29,7 @@
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
-#include "pc873xx.h"
+
/* Note mask bit is true for DISABLED irqs. */
static unsigned long cached_irq_mask[2] = { -1, -1 };
@@ -264,14 +264,7 @@ takara_init_pci(void)
alpha_mv.pci_map_irq = takara_map_irq_srm;
cia_init_pci();
-
- if (pc873xx_probe() == -1) {
- printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
- } else {
- printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
- pc873xx_get_model(), pc873xx_get_base());
- pc873xx_enable_ide();
- }
+ ns87312_enable_ide(0x26e);
}
diff --git a/trunk/arch/arm/Kconfig b/trunk/arch/arm/Kconfig
index 553b7cf17bfb..92951103255a 100644
--- a/trunk/arch/arm/Kconfig
+++ b/trunk/arch/arm/Kconfig
@@ -1576,6 +1576,95 @@ config AUTO_ZRELADDR
0xf8000000. This assumes the zImage being placed in the first 128MB
from start of memory.
+config ZRELADDR
+ hex "Physical address of the decompressed kernel image"
+ depends on !AUTO_ZRELADDR
+ default 0x00008000 if ARCH_BCMRING ||\
+ ARCH_CNS3XXX ||\
+ ARCH_DOVE ||\
+ ARCH_EBSA110 ||\
+ ARCH_FOOTBRIDGE ||\
+ ARCH_INTEGRATOR ||\
+ ARCH_IOP13XX ||\
+ ARCH_IOP33X ||\
+ ARCH_IXP2000 ||\
+ ARCH_IXP23XX ||\
+ ARCH_IXP4XX ||\
+ ARCH_KIRKWOOD ||\
+ ARCH_KS8695 ||\
+ ARCH_LOKI ||\
+ ARCH_MMP ||\
+ ARCH_MV78XX0 ||\
+ ARCH_NOMADIK ||\
+ ARCH_NUC93X ||\
+ ARCH_NS9XXX ||\
+ ARCH_ORION5X ||\
+ ARCH_SPEAR3XX ||\
+ ARCH_SPEAR6XX ||\
+ ARCH_U8500 ||\
+ ARCH_VERSATILE ||\
+ ARCH_W90X900
+ default 0x08008000 if ARCH_MX1 ||\
+ ARCH_SHARK
+ default 0x10008000 if ARCH_MSM ||\
+ ARCH_OMAP1 ||\
+ ARCH_RPC
+ default 0x20008000 if ARCH_S5P6440 ||\
+ ARCH_S5P6442 ||\
+ ARCH_S5PC100 ||\
+ ARCH_S5PV210
+ default 0x30008000 if ARCH_S3C2410 ||\
+ ARCH_S3C2400 ||\
+ ARCH_S3C2412 ||\
+ ARCH_S3C2416 ||\
+ ARCH_S3C2440 ||\
+ ARCH_S3C2443
+ default 0x40008000 if ARCH_STMP378X ||\
+ ARCH_STMP37XX ||\
+ ARCH_SH7372 ||\
+ ARCH_SH7377
+ default 0x50008000 if ARCH_S3C64XX ||\
+ ARCH_SH7367
+ default 0x60008000 if ARCH_VEXPRESS
+ default 0x80008000 if ARCH_MX25 ||\
+ ARCH_MX3 ||\
+ ARCH_NETX ||\
+ ARCH_OMAP2PLUS ||\
+ ARCH_PNX4008
+ default 0x90008000 if ARCH_MX5 ||\
+ ARCH_MX91231
+ default 0xa0008000 if ARCH_IOP32X ||\
+ ARCH_PXA ||\
+ MACH_MX27
+ default 0xc0008000 if ARCH_LH7A40X ||\
+ MACH_MX21
+ default 0xf0008000 if ARCH_AAEC2000 ||\
+ ARCH_L7200
+ default 0xc0028000 if ARCH_CLPS711X
+ default 0x70008000 if ARCH_AT91 && (ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
+ default 0x20008000 if ARCH_AT91 && !(ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
+ default 0xc0008000 if ARCH_DAVINCI && ARCH_DAVINCI_DA8XX
+ default 0x80008000 if ARCH_DAVINCI && !ARCH_DAVINCI_DA8XX
+ default 0x00008000 if ARCH_EP93XX && EP93XX_SDCE3_SYNC_PHYS_OFFSET
+ default 0xc0008000 if ARCH_EP93XX && EP93XX_SDCE0_PHYS_OFFSET
+ default 0xd0008000 if ARCH_EP93XX && EP93XX_SDCE1_PHYS_OFFSET
+ default 0xe0008000 if ARCH_EP93XX && EP93XX_SDCE2_PHYS_OFFSET
+ default 0xf0008000 if ARCH_EP93XX && EP93XX_SDCE3_ASYNC_PHYS_OFFSET
+ default 0x00008000 if ARCH_GEMINI && GEMINI_MEM_SWAP
+ default 0x10008000 if ARCH_GEMINI && !GEMINI_MEM_SWAP
+ default 0x70008000 if ARCH_REALVIEW && REALVIEW_HIGH_PHYS_OFFSET
+ default 0x00008000 if ARCH_REALVIEW && !REALVIEW_HIGH_PHYS_OFFSET
+ default 0xc0208000 if ARCH_SA1100 && SA1111
+ default 0xc0008000 if ARCH_SA1100 && !SA1111
+ default 0x30108000 if ARCH_S3C2410 && PM_H1940
+ default 0x28E08000 if ARCH_U300 && MACH_U300_SINGLE_RAM
+ default 0x48008000 if ARCH_U300 && !MACH_U300_SINGLE_RAM
+ help
+ ZRELADDR is the physical address where the decompressed kernel
+ image will be placed. ZRELADDR has to be specified when the
+ assumption of AUTO_ZRELADDR is not valid, or when ZBOOT_ROM is
+ selected.
+
endmenu
menu "CPU Power Management"
diff --git a/trunk/arch/arm/Makefile b/trunk/arch/arm/Makefile
index 59c1ce858fc8..99b8200138d2 100644
--- a/trunk/arch/arm/Makefile
+++ b/trunk/arch/arm/Makefile
@@ -21,9 +21,6 @@ GZFLAGS :=-9
# Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb:
KBUILD_CFLAGS +=$(call cc-option,-marm,)
-# Never generate .eh_frame
-KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
-
# Do not use arch/arm/defconfig - it's always outdated.
# Select a platform tht is kept up-to-date
KBUILD_DEFCONFIG := versatile_defconfig
diff --git a/trunk/arch/arm/boot/Makefile b/trunk/arch/arm/boot/Makefile
index 4a590f4113e2..f705213caa88 100644
--- a/trunk/arch/arm/boot/Makefile
+++ b/trunk/arch/arm/boot/Makefile
@@ -14,18 +14,16 @@
MKIMAGE := $(srctree)/scripts/mkuboot.sh
ifneq ($(MACHINE),)
-include $(srctree)/$(MACHINE)/Makefile.boot
+-include $(srctree)/$(MACHINE)/Makefile.boot
endif
# Note: the following conditions must always be true:
-# ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET)
# PARAMS_PHYS must be within 4MB of ZRELADDR
# INITRD_PHYS must be in RAM
-ZRELADDR := $(zreladdr-y)
PARAMS_PHYS := $(params_phys-y)
INITRD_PHYS := $(initrd_phys-y)
-export ZRELADDR INITRD_PHYS PARAMS_PHYS
+export INITRD_PHYS PARAMS_PHYS
targets := Image zImage xipImage bootpImage uImage
@@ -67,7 +65,7 @@ quiet_cmd_uimage = UIMAGE $@
ifeq ($(CONFIG_ZBOOT_ROM),y)
$(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT)
else
-$(obj)/uImage: LOADADDR=$(ZRELADDR)
+$(obj)/uImage: LOADADDR=$(CONFIG_ZRELADDR)
endif
ifeq ($(CONFIG_THUMB2_KERNEL),y)
diff --git a/trunk/arch/arm/boot/compressed/Makefile b/trunk/arch/arm/boot/compressed/Makefile
index b23f6bc46cfa..68775e33476c 100644
--- a/trunk/arch/arm/boot/compressed/Makefile
+++ b/trunk/arch/arm/boot/compressed/Makefile
@@ -79,10 +79,6 @@ endif
EXTRA_CFLAGS := -fpic -fno-builtin
EXTRA_AFLAGS := -Wa,-march=all
-# Supply ZRELADDR to the decompressor via a linker symbol.
-ifneq ($(CONFIG_AUTO_ZRELADDR),y)
-LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR)
-endif
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
endif
diff --git a/trunk/arch/arm/boot/compressed/head.S b/trunk/arch/arm/boot/compressed/head.S
index 6825c34646d4..6af9907c3b5c 100644
--- a/trunk/arch/arm/boot/compressed/head.S
+++ b/trunk/arch/arm/boot/compressed/head.S
@@ -177,7 +177,7 @@ not_angel:
and r4, pc, #0xf8000000
add r4, r4, #TEXT_OFFSET
#else
- ldr r4, =zreladdr
+ ldr r4, =CONFIG_ZRELADDR
#endif
subs r0, r0, r1 @ calculate the delta offset
diff --git a/trunk/arch/arm/common/it8152.c b/trunk/arch/arm/common/it8152.c
index 7974baacafce..6c0913562455 100644
--- a/trunk/arch/arm/common/it8152.c
+++ b/trunk/arch/arm/common/it8152.c
@@ -263,14 +263,6 @@ static int it8152_pci_platform_notify_remove(struct device *dev)
return 0;
}
-int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
-{
- dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
- __func__, dma_addr, size);
- return (dev->bus == &pci_bus_type) &&
- ((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
-}
-
int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
{
it8152_io.start = IT8152_IO_BASE + 0x12000;
diff --git a/trunk/arch/arm/configs/omap_4430sdp_defconfig b/trunk/arch/arm/configs/omap_4430sdp_defconfig
index 14c1e18c648f..63e0c2d50f32 100644
--- a/trunk/arch/arm/configs/omap_4430sdp_defconfig
+++ b/trunk/arch/arm/configs/omap_4430sdp_defconfig
@@ -13,9 +13,6 @@ CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_ARCH_OMAP=y
CONFIG_ARCH_OMAP4=y
-# CONFIG_ARCH_OMAP2PLUS_TYPICAL is not set
-# CONFIG_ARCH_OMAP2 is not set
-# CONFIG_ARCH_OMAP3 is not set
# CONFIG_OMAP_MUX is not set
CONFIG_OMAP_32K_TIMER=y
CONFIG_OMAP_DM_TIMER=y
diff --git a/trunk/arch/arm/include/asm/dma-mapping.h b/trunk/arch/arm/include/asm/dma-mapping.h
index c568da7dcae4..c226fe10553e 100644
--- a/trunk/arch/arm/include/asm/dma-mapping.h
+++ b/trunk/arch/arm/include/asm/dma-mapping.h
@@ -288,7 +288,15 @@ extern void dmabounce_unregister_dev(struct device *);
* DMA access and 1 if the buffer needs to be bounced.
*
*/
+#ifdef CONFIG_SA1111
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
+#else
+static inline int dma_needs_bounce(struct device *dev, dma_addr_t addr,
+ size_t size)
+{
+ return 0;
+}
+#endif
/*
* The DMA API, implemented by dmabounce.c. See below for descriptions.
diff --git a/trunk/arch/arm/include/asm/perf_event.h b/trunk/arch/arm/include/asm/perf_event.h
index b5799a3b7117..48837e6d8887 100644
--- a/trunk/arch/arm/include/asm/perf_event.h
+++ b/trunk/arch/arm/include/asm/perf_event.h
@@ -17,7 +17,7 @@
* counter interrupts are regular interrupts and not an NMI. This
* means that when we receive the interrupt we can call
* perf_event_do_pending() that handles all of the work with
- * interrupts disabled.
+ * interrupts enabled.
*/
static inline void
set_perf_event_pending(void)
diff --git a/trunk/arch/arm/include/asm/ptrace.h b/trunk/arch/arm/include/asm/ptrace.h
index 7ce15eb15f72..c974be8913a7 100644
--- a/trunk/arch/arm/include/asm/ptrace.h
+++ b/trunk/arch/arm/include/asm/ptrace.h
@@ -158,24 +158,15 @@ struct pt_regs {
*/
static inline int valid_user_regs(struct pt_regs *regs)
{
- unsigned long mode = regs->ARM_cpsr & MODE_MASK;
-
- /*
- * Always clear the F (FIQ) and A (delayed abort) bits
- */
- regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);
-
- if ((regs->ARM_cpsr & PSR_I_BIT) == 0) {
- if (mode == USR_MODE)
- return 1;
- if (elf_hwcap & HWCAP_26BIT && mode == USR26_MODE)
- return 1;
+ if (user_mode(regs) && (regs->ARM_cpsr & PSR_I_BIT) == 0) {
+ regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);
+ return 1;
}
/*
* Force CPSR to something logical...
*/
- regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;
+ regs->ARM_cpsr &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | PSR_T_BIT | MODE32_BIT;
if (!(elf_hwcap & HWCAP_26BIT))
regs->ARM_cpsr |= USR_MODE;
diff --git a/trunk/arch/arm/include/asm/unistd.h b/trunk/arch/arm/include/asm/unistd.h
index c891eb76c0e3..dd2bf53000fe 100644
--- a/trunk/arch/arm/include/asm/unistd.h
+++ b/trunk/arch/arm/include/asm/unistd.h
@@ -392,10 +392,6 @@
#define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE+363)
#define __NR_perf_event_open (__NR_SYSCALL_BASE+364)
#define __NR_recvmmsg (__NR_SYSCALL_BASE+365)
-#define __NR_accept4 (__NR_SYSCALL_BASE+366)
-#define __NR_fanotify_init (__NR_SYSCALL_BASE+367)
-#define __NR_fanotify_mark (__NR_SYSCALL_BASE+368)
-#define __NR_prlimit64 (__NR_SYSCALL_BASE+369)
/*
* The following SWIs are ARM private.
diff --git a/trunk/arch/arm/kernel/calls.S b/trunk/arch/arm/kernel/calls.S
index 5c26eccef998..37ae301cc47c 100644
--- a/trunk/arch/arm/kernel/calls.S
+++ b/trunk/arch/arm/kernel/calls.S
@@ -375,10 +375,6 @@
CALL(sys_rt_tgsigqueueinfo)
CALL(sys_perf_event_open)
/* 365 */ CALL(sys_recvmmsg)
- CALL(sys_accept4)
- CALL(sys_fanotify_init)
- CALL(sys_fanotify_mark)
- CALL(sys_prlimit64)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/trunk/arch/arm/kernel/etm.c b/trunk/arch/arm/kernel/etm.c
index 33c7077174db..56418f98cd01 100644
--- a/trunk/arch/arm/kernel/etm.c
+++ b/trunk/arch/arm/kernel/etm.c
@@ -230,7 +230,7 @@ static void etm_dump(void)
etb_lock(t);
}
-static void sysrq_etm_dump(int key)
+static void sysrq_etm_dump(int key, struct tty_struct *tty)
{
dev_dbg(tracer.dev, "Dumping ETB buffer\n");
etm_dump();
diff --git a/trunk/arch/arm/kernel/kgdb.c b/trunk/arch/arm/kernel/kgdb.c
index d6e8b4d2e60d..778c2f7024ff 100644
--- a/trunk/arch/arm/kernel/kgdb.c
+++ b/trunk/arch/arm/kernel/kgdb.c
@@ -79,7 +79,7 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
return;
/* Initialize to zero */
- for (regno = 0; regno < DBG_MAX_REG_NUM; regno++)
+ for (regno = 0; regno < GDB_MAX_REGS; regno++)
gdb_regs[regno] = 0;
/* Otherwise, we have only some registers from switch_to() */
diff --git a/trunk/arch/arm/kernel/perf_event.c b/trunk/arch/arm/kernel/perf_event.c
index ecbb0288e5dd..417c392ddf1c 100644
--- a/trunk/arch/arm/kernel/perf_event.c
+++ b/trunk/arch/arm/kernel/perf_event.c
@@ -319,8 +319,8 @@ validate_event(struct cpu_hw_events *cpuc,
{
struct hw_perf_event fake_event = event->hw;
- if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
- return 1;
+ if (event->pmu && event->pmu != &pmu)
+ return 0;
return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
}
@@ -1041,8 +1041,8 @@ armv6pmu_handle_irq(int irq_num,
/*
* Handle the pending perf events.
*
- * Note: this call *must* be run with interrupts disabled. For
- * platforms that can have the PMU interrupts raised as an NMI, this
+ * Note: this call *must* be run with interrupts enabled. For
+ * platforms that can have the PMU interrupts raised as a PMI, this
* will not work.
*/
perf_event_do_pending();
@@ -2017,8 +2017,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
/*
* Handle the pending perf events.
*
- * Note: this call *must* be run with interrupts disabled. For
- * platforms that can have the PMU interrupts raised as an NMI, this
+ * Note: this call *must* be run with interrupts enabled. For
+ * platforms that can have the PMU interrupts raised as a PMI, this
* will not work.
*/
perf_event_do_pending();
diff --git a/trunk/arch/arm/kernel/sys_arm.c b/trunk/arch/arm/kernel/sys_arm.c
index 62e7c61d0342..5b7c541a4c63 100644
--- a/trunk/arch/arm/kernel/sys_arm.c
+++ b/trunk/arch/arm/kernel/sys_arm.c
@@ -62,9 +62,8 @@ asmlinkage int sys_vfork(struct pt_regs *regs)
/* sys_execve() executes a new program.
* This is called indirectly via a small wrapper
*/
-asmlinkage int sys_execve(const char __user *filenamei,
- const char __user *const __user *argv,
- const char __user *const __user *envp, struct pt_regs *regs)
+asmlinkage int sys_execve(const char __user *filenamei, char __user * __user *argv,
+ char __user * __user *envp, struct pt_regs *regs)
{
int error;
char * filename;
@@ -79,17 +78,14 @@ asmlinkage int sys_execve(const char __user *filenamei,
return error;
}
-int kernel_execve(const char *filename,
- const char *const argv[],
- const char *const envp[])
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
struct pt_regs regs;
int ret;
memset(&regs, 0, sizeof(struct pt_regs));
- ret = do_execve(filename,
- (const char __user *const __user *)argv,
- (const char __user *const __user *)envp, &regs);
+ ret = do_execve(filename, (char __user * __user *)argv,
+ (char __user * __user *)envp, &regs);
if (ret < 0)
goto out;
diff --git a/trunk/arch/arm/mach-at91/at91sam9g45.c b/trunk/arch/arm/mach-at91/at91sam9g45.c
index c67b47f1c0fd..753c0d31a3d3 100644
--- a/trunk/arch/arm/mach-at91/at91sam9g45.c
+++ b/trunk/arch/arm/mach-at91/at91sam9g45.c
@@ -121,8 +121,8 @@ static struct clk ssc1_clk = {
.pmc_mask = 1 << AT91SAM9G45_ID_SSC1,
.type = CLK_TYPE_PERIPHERAL,
};
-static struct clk tcb0_clk = {
- .name = "tcb0_clk",
+static struct clk tcb_clk = {
+ .name = "tcb_clk",
.pmc_mask = 1 << AT91SAM9G45_ID_TCB,
.type = CLK_TYPE_PERIPHERAL,
};
@@ -192,14 +192,6 @@ static struct clk ohci_clk = {
.parent = &uhphs_clk,
};
-/* One additional fake clock for second TC block */
-static struct clk tcb1_clk = {
- .name = "tcb1_clk",
- .pmc_mask = 0,
- .type = CLK_TYPE_PERIPHERAL,
- .parent = &tcb0_clk,
-};
-
static struct clk *periph_clocks[] __initdata = {
&pioA_clk,
&pioB_clk,
@@ -216,7 +208,7 @@ static struct clk *periph_clocks[] __initdata = {
&spi1_clk,
&ssc0_clk,
&ssc1_clk,
- &tcb0_clk,
+ &tcb_clk,
&pwm_clk,
&tsc_clk,
&dma_clk,
@@ -229,7 +221,6 @@ static struct clk *periph_clocks[] __initdata = {
&mmc1_clk,
// irq0
&ohci_clk,
- &tcb1_clk,
};
/*
diff --git a/trunk/arch/arm/mach-at91/at91sam9g45_devices.c b/trunk/arch/arm/mach-at91/at91sam9g45_devices.c
index 5e71ccd5e7d3..809114d5a5a6 100644
--- a/trunk/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/trunk/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -46,7 +46,7 @@ static struct resource hdmac_resources[] = {
.end = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1,
.flags = IORESOURCE_MEM,
},
- [1] = {
+ [2] = {
.start = AT91SAM9G45_ID_DMA,
.end = AT91SAM9G45_ID_DMA,
.flags = IORESOURCE_IRQ,
@@ -835,9 +835,9 @@ static struct platform_device at91sam9g45_tcb1_device = {
static void __init at91_add_device_tc(void)
{
/* this chip has one clock and irq for all six TC channels */
- at91_clock_associate("tcb0_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
+ at91_clock_associate("tcb_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
platform_device_register(&at91sam9g45_tcb0_device);
- at91_clock_associate("tcb1_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
+ at91_clock_associate("tcb_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
platform_device_register(&at91sam9g45_tcb1_device);
}
#else
diff --git a/trunk/arch/arm/mach-at91/board-sam9261ek.c b/trunk/arch/arm/mach-at91/board-sam9261ek.c
index 65eb0943194f..c4c8865d52d7 100644
--- a/trunk/arch/arm/mach-at91/board-sam9261ek.c
+++ b/trunk/arch/arm/mach-at91/board-sam9261ek.c
@@ -93,12 +93,11 @@ static struct resource dm9000_resource[] = {
.start = AT91_PIN_PC11,
.end = AT91_PIN_PC11,
.flags = IORESOURCE_IRQ
- | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE,
}
};
static struct dm9000_plat_data dm9000_platdata = {
- .flags = DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM,
+ .flags = DM9000_PLATF_16BITONLY,
};
static struct platform_device dm9000_device = {
@@ -168,6 +167,17 @@ static struct at91_udc_data __initdata ek_udc_data = {
};
+/*
+ * MCI (SD/MMC)
+ */
+static struct at91_mmc_data __initdata ek_mmc_data = {
+ .wire4 = 1,
+// .det_pin = ... not connected
+// .wp_pin = ... not connected
+// .vcc_pin = ... not connected
+};
+
+
/*
* NAND flash
*/
@@ -236,10 +246,6 @@ static void __init ek_add_device_nand(void)
at91_add_device_nand(&ek_nand_data);
}
-/*
- * SPI related devices
- */
-#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
/*
* ADS7846 Touchscreen
@@ -350,19 +356,6 @@ static struct spi_board_info ek_spi_devices[] = {
#endif
};
-#else /* CONFIG_SPI_ATMEL_* */
-/* spi0 and mmc/sd share the same PIO pins: cannot be used at the same time */
-
-/*
- * MCI (SD/MMC)
- * det_pin, wp_pin and vcc_pin are not connected
- */
-static struct at91_mmc_data __initdata ek_mmc_data = {
- .wire4 = 1,
-};
-
-#endif /* CONFIG_SPI_ATMEL_* */
-
/*
* LCD Controller
diff --git a/trunk/arch/arm/mach-at91/clock.c b/trunk/arch/arm/mach-at91/clock.c
index 7525cee3983f..7f7da439341f 100644
--- a/trunk/arch/arm/mach-at91/clock.c
+++ b/trunk/arch/arm/mach-at91/clock.c
@@ -501,8 +501,7 @@ postcore_initcall(at91_clk_debugfs_init);
int __init clk_register(struct clk *clk)
{
if (clk_is_peripheral(clk)) {
- if (!clk->parent)
- clk->parent = &mck;
+ clk->parent = &mck;
clk->mode = pmc_periph_mode;
list_add_tail(&clk->node, &clocks);
}
diff --git a/trunk/arch/arm/mach-ep93xx/clock.c b/trunk/arch/arm/mach-ep93xx/clock.c
index 4566bd1c8660..8bf3cec98cfa 100644
--- a/trunk/arch/arm/mach-ep93xx/clock.c
+++ b/trunk/arch/arm/mach-ep93xx/clock.c
@@ -560,4 +560,4 @@ static int __init ep93xx_clock_init(void)
clkdev_add_table(clocks, ARRAY_SIZE(clocks));
return 0;
}
-postcore_initcall(ep93xx_clock_init);
+arch_initcall(ep93xx_clock_init);
diff --git a/trunk/arch/arm/mach-imx/mach-cpuimx27.c b/trunk/arch/arm/mach-imx/mach-cpuimx27.c
index 339150ab0ea5..575ff1ae85a7 100644
--- a/trunk/arch/arm/mach-imx/mach-cpuimx27.c
+++ b/trunk/arch/arm/mach-imx/mach-cpuimx27.c
@@ -279,13 +279,13 @@ static void __init eukrea_cpuimx27_init(void)
#if defined(CONFIG_USB_ULPI)
if (otg_mode_host) {
otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
- ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
+ USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
mxc_register_device(&mxc_otg_host, &otg_pdata);
}
usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
- ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
+ USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
mxc_register_device(&mxc_usbh2, &usbh2_pdata);
#endif
diff --git a/trunk/arch/arm/mach-imx/mach-pca100.c b/trunk/arch/arm/mach-imx/mach-pca100.c
index 23c9e1f37b9c..a389d1148f18 100644
--- a/trunk/arch/arm/mach-imx/mach-pca100.c
+++ b/trunk/arch/arm/mach-imx/mach-pca100.c
@@ -419,13 +419,13 @@ static void __init pca100_init(void)
#if defined(CONFIG_USB_ULPI)
if (otg_mode_host) {
otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
- ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
+ USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
mxc_register_device(&mxc_otg_host, &otg_pdata);
}
usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
- ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
+ USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
mxc_register_device(&mxc_usbh2, &usbh2_pdata);
#endif
diff --git a/trunk/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c b/trunk/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
index 4aaadc753d3e..91931dcb0689 100644
--- a/trunk/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
+++ b/trunk/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
@@ -215,7 +215,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = {
* Add platform devices present on this baseboard and init
* them from CPU side as far as required to use them later on
*/
-void __init eukrea_mbimxsd25_baseboard_init(void)
+void __init eukrea_mbimxsd_baseboard_init(void)
{
if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
ARRAY_SIZE(eukrea_mbimxsd_pads)))
diff --git a/trunk/arch/arm/mach-mx25/mach-cpuimx25.c b/trunk/arch/arm/mach-mx25/mach-cpuimx25.c
index e064bb3d6919..56b2e26d23b4 100644
--- a/trunk/arch/arm/mach-mx25/mach-cpuimx25.c
+++ b/trunk/arch/arm/mach-mx25/mach-cpuimx25.c
@@ -138,7 +138,7 @@ static void __init eukrea_cpuimx25_init(void)
#if defined(CONFIG_USB_ULPI)
if (otg_mode_host) {
otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
- ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
+ USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
mxc_register_device(&mxc_otg, &otg_pdata);
}
@@ -147,8 +147,8 @@ static void __init eukrea_cpuimx25_init(void)
if (!otg_mode_host)
mxc_register_device(&otg_udc_device, &otg_device_pdata);
-#ifdef CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD
- eukrea_mbimxsd25_baseboard_init();
+#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD
+ eukrea_mbimxsd_baseboard_init();
#endif
}
diff --git a/trunk/arch/arm/mach-mx3/clock-imx35.c b/trunk/arch/arm/mach-mx3/clock-imx35.c
index 7a62e744a8b0..d3af0fdf8475 100644
--- a/trunk/arch/arm/mach-mx3/clock-imx35.c
+++ b/trunk/arch/arm/mach-mx3/clock-imx35.c
@@ -155,7 +155,7 @@ static unsigned long get_rate_arm(void)
aad = &clk_consumer[(pdr0 >> 16) & 0xf];
if (aad->sel)
- fref = fref * 3 / 4;
+ fref = fref * 2 / 3;
return fref / aad->arm;
}
@@ -164,7 +164,7 @@ static unsigned long get_rate_ahb(struct clk *clk)
{
unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
struct arm_ahb_div *aad;
- unsigned long fref = get_rate_arm();
+ unsigned long fref = get_rate_mpll();
aad = &clk_consumer[(pdr0 >> 16) & 0xf];
@@ -176,11 +176,16 @@ static unsigned long get_rate_ipg(struct clk *clk)
return get_rate_ahb(NULL) >> 1;
}
+static unsigned long get_3_3_div(unsigned long in)
+{
+ return (((in >> 3) & 0x7) + 1) * ((in & 0x7) + 1);
+}
+
static unsigned long get_rate_uart(struct clk *clk)
{
unsigned long pdr3 = __raw_readl(CCM_BASE + CCM_PDR3);
unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
- unsigned long div = ((pdr4 >> 10) & 0x3f) + 1;
+ unsigned long div = get_3_3_div(pdr4 >> 10);
if (pdr3 & (1 << 14))
return get_rate_arm() / div;
@@ -211,7 +216,7 @@ static unsigned long get_rate_sdhc(struct clk *clk)
break;
}
- return rate / (div + 1);
+ return rate / get_3_3_div(div);
}
static unsigned long get_rate_mshc(struct clk *clk)
@@ -265,7 +270,7 @@ static unsigned long get_rate_csi(struct clk *clk)
else
rate = get_rate_ppll();
- return rate / (((pdr2 >> 16) & 0x3f) + 1);
+ return rate / get_3_3_div((pdr2 >> 16) & 0x3f);
}
static unsigned long get_rate_otg(struct clk *clk)
@@ -278,51 +283,25 @@ static unsigned long get_rate_otg(struct clk *clk)
else
rate = get_rate_ppll();
- return rate / (((pdr4 >> 22) & 0x3f) + 1);
+ return rate / get_3_3_div((pdr4 >> 22) & 0x3f);
}
static unsigned long get_rate_ipg_per(struct clk *clk)
{
unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
- unsigned long div;
+ unsigned long div1, div2;
if (pdr0 & (1 << 26)) {
- div = (pdr4 >> 16) & 0x3f;
- return get_rate_arm() / (div + 1);
+ div1 = (pdr4 >> 19) & 0x7;
+ div2 = (pdr4 >> 16) & 0x7;
+ return get_rate_arm() / ((div1 + 1) * (div2 + 1));
} else {
- div = (pdr0 >> 12) & 0x7;
- return get_rate_ahb(NULL) / (div + 1);
+ div1 = (pdr0 >> 12) & 0x7;
+ return get_rate_ahb(NULL) / div1;
}
}
-static unsigned long get_rate_hsp(struct clk *clk)
-{
- unsigned long hsp_podf = (__raw_readl(CCM_BASE + CCM_PDR0) >> 20) & 0x03;
- unsigned long fref = get_rate_mpll();
-
- if (fref > 400 * 1000 * 1000) {
- switch (hsp_podf) {
- case 0:
- return fref >> 2;
- case 1:
- return fref >> 3;
- case 2:
- return fref / 3;
- }
- } else {
- switch (hsp_podf) {
- case 0:
- case 2:
- return fref / 3;
- case 1:
- return fref / 6;
- }
- }
-
- return 0;
-}
-
static int clk_cgr_enable(struct clk *clk)
{
u32 reg;
@@ -380,7 +359,7 @@ DEFINE_CLOCK(i2c1_clk, 0, CCM_CGR1, 10, get_rate_ipg_per, NULL);
DEFINE_CLOCK(i2c2_clk, 1, CCM_CGR1, 12, get_rate_ipg_per, NULL);
DEFINE_CLOCK(i2c3_clk, 2, CCM_CGR1, 14, get_rate_ipg_per, NULL);
DEFINE_CLOCK(iomuxc_clk, 0, CCM_CGR1, 16, NULL, NULL);
-DEFINE_CLOCK(ipu_clk, 0, CCM_CGR1, 18, get_rate_hsp, NULL);
+DEFINE_CLOCK(ipu_clk, 0, CCM_CGR1, 18, get_rate_ahb, NULL);
DEFINE_CLOCK(kpp_clk, 0, CCM_CGR1, 20, get_rate_ipg, NULL);
DEFINE_CLOCK(mlb_clk, 0, CCM_CGR1, 22, get_rate_ahb, NULL);
DEFINE_CLOCK(mshc_clk, 0, CCM_CGR1, 24, get_rate_mshc, NULL);
@@ -506,10 +485,10 @@ static struct clk_lookup lookups[] = {
int __init mx35_clocks_init()
{
- unsigned int cgr2 = 3 << 26, cgr3 = 0;
+ unsigned int ll = 0;
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
- cgr2 |= 3 << 16;
+ ll = (3 << 16);
#endif
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
@@ -520,20 +499,8 @@ int __init mx35_clocks_init()
__raw_writel((3 << 18), CCM_BASE + CCM_CGR0);
__raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
CCM_BASE + CCM_CGR1);
-
- /*
- * Check if we came up in internal boot mode. If yes, we need some
- * extra clocks turned on, otherwise the MX35 boot ROM code will
- * hang after a watchdog reset.
- */
- if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) {
- /* Additionally turn on UART1, SCC, and IIM clocks */
- cgr2 |= 3 << 16 | 3 << 4;
- cgr3 |= 3 << 2;
- }
-
- __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
- __raw_writel(cgr3, CCM_BASE + CCM_CGR3);
+ __raw_writel((3 << 26) | ll, CCM_BASE + CCM_CGR2);
+ __raw_writel(0, CCM_BASE + CCM_CGR3);
mxc_timer_init(&gpt_clk,
MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
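Note on the hunk above: the get_3_3_div() helper splits a 6-bit divider field into two 3-bit sub-dividers and uses their product as the effective divisor. A minimal host-side sketch of that decoding, with made-up example values, is:

/*
 * Standalone sketch of the 3+3-bit divider decoding used by
 * get_3_3_div() above: bits [5:3] and [2:0] of the field are two
 * separate dividers, and the effective divisor is the product of
 * (each subfield + 1).  Example values are purely illustrative.
 */
#include <assert.h>
#include <stdio.h>

static unsigned long get_3_3_div(unsigned long in)
{
	return (((in >> 3) & 0x7) + 1) * ((in & 0x7) + 1);
}

int main(void)
{
	/* field value 0 -> divide by 1 * 1 = 1 */
	assert(get_3_3_div(0x00) == 1);
	/* sub-divider values 3 and 1 -> divide by 4 * 2 = 8 */
	assert(get_3_3_div((3 << 3) | 1) == 8);

	/* e.g. a 300 MHz parent with that divider gives 37.5 MHz */
	printf("%lu\n", 300000000UL / get_3_3_div((3 << 3) | 1));
	return 0;
}
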
diff --git a/trunk/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c b/trunk/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
index f8f15e3ac7a0..1dc5004df866 100644
--- a/trunk/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
+++ b/trunk/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
@@ -216,7 +216,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = {
* Add platform devices present on this baseboard and init
* them from CPU side as far as required to use them later on
*/
-void __init eukrea_mbimxsd35_baseboard_init(void)
+void __init eukrea_mbimxsd_baseboard_init(void)
{
if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
ARRAY_SIZE(eukrea_mbimxsd_pads)))
diff --git a/trunk/arch/arm/mach-mx3/mach-cpuimx35.c b/trunk/arch/arm/mach-mx3/mach-cpuimx35.c
index 2a4f8b781ba4..63f970f340a2 100644
--- a/trunk/arch/arm/mach-mx3/mach-cpuimx35.c
+++ b/trunk/arch/arm/mach-mx3/mach-cpuimx35.c
@@ -192,7 +192,7 @@ static void __init mxc_board_init(void)
#if defined(CONFIG_USB_ULPI)
if (otg_mode_host) {
otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
- ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
+ USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
mxc_register_device(&mxc_otg_host, &otg_pdata);
}
@@ -201,8 +201,8 @@ static void __init mxc_board_init(void)
if (!otg_mode_host)
mxc_register_device(&mxc_otg_udc_device, &otg_device_pdata);
-#ifdef CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD
- eukrea_mbimxsd35_baseboard_init();
+#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD
+ eukrea_mbimxsd_baseboard_init();
#endif
}
diff --git a/trunk/arch/arm/mach-mx5/clock-mx51.c b/trunk/arch/arm/mach-mx5/clock-mx51.c
index 57c10a9926cc..6af69def357f 100644
--- a/trunk/arch/arm/mach-mx5/clock-mx51.c
+++ b/trunk/arch/arm/mach-mx5/clock-mx51.c
@@ -56,7 +56,7 @@ static void _clk_ccgr_disable(struct clk *clk)
{
u32 reg;
reg = __raw_readl(clk->enable_reg);
- reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
+ reg &= ~(MXC_CCM_CCGRx_MOD_OFF << clk->enable_shift);
__raw_writel(reg, clk->enable_reg);
}
diff --git a/trunk/arch/arm/mach-omap2/Makefile b/trunk/arch/arm/mach-omap2/Makefile
index 88d3a1e920f5..63b2d8859c3c 100644
--- a/trunk/arch/arm/mach-omap2/Makefile
+++ b/trunk/arch/arm/mach-omap2/Makefile
@@ -25,7 +25,6 @@ obj-$(CONFIG_LOCAL_TIMERS) += timer-mpu.o
obj-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o
obj-$(CONFIG_ARCH_OMAP4) += omap44xx-smc.o omap4-common.o
-AFLAGS_omap-headsmp.o :=-Wa,-march=armv7-a
AFLAGS_omap44xx-smc.o :=-Wa,-march=armv7-a
# Functions loaded to SRAM
diff --git a/trunk/arch/arm/mach-omap2/clock3xxx_data.c b/trunk/arch/arm/mach-omap2/clock3xxx_data.c
index dfdce2d82779..138646deac89 100644
--- a/trunk/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/trunk/arch/arm/mach-omap2/clock3xxx_data.c
@@ -3417,13 +3417,7 @@ int __init omap3xxx_clk_init(void)
struct omap_clk *c;
u32 cpu_clkflg = CK_3XXX;
- if (cpu_is_omap3517()) {
- cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
- cpu_clkflg |= CK_3517;
- } else if (cpu_is_omap3505()) {
- cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
- cpu_clkflg |= CK_3505;
- } else if (cpu_is_omap34xx()) {
+ if (cpu_is_omap34xx()) {
cpu_mask = RATE_IN_3XXX;
cpu_clkflg |= CK_343X;
@@ -3438,6 +3432,12 @@ int __init omap3xxx_clk_init(void)
cpu_mask |= RATE_IN_3430ES2PLUS;
cpu_clkflg |= CK_3430ES2;
}
+ } else if (cpu_is_omap3517()) {
+ cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
+ cpu_clkflg |= CK_3517;
+ } else if (cpu_is_omap3505()) {
+ cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
+ cpu_clkflg |= CK_3505;
}
if (omap3_has_192mhz_clk())
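The reordering above only matters if the cpu_is_*() predicates can overlap: when a specific SoC test is also satisfied by a broader family test, whichever branch comes first in the chain wins. A generic stand-in sketch of that effect (these predicates are invented, not the real cpu_is_*() helpers):

/*
 * First matching branch wins, so a broad family check placed before a
 * more specific variant check shadows it.  All names are stand-ins.
 */
#include <stdio.h>

struct chip { int family; int variant; };

static int is_family(const struct chip *c)  { return c->family == 34; }
static int is_variant(const struct chip *c) { return c->family == 34 && c->variant == 3517; }

static const char *flags_family_first(const struct chip *c)
{
	if (is_family(c))
		return "family flags";
	else if (is_variant(c))
		return "variant flags";	/* unreachable: shadowed above */
	return "none";
}

static const char *flags_variant_first(const struct chip *c)
{
	if (is_variant(c))
		return "variant flags";
	else if (is_family(c))
		return "family flags";
	return "none";
}

int main(void)
{
	struct chip c = { .family = 34, .variant = 3517 };

	printf("%s\n", flags_family_first(&c));   /* family flags  */
	printf("%s\n", flags_variant_first(&c));  /* variant flags */
	return 0;
}
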
diff --git a/trunk/arch/arm/mach-omap2/id.c b/trunk/arch/arm/mach-omap2/id.c
index 9a879f959509..e8256a2ed8e7 100644
--- a/trunk/arch/arm/mach-omap2/id.c
+++ b/trunk/arch/arm/mach-omap2/id.c
@@ -284,8 +284,8 @@ static void __init omap3_check_revision(void)
default:
omap_revision = OMAP3630_REV_ES1_2;
omap_chip.oc |= CHIP_IS_OMAP3630ES1_2;
+ break;
}
- break;
default:
/* Unknown default to latest silicon rev as default*/
omap_revision = OMAP3630_REV_ES1_2;
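The break relocated in the hunk above sits inside a nested switch, and in C a break there only leaves the inner switch, so the enclosing case can still fall through to the outer default. A standalone illustration of that scoping, with invented values:

/*
 * break inside a nested switch terminates only the inner switch; a
 * second break after the inner switch's closing brace is what stops
 * the outer case from falling through.
 */
#include <stdio.h>

static const char *classify(int outer, int inner)
{
	const char *result = "unset";

	switch (outer) {
	case 1:
		switch (inner) {
		case 0:
			result = "outer 1 / inner 0";
			break;		/* leaves only the inner switch */
		default:
			result = "outer 1 / inner other";
			break;		/* same: inner switch only */
		}
		break;			/* without this, falls into default below */
	default:
		result = "outer default";
	}
	return result;
}

int main(void)
{
	printf("%s\n", classify(1, 0));	/* outer 1 / inner 0 */
	printf("%s\n", classify(2, 0));	/* outer default */
	return 0;
}
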
diff --git a/trunk/arch/arm/mach-omap2/include/mach/entry-macro.S b/trunk/arch/arm/mach-omap2/include/mach/entry-macro.S
index 06e64e1fc28a..50fd74916643 100644
--- a/trunk/arch/arm/mach-omap2/include/mach/entry-macro.S
+++ b/trunk/arch/arm/mach-omap2/include/mach/entry-macro.S
@@ -177,10 +177,7 @@ omap_irq_base: .word 0
cmpne \irqnr, \tmp
cmpcs \irqnr, \irqnr
.endm
-#endif
-#endif /* MULTI_OMAP2 */
-#ifdef CONFIG_SMP
/* We assume that irqstat (the raw value of the IRQ acknowledge
* register) is preserved from the macro above.
* If there is an IPI, we immediately signal end of interrupt
@@ -208,7 +205,8 @@ omap_irq_base: .word 0
streq \irqstat, [\base, #GIC_CPU_EOI]
cmp \tmp, #0
.endm
-#endif /* CONFIG_SMP */
+#endif
+#endif /* MULTI_OMAP2 */
.macro irq_prio_table
.endm
diff --git a/trunk/arch/arm/mach-omap2/omap-smp.c b/trunk/arch/arm/mach-omap2/omap-smp.c
index 9e9f70e18e3c..af3c20c8d3f9 100644
--- a/trunk/arch/arm/mach-omap2/omap-smp.c
+++ b/trunk/arch/arm/mach-omap2/omap-smp.c
@@ -102,7 +102,8 @@ static void __init wakeup_secondary(void)
* Send a 'sev' to wake the secondary core from WFE.
* Drain the outstanding writes to memory
*/
- dsb_sev();
+ dsb();
+ set_event();
mb();
}
diff --git a/trunk/arch/arm/mach-omap2/pm34xx.c b/trunk/arch/arm/mach-omap2/pm34xx.c
index 7b03426c72a3..fb4994ad622e 100644
--- a/trunk/arch/arm/mach-omap2/pm34xx.c
+++ b/trunk/arch/arm/mach-omap2/pm34xx.c
@@ -480,9 +480,7 @@ void omap_sram_idle(void)
}
/* Disable IO-PAD and IO-CHAIN wakeup */
- if (omap3_has_io_wakeup() &&
- (per_next_state < PWRDM_POWER_ON ||
- core_next_state < PWRDM_POWER_ON)) {
+ if (omap3_has_io_wakeup() && core_next_state < PWRDM_POWER_ON) {
prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
omap3_disable_io_chain();
}
diff --git a/trunk/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/trunk/arch/arm/mach-pxa/cpufreq-pxa2xx.c
index 50d5939a78f1..268a9bc6be8a 100644
--- a/trunk/arch/arm/mach-pxa/cpufreq-pxa2xx.c
+++ b/trunk/arch/arm/mach-pxa/cpufreq-pxa2xx.c
@@ -398,7 +398,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
return 0;
}
-static int pxa_cpufreq_init(struct cpufreq_policy *policy)
+static __init int pxa_cpufreq_init(struct cpufreq_policy *policy)
{
int i;
unsigned int freq;
diff --git a/trunk/arch/arm/mach-pxa/cpufreq-pxa3xx.c b/trunk/arch/arm/mach-pxa/cpufreq-pxa3xx.c
index 0a0d0fe99220..27fa329d9a8b 100644
--- a/trunk/arch/arm/mach-pxa/cpufreq-pxa3xx.c
+++ b/trunk/arch/arm/mach-pxa/cpufreq-pxa3xx.c
@@ -204,7 +204,7 @@ static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy,
return 0;
}
-static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
+static __init int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
{
int ret = -EINVAL;
diff --git a/trunk/arch/arm/mach-pxa/include/mach/mfp-pxa300.h b/trunk/arch/arm/mach-pxa/include/mach/mfp-pxa300.h
index 4e1287070d21..7139e0dc26d1 100644
--- a/trunk/arch/arm/mach-pxa/include/mach/mfp-pxa300.h
+++ b/trunk/arch/arm/mach-pxa/include/mach/mfp-pxa300.h
@@ -71,10 +71,10 @@
#define GPIO46_CI_DD_7 MFP_CFG_DRV(GPIO46, AF0, DS04X)
#define GPIO47_CI_DD_8 MFP_CFG_DRV(GPIO47, AF1, DS04X)
#define GPIO48_CI_DD_9 MFP_CFG_DRV(GPIO48, AF1, DS04X)
+#define GPIO52_CI_HSYNC MFP_CFG_DRV(GPIO52, AF0, DS04X)
+#define GPIO51_CI_VSYNC MFP_CFG_DRV(GPIO51, AF0, DS04X)
#define GPIO49_CI_MCLK MFP_CFG_DRV(GPIO49, AF0, DS04X)
#define GPIO50_CI_PCLK MFP_CFG_DRV(GPIO50, AF0, DS04X)
-#define GPIO51_CI_HSYNC MFP_CFG_DRV(GPIO51, AF0, DS04X)
-#define GPIO52_CI_VSYNC MFP_CFG_DRV(GPIO52, AF0, DS04X)
/* KEYPAD */
#define GPIO3_KP_DKIN_6 MFP_CFG_LPM(GPIO3, AF2, FLOAT)
diff --git a/trunk/arch/arm/mach-s3c2410/include/mach/vmalloc.h b/trunk/arch/arm/mach-s3c2410/include/mach/vmalloc.h
index 54297eb0bf5e..315b0078a34d 100644
--- a/trunk/arch/arm/mach-s3c2410/include/mach/vmalloc.h
+++ b/trunk/arch/arm/mach-s3c2410/include/mach/vmalloc.h
@@ -15,6 +15,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H
-#define VMALLOC_END 0xE0000000UL
+#define VMALLOC_END (0xE0000000)
#endif /* __ASM_ARCH_VMALLOC_H */
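This hunk and the similar ones below drop the UL suffix from VMALLOC_END. The suffix only changes the constant's type, which a small host-side check can show; the macros here are copies for illustration, not the kernel definitions:

/*
 * C11 _Generic reports the type the compiler picks for each spelling
 * of the constant: without a suffix 0xE0000000 is typically an
 * unsigned int, with UL it is an unsigned long.
 */
#include <stdio.h>

#define VMALLOC_END_PLAIN	0xE0000000
#define VMALLOC_END_UL		0xE0000000UL

int main(void)
{
	printf("plain  : %s\n", _Generic(VMALLOC_END_PLAIN,
			unsigned int: "unsigned int",
			long: "long",
			unsigned long: "unsigned long",
			default: "other"));
	printf("with UL: %s\n", _Generic(VMALLOC_END_UL,
			unsigned long: "unsigned long",
			default: "other"));
	return 0;
}
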
diff --git a/trunk/arch/arm/mach-s3c64xx/include/mach/vmalloc.h b/trunk/arch/arm/mach-s3c64xx/include/mach/vmalloc.h
index bc0e91389864..7411ef3711a6 100644
--- a/trunk/arch/arm/mach-s3c64xx/include/mach/vmalloc.h
+++ b/trunk/arch/arm/mach-s3c64xx/include/mach/vmalloc.h
@@ -15,6 +15,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H
-#define VMALLOC_END 0xE0000000UL
+#define VMALLOC_END (0xE0000000)
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/trunk/arch/arm/mach-s5p6440/include/mach/vmalloc.h b/trunk/arch/arm/mach-s5p6440/include/mach/vmalloc.h
index e3f0eebf5205..16df257b1dce 100644
--- a/trunk/arch/arm/mach-s5p6440/include/mach/vmalloc.h
+++ b/trunk/arch/arm/mach-s5p6440/include/mach/vmalloc.h
@@ -12,6 +12,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H
-#define VMALLOC_END 0xE0000000UL
+#define VMALLOC_END (0xE0000000)
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/trunk/arch/arm/mach-s5p6442/include/mach/vmalloc.h b/trunk/arch/arm/mach-s5p6442/include/mach/vmalloc.h
index f5c83f02c18e..be3333688c20 100644
--- a/trunk/arch/arm/mach-s5p6442/include/mach/vmalloc.h
+++ b/trunk/arch/arm/mach-s5p6442/include/mach/vmalloc.h
@@ -12,6 +12,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H
-#define VMALLOC_END 0xE0000000UL
+#define VMALLOC_END (0xE0000000)
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/trunk/arch/arm/mach-s5pv210/include/mach/vmalloc.h b/trunk/arch/arm/mach-s5pv210/include/mach/vmalloc.h
index df9a28808323..58f515e0747e 100644
--- a/trunk/arch/arm/mach-s5pv210/include/mach/vmalloc.h
+++ b/trunk/arch/arm/mach-s5pv210/include/mach/vmalloc.h
@@ -17,6 +17,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H __FILE__
-#define VMALLOC_END (0xE0000000UL)
+#define VMALLOC_END (0xE0000000)
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/trunk/arch/arm/mach-s5pv310/clock.c b/trunk/arch/arm/mach-s5pv310/clock.c
index 26a0f03df8ea..77f2b4d85e6b 100644
--- a/trunk/arch/arm/mach-s5pv310/clock.c
+++ b/trunk/arch/arm/mach-s5pv310/clock.c
@@ -30,16 +30,6 @@ static struct clk clk_sclk_hdmi27m = {
.rate = 27000000,
};
-static int s5pv310_clksrc_mask_peril0_ctrl(struct clk *clk, int enable)
-{
- return s5p_gatectrl(S5P_CLKSRC_MASK_PERIL0, clk, enable);
-}
-
-static int s5pv310_clk_ip_peril_ctrl(struct clk *clk, int enable)
-{
- return s5p_gatectrl(S5P_CLKGATE_IP_PERIL, clk, enable);
-}
-
/* Core list of CMU_CPU side */
static struct clksrc_clk clk_mout_apll = {
@@ -49,14 +39,6 @@ static struct clksrc_clk clk_mout_apll = {
},
.sources = &clk_src_apll,
.reg_src = { .reg = S5P_CLKSRC_CPU, .shift = 0, .size = 1 },
-};
-
-static struct clksrc_clk clk_sclk_apll = {
- .clk = {
- .name = "sclk_apll",
- .id = -1,
- .parent = &clk_mout_apll.clk,
- },
.reg_div = { .reg = S5P_CLKDIV_CPU, .shift = 24, .size = 3 },
};
@@ -79,7 +61,7 @@ static struct clksrc_clk clk_mout_mpll = {
};
static struct clk *clkset_moutcore_list[] = {
- [0] = &clk_sclk_apll.clk,
+ [0] = &clk_mout_apll.clk,
[1] = &clk_mout_mpll.clk,
};
@@ -172,7 +154,7 @@ static struct clksrc_clk clk_pclk_dbg = {
static struct clk *clkset_corebus_list[] = {
[0] = &clk_mout_mpll.clk,
- [1] = &clk_sclk_apll.clk,
+ [1] = &clk_mout_apll.clk,
};
static struct clksrc_sources clkset_mout_corebus = {
@@ -238,7 +220,7 @@ static struct clksrc_clk clk_pclk_acp = {
static struct clk *clkset_aclk_top_list[] = {
[0] = &clk_mout_mpll.clk,
- [1] = &clk_sclk_apll.clk,
+ [1] = &clk_mout_apll.clk,
};
static struct clksrc_sources clkset_aclk_200 = {
@@ -339,6 +321,11 @@ static struct clksrc_clk clk_sclk_vpll = {
.reg_src = { .reg = S5P_CLKSRC_TOP0, .shift = 8, .size = 1 },
};
+static int s5pv310_clk_ip_peril_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_IP_PERIL, clk, enable);
+}
+
static struct clk init_clocks_disable[] = {
{
.name = "timers",
@@ -350,37 +337,7 @@ static struct clk init_clocks_disable[] = {
};
static struct clk init_clocks[] = {
- {
- .name = "uart",
- .id = 0,
- .enable = s5pv310_clk_ip_peril_ctrl,
- .ctrlbit = (1 << 0),
- }, {
- .name = "uart",
- .id = 1,
- .enable = s5pv310_clk_ip_peril_ctrl,
- .ctrlbit = (1 << 1),
- }, {
- .name = "uart",
- .id = 2,
- .enable = s5pv310_clk_ip_peril_ctrl,
- .ctrlbit = (1 << 2),
- }, {
- .name = "uart",
- .id = 3,
- .enable = s5pv310_clk_ip_peril_ctrl,
- .ctrlbit = (1 << 3),
- }, {
- .name = "uart",
- .id = 4,
- .enable = s5pv310_clk_ip_peril_ctrl,
- .ctrlbit = (1 << 4),
- }, {
- .name = "uart",
- .id = 5,
- .enable = s5pv310_clk_ip_peril_ctrl,
- .ctrlbit = (1 << 5),
- }
+ /* Nothing here yet */
};
static struct clk *clkset_group_list[] = {
@@ -402,8 +359,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "uclk1",
.id = 0,
- .enable = s5pv310_clksrc_mask_peril0_ctrl,
.ctrlbit = (1 << 0),
+ .enable = s5pv310_clk_ip_peril_ctrl,
},
.sources = &clkset_group,
.reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 0, .size = 4 },
@@ -412,8 +369,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "uclk1",
.id = 1,
- .enable = s5pv310_clksrc_mask_peril0_ctrl,
- .ctrlbit = (1 << 4),
+ .enable = s5pv310_clk_ip_peril_ctrl,
+ .ctrlbit = (1 << 1),
},
.sources = &clkset_group,
.reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 4, .size = 4 },
@@ -422,8 +379,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "uclk1",
.id = 2,
- .enable = s5pv310_clksrc_mask_peril0_ctrl,
- .ctrlbit = (1 << 8),
+ .enable = s5pv310_clk_ip_peril_ctrl,
+ .ctrlbit = (1 << 2),
},
.sources = &clkset_group,
.reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 8, .size = 4 },
@@ -432,8 +389,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "uclk1",
.id = 3,
- .enable = s5pv310_clksrc_mask_peril0_ctrl,
- .ctrlbit = (1 << 12),
+ .enable = s5pv310_clk_ip_peril_ctrl,
+ .ctrlbit = (1 << 3),
},
.sources = &clkset_group,
.reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 12, .size = 4 },
@@ -442,7 +399,7 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_pwm",
.id = -1,
- .enable = s5pv310_clksrc_mask_peril0_ctrl,
+ .enable = s5pv310_clk_ip_peril_ctrl,
.ctrlbit = (1 << 24),
},
.sources = &clkset_group,
@@ -454,7 +411,6 @@ static struct clksrc_clk clksrcs[] = {
/* Clock initialization code */
static struct clksrc_clk *sysclks[] = {
&clk_mout_apll,
- &clk_sclk_apll,
&clk_mout_epll,
&clk_mout_mpll,
&clk_moutcore,
@@ -514,11 +470,11 @@ void __init_or_cpufreq s5pv310_setup_clocks(void)
apll = s5p_get_pll45xx(xtal, __raw_readl(S5P_APLL_CON0), pll_4508);
mpll = s5p_get_pll45xx(xtal, __raw_readl(S5P_MPLL_CON0), pll_4508);
epll = s5p_get_pll46xx(xtal, __raw_readl(S5P_EPLL_CON0),
- __raw_readl(S5P_EPLL_CON1), pll_4600);
+ __raw_readl(S5P_EPLL_CON1), pll_4500);
vpllsrc = clk_get_rate(&clk_vpllsrc.clk);
vpll = s5p_get_pll46xx(vpllsrc, __raw_readl(S5P_VPLL_CON0),
- __raw_readl(S5P_VPLL_CON1), pll_4650);
+ __raw_readl(S5P_VPLL_CON1), pll_4502);
clk_fout_apll.rate = apll;
clk_fout_mpll.rate = mpll;
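The uclk entries above switch their enable callback to s5pv310_clk_ip_peril_ctrl, which gates a per-clock control bit in a single register. A simplified, self-contained model of that gate-callback pattern (the register is simulated with a plain variable and all names are invented):

/*
 * Each gate clock carries a control-bit mask; enable/disable just sets
 * or clears that bit in the block's gating register.
 */
#include <assert.h>
#include <stdint.h>

struct fake_clk {
	uint32_t ctrlbit;
};

static uint32_t fake_clkgate_ip_peril;	/* stand-in for the gating register */

static int fake_gatectrl(uint32_t *reg, struct fake_clk *clk, int enable)
{
	if (enable)
		*reg |= clk->ctrlbit;
	else
		*reg &= ~clk->ctrlbit;
	return 0;
}

int main(void)
{
	struct fake_clk uart0 = { .ctrlbit = 1u << 0 };
	struct fake_clk uart1 = { .ctrlbit = 1u << 1 };

	fake_gatectrl(&fake_clkgate_ip_peril, &uart0, 1);
	fake_gatectrl(&fake_clkgate_ip_peril, &uart1, 1);
	assert(fake_clkgate_ip_peril == 0x3);

	fake_gatectrl(&fake_clkgate_ip_peril, &uart0, 0);
	assert(fake_clkgate_ip_peril == 0x2);
	return 0;
}
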
diff --git a/trunk/arch/arm/mach-s5pv310/cpu.c b/trunk/arch/arm/mach-s5pv310/cpu.c
index e5b261a99ab2..196c9f12ed85 100644
--- a/trunk/arch/arm/mach-s5pv310/cpu.c
+++ b/trunk/arch/arm/mach-s5pv310/cpu.c
@@ -45,16 +45,6 @@ static struct map_desc s5pv310_iodesc[] __initdata = {
.pfn = __phys_to_pfn(S5PV310_PA_L2CC),
.length = SZ_4K,
.type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5P_VA_SYSRAM,
- .pfn = __phys_to_pfn(S5PV310_PA_SYSRAM),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5P_VA_CMU,
- .pfn = __phys_to_pfn(S5PV310_PA_CMU),
- .length = SZ_128K,
- .type = MT_DEVICE,
},
};
diff --git a/trunk/arch/arm/mach-s5pv310/include/mach/irqs.h b/trunk/arch/arm/mach-s5pv310/include/mach/irqs.h
index 4cdedda6e652..56885ca3773c 100644
--- a/trunk/arch/arm/mach-s5pv310/include/mach/irqs.h
+++ b/trunk/arch/arm/mach-s5pv310/include/mach/irqs.h
@@ -15,14 +15,12 @@
#include
-/* PPI: Private Peripheral Interrupt */
-
+/* Private Peripheral Interrupt */
#define IRQ_PPI(x) S5P_IRQ(x+16)
#define IRQ_LOCALTIMER IRQ_PPI(13)
-/* SPI: Shared Peripheral Interrupt */
-
+/* Shared Peripheral Interrupt */
#define IRQ_SPI(x) S5P_IRQ(x+32)
#define IRQ_EINT0 IRQ_SPI(40)
@@ -38,7 +36,7 @@
#define IRQ_PCIE IRQ_SPI(50)
#define IRQ_SYSTEM_TIMER IRQ_SPI(51)
#define IRQ_MFC IRQ_SPI(52)
-#define IRQ_WDT IRQ_SPI(53)
+#define IRQ_WTD IRQ_SPI(53)
#define IRQ_AUDIO_SS IRQ_SPI(54)
#define IRQ_AC97 IRQ_SPI(55)
#define IRQ_SPDIF IRQ_SPI(56)
@@ -69,9 +67,8 @@
#define IRQ_IIC COMBINER_IRQ(27, 0)
/* Set the default NR_IRQS */
-
#define NR_IRQS COMBINER_IRQ(MAX_COMBINER_NR, 0)
#define MAX_COMBINER_NR 39
-#endif /* __ASM_ARCH_IRQS_H */
+#endif /* ASM_ARCH_IRQS_H */
diff --git a/trunk/arch/arm/mach-s5pv310/include/mach/map.h b/trunk/arch/arm/mach-s5pv310/include/mach/map.h
index 213e1101a3b3..87697c9fca5b 100644
--- a/trunk/arch/arm/mach-s5pv310/include/mach/map.h
+++ b/trunk/arch/arm/mach-s5pv310/include/mach/map.h
@@ -23,16 +23,12 @@
#include
-#define S5PV310_PA_SYSRAM (0x02025000)
-
#define S5PV310_PA_CHIPID (0x10000000)
#define S5P_PA_CHIPID S5PV310_PA_CHIPID
#define S5PV310_PA_SYSCON (0x10020000)
#define S5P_PA_SYSCON S5PV310_PA_SYSCON
-#define S5PV310_PA_CMU (0x10030000)
-
#define S5PV310_PA_WATCHDOG (0x10060000)
#define S5PV310_PA_COMBINER (0x10448000)
@@ -43,12 +39,8 @@
#define S5PV310_PA_GIC_DIST (0x10501000)
#define S5PV310_PA_L2CC (0x10502000)
-#define S5PV310_PA_GPIO1 (0x11400000)
-#define S5PV310_PA_GPIO2 (0x11000000)
-#define S5PV310_PA_GPIO3 (0x03860000)
-#define S5P_PA_GPIO S5PV310_PA_GPIO1
-
-#define S5PV310_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000))
+#define S5PV310_PA_GPIO (0x11000000)
+#define S5P_PA_GPIO S5PV310_PA_GPIO
#define S5PV310_PA_UART (0x13800000)
@@ -71,10 +63,6 @@
/* compatibiltiy defines. */
#define S3C_PA_UART S5PV310_PA_UART
-#define S3C_PA_HSMMC0 S5PV310_PA_HSMMC(0)
-#define S3C_PA_HSMMC1 S5PV310_PA_HSMMC(1)
-#define S3C_PA_HSMMC2 S5PV310_PA_HSMMC(2)
-#define S3C_PA_HSMMC3 S5PV310_PA_HSMMC(3)
#define S3C_PA_IIC S5PV310_PA_IIC0
#define S3C_PA_WDT S5PV310_PA_WATCHDOG
diff --git a/trunk/arch/arm/mach-s5pv310/include/mach/regs-clock.h b/trunk/arch/arm/mach-s5pv310/include/mach/regs-clock.h
index 4013553cd9be..59e3a7e94d80 100644
--- a/trunk/arch/arm/mach-s5pv310/include/mach/regs-clock.h
+++ b/trunk/arch/arm/mach-s5pv310/include/mach/regs-clock.h
@@ -15,49 +15,48 @@
#include
-#define S5P_CLKREG(x) (S5P_VA_CMU + (x))
+#define S5P_CLKREG(x) (S3C_VA_SYS + (x))
#define S5P_INFORM0 S5P_CLKREG(0x800)
-#define S5P_EPLL_CON0 S5P_CLKREG(0x0C110)
-#define S5P_EPLL_CON1 S5P_CLKREG(0x0C114)
-#define S5P_VPLL_CON0 S5P_CLKREG(0x0C120)
-#define S5P_VPLL_CON1 S5P_CLKREG(0x0C124)
+#define S5P_EPLL_CON0 S5P_CLKREG(0x1C110)
+#define S5P_EPLL_CON1 S5P_CLKREG(0x1C114)
+#define S5P_VPLL_CON0 S5P_CLKREG(0x1C120)
+#define S5P_VPLL_CON1 S5P_CLKREG(0x1C124)
-#define S5P_CLKSRC_TOP0 S5P_CLKREG(0x0C210)
-#define S5P_CLKSRC_TOP1 S5P_CLKREG(0x0C214)
+#define S5P_CLKSRC_TOP0 S5P_CLKREG(0x1C210)
+#define S5P_CLKSRC_TOP1 S5P_CLKREG(0x1C214)
-#define S5P_CLKSRC_PERIL0 S5P_CLKREG(0x0C250)
+#define S5P_CLKSRC_PERIL0 S5P_CLKREG(0x1C250)
-#define S5P_CLKDIV_TOP S5P_CLKREG(0x0C510)
+#define S5P_CLKDIV_TOP S5P_CLKREG(0x1C510)
-#define S5P_CLKDIV_PERIL0 S5P_CLKREG(0x0C550)
-#define S5P_CLKDIV_PERIL1 S5P_CLKREG(0x0C554)
-#define S5P_CLKDIV_PERIL2 S5P_CLKREG(0x0C558)
-#define S5P_CLKDIV_PERIL3 S5P_CLKREG(0x0C55C)
-#define S5P_CLKDIV_PERIL4 S5P_CLKREG(0x0C560)
-#define S5P_CLKDIV_PERIL5 S5P_CLKREG(0x0C564)
+#define S5P_CLKDIV_PERIL0 S5P_CLKREG(0x1C550)
+#define S5P_CLKDIV_PERIL1 S5P_CLKREG(0x1C554)
+#define S5P_CLKDIV_PERIL2 S5P_CLKREG(0x1C558)
+#define S5P_CLKDIV_PERIL3 S5P_CLKREG(0x1C55C)
+#define S5P_CLKDIV_PERIL4 S5P_CLKREG(0x1C560)
+#define S5P_CLKDIV_PERIL5 S5P_CLKREG(0x1C564)
-#define S5P_CLKSRC_MASK_PERIL0 S5P_CLKREG(0x0C350)
+#define S5P_CLKGATE_IP_PERIL S5P_CLKREG(0x1C950)
-#define S5P_CLKGATE_IP_PERIL S5P_CLKREG(0x0C950)
+#define S5P_CLKSRC_CORE S5P_CLKREG(0x20200)
-#define S5P_CLKSRC_CORE S5P_CLKREG(0x10200)
-#define S5P_CLKDIV_CORE0 S5P_CLKREG(0x10500)
+#define S5P_CLKDIV_CORE0 S5P_CLKREG(0x20500)
-#define S5P_APLL_LOCK S5P_CLKREG(0x14000)
-#define S5P_MPLL_LOCK S5P_CLKREG(0x14004)
-#define S5P_APLL_CON0 S5P_CLKREG(0x14100)
-#define S5P_APLL_CON1 S5P_CLKREG(0x14104)
-#define S5P_MPLL_CON0 S5P_CLKREG(0x14108)
-#define S5P_MPLL_CON1 S5P_CLKREG(0x1410C)
+#define S5P_APLL_LOCK S5P_CLKREG(0x24000)
+#define S5P_MPLL_LOCK S5P_CLKREG(0x24004)
+#define S5P_APLL_CON0 S5P_CLKREG(0x24100)
+#define S5P_APLL_CON1 S5P_CLKREG(0x24104)
+#define S5P_MPLL_CON0 S5P_CLKREG(0x24108)
+#define S5P_MPLL_CON1 S5P_CLKREG(0x2410C)
-#define S5P_CLKSRC_CPU S5P_CLKREG(0x14200)
-#define S5P_CLKMUX_STATCPU S5P_CLKREG(0x14400)
+#define S5P_CLKSRC_CPU S5P_CLKREG(0x24200)
+#define S5P_CLKMUX_STATCPU S5P_CLKREG(0x24400)
-#define S5P_CLKDIV_CPU S5P_CLKREG(0x14500)
-#define S5P_CLKDIV_STATCPU S5P_CLKREG(0x14600)
+#define S5P_CLKDIV_CPU S5P_CLKREG(0x24500)
+#define S5P_CLKDIV_STATCPU S5P_CLKREG(0x24600)
-#define S5P_CLKGATE_SCLKCPU S5P_CLKREG(0x14800)
+#define S5P_CLKGATE_SCLKCPU S5P_CLKREG(0x24800)
#endif /* __ASM_ARCH_REGS_CLOCK_H */
diff --git a/trunk/arch/arm/mach-s5pv310/include/mach/vmalloc.h b/trunk/arch/arm/mach-s5pv310/include/mach/vmalloc.h
index 256f221edf3a..3f565ebb7daa 100644
--- a/trunk/arch/arm/mach-s5pv310/include/mach/vmalloc.h
+++ b/trunk/arch/arm/mach-s5pv310/include/mach/vmalloc.h
@@ -17,6 +17,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H __FILE__
-#define VMALLOC_END (0xF0000000UL)
+#define VMALLOC_END (0xF0000000)
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/trunk/arch/arm/mach-s5pv310/platsmp.c b/trunk/arch/arm/mach-s5pv310/platsmp.c
index d357c198edee..fe9469abd006 100644
--- a/trunk/arch/arm/mach-s5pv310/platsmp.c
+++ b/trunk/arch/arm/mach-s5pv310/platsmp.c
@@ -187,6 +187,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
* until it receives a soft interrupt, and then the
* secondary CPU branches to this address.
*/
- __raw_writel(BSYM(virt_to_phys(s5pv310_secondary_startup)), S5P_VA_SYSRAM);
+ __raw_writel(BSYM(virt_to_phys(s5pv310_secondary_startup)), S5P_INFORM0);
}
}
diff --git a/trunk/arch/arm/mach-shmobile/Makefile b/trunk/arch/arm/mach-shmobile/Makefile
index ae416fe7daf2..5e16b4c69222 100644
--- a/trunk/arch/arm/mach-shmobile/Makefile
+++ b/trunk/arch/arm/mach-shmobile/Makefile
@@ -3,7 +3,7 @@
#
# Common objects
-obj-y := timer.o console.o clock.o pm_runtime.o
+obj-y := timer.o console.o clock.o
# CPU objects
obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o
diff --git a/trunk/arch/arm/mach-shmobile/board-ap4evb.c b/trunk/arch/arm/mach-shmobile/board-ap4evb.c
index 95935c83c306..23d472f9525e 100644
--- a/trunk/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/trunk/arch/arm/mach-shmobile/board-ap4evb.c
@@ -25,7 +25,6 @@
#include
#include
#include
-#include
#include
#include
#include
@@ -40,7 +39,6 @@
#include
#include
#include
-#include
#include
#include
@@ -309,7 +307,6 @@ static struct sh_mobile_sdhi_info sdhi1_info = {
.dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
.dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
.tmio_ocr_mask = MMC_VDD_165_195,
- .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE,
};
static struct resource sdhi1_resources[] = {
@@ -561,7 +558,7 @@ static struct resource fsi_resources[] = {
static struct platform_device fsi_device = {
.name = "sh_fsi2",
- .id = -1,
+ .id = 0,
.num_resources = ARRAY_SIZE(fsi_resources),
.resource = fsi_resources,
.dev = {
@@ -653,44 +650,7 @@ static struct platform_device hdmi_device = {
},
};
-static struct gpio_led ap4evb_leds[] = {
- {
- .name = "led4",
- .gpio = GPIO_PORT185,
- .default_state = LEDS_GPIO_DEFSTATE_ON,
- },
- {
- .name = "led2",
- .gpio = GPIO_PORT186,
- .default_state = LEDS_GPIO_DEFSTATE_ON,
- },
- {
- .name = "led3",
- .gpio = GPIO_PORT187,
- .default_state = LEDS_GPIO_DEFSTATE_ON,
- },
- {
- .name = "led1",
- .gpio = GPIO_PORT188,
- .default_state = LEDS_GPIO_DEFSTATE_ON,
- }
-};
-
-static struct gpio_led_platform_data ap4evb_leds_pdata = {
- .num_leds = ARRAY_SIZE(ap4evb_leds),
- .leds = ap4evb_leds,
-};
-
-static struct platform_device leds_device = {
- .name = "leds-gpio",
- .id = 0,
- .dev = {
- .platform_data = &ap4evb_leds_pdata,
- },
-};
-
static struct platform_device *ap4evb_devices[] __initdata = {
- &leds_device,
&nor_flash_device,
&smc911x_device,
&sdhi0_device,
@@ -880,6 +840,20 @@ static void __init ap4evb_init(void)
gpio_request(GPIO_FN_CS5A, NULL);
gpio_request(GPIO_FN_IRQ6_39, NULL);
+ /* enable LED 1 - 4 */
+ gpio_request(GPIO_PORT185, NULL);
+ gpio_request(GPIO_PORT186, NULL);
+ gpio_request(GPIO_PORT187, NULL);
+ gpio_request(GPIO_PORT188, NULL);
+ gpio_direction_output(GPIO_PORT185, 1);
+ gpio_direction_output(GPIO_PORT186, 1);
+ gpio_direction_output(GPIO_PORT187, 1);
+ gpio_direction_output(GPIO_PORT188, 1);
+ gpio_export(GPIO_PORT185, 0);
+ gpio_export(GPIO_PORT186, 0);
+ gpio_export(GPIO_PORT187, 0);
+ gpio_export(GPIO_PORT188, 0);
+
/* enable Debug switch (S6) */
gpio_request(GPIO_PORT32, NULL);
gpio_request(GPIO_PORT33, NULL);
diff --git a/trunk/arch/arm/mach-shmobile/clock-sh7372.c b/trunk/arch/arm/mach-shmobile/clock-sh7372.c
index 759468992ad2..fb4e9b1d788e 100644
--- a/trunk/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/trunk/arch/arm/mach-shmobile/clock-sh7372.c
@@ -286,6 +286,7 @@ static struct clk_ops pllc2_clk_ops = {
struct clk pllc2_clk = {
.ops = &pllc2_clk_ops,
+ .flags = CLK_ENABLE_ON_INIT,
.parent = &extal1_div2_clk,
.freq_table = pllc2_freq_table,
.parent_table = pllc2_parent,
@@ -394,7 +395,7 @@ static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
enum { MSTP001,
MSTP131, MSTP130,
- MSTP129, MSTP128, MSTP127, MSTP126,
+ MSTP129, MSTP128,
MSTP118, MSTP117, MSTP116,
MSTP106, MSTP101, MSTP100,
MSTP223,
@@ -412,8 +413,6 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */
[MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */
[MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */
- [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU */
- [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2 */
[MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */
[MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */
[MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
@@ -429,7 +428,7 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
[MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
[MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
- [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSIA */
+ [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, CLK_ENABLE_ON_INIT), /* FSIA */
[MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
[MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */
[MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */
@@ -499,8 +498,6 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */
CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */
CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */
- CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU */
- CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2 */
CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */
CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */
diff --git a/trunk/arch/arm/mach-shmobile/clock.c b/trunk/arch/arm/mach-shmobile/clock.c
index 6b7c7c42bc8f..b7c705a213a2 100644
--- a/trunk/arch/arm/mach-shmobile/clock.c
+++ b/trunk/arch/arm/mach-shmobile/clock.c
@@ -1,10 +1,8 @@
/*
- * SH-Mobile Clock Framework
+ * SH-Mobile Timer
*
* Copyright (C) 2010 Magnus Damm
*
- * Used together with arch/arm/common/clkdev.c and drivers/sh/clk.c.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
diff --git a/trunk/arch/arm/mach-shmobile/pm_runtime.c b/trunk/arch/arm/mach-shmobile/pm_runtime.c
deleted file mode 100644
index 94912d3944d3..000000000000
--- a/trunk/arch/arm/mach-shmobile/pm_runtime.c
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * arch/arm/mach-shmobile/pm_runtime.c
- *
- * Runtime PM support code for SuperH Mobile ARM
- *
- * Copyright (C) 2009-2010 Magnus Damm
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#ifdef CONFIG_PM_RUNTIME
-#define BIT_ONCE 0
-#define BIT_ACTIVE 1
-#define BIT_CLK_ENABLED 2
-
-struct pm_runtime_data {
- unsigned long flags;
- struct clk *clk;
-};
-
-static void __devres_release(struct device *dev, void *res)
-{
- struct pm_runtime_data *prd = res;
-
- dev_dbg(dev, "__devres_release()\n");
-
- if (test_bit(BIT_CLK_ENABLED, &prd->flags))
- clk_disable(prd->clk);
-
- if (test_bit(BIT_ACTIVE, &prd->flags))
- clk_put(prd->clk);
-}
-
-static struct pm_runtime_data *__to_prd(struct device *dev)
-{
- return devres_find(dev, __devres_release, NULL, NULL);
-}
-
-static void platform_pm_runtime_init(struct device *dev,
- struct pm_runtime_data *prd)
-{
- if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) {
- prd->clk = clk_get(dev, NULL);
- if (!IS_ERR(prd->clk)) {
- set_bit(BIT_ACTIVE, &prd->flags);
- dev_info(dev, "clocks managed by runtime pm\n");
- }
- }
-}
-
-static void platform_pm_runtime_bug(struct device *dev,
- struct pm_runtime_data *prd)
-{
- if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags))
- dev_err(dev, "runtime pm suspend before resume\n");
-}
-
-int platform_pm_runtime_suspend(struct device *dev)
-{
- struct pm_runtime_data *prd = __to_prd(dev);
-
- dev_dbg(dev, "platform_pm_runtime_suspend()\n");
-
- platform_pm_runtime_bug(dev, prd);
-
- if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
- clk_disable(prd->clk);
- clear_bit(BIT_CLK_ENABLED, &prd->flags);
- }
-
- return 0;
-}
-
-int platform_pm_runtime_resume(struct device *dev)
-{
- struct pm_runtime_data *prd = __to_prd(dev);
-
- dev_dbg(dev, "platform_pm_runtime_resume()\n");
-
- platform_pm_runtime_init(dev, prd);
-
- if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
- clk_enable(prd->clk);
- set_bit(BIT_CLK_ENABLED, &prd->flags);
- }
-
- return 0;
-}
-
-int platform_pm_runtime_idle(struct device *dev)
-{
- /* suspend synchronously to disable clocks immediately */
- return pm_runtime_suspend(dev);
-}
-
-static int platform_bus_notify(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct device *dev = data;
- struct pm_runtime_data *prd;
-
- dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
-
- if (action == BUS_NOTIFY_BIND_DRIVER) {
- prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL);
- if (prd)
- devres_add(dev, prd);
- else
- dev_err(dev, "unable to alloc memory for runtime pm\n");
- }
-
- return 0;
-}
-
-#else /* CONFIG_PM_RUNTIME */
-
-static int platform_bus_notify(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct device *dev = data;
- struct clk *clk;
-
- dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
-
- switch (action) {
- case BUS_NOTIFY_BIND_DRIVER:
- clk = clk_get(dev, NULL);
- if (!IS_ERR(clk)) {
- clk_enable(clk);
- clk_put(clk);
- dev_info(dev, "runtime pm disabled, clock forced on\n");
- }
- break;
- case BUS_NOTIFY_UNBOUND_DRIVER:
- clk = clk_get(dev, NULL);
- if (!IS_ERR(clk)) {
- clk_disable(clk);
- clk_put(clk);
- dev_info(dev, "runtime pm disabled, clock forced off\n");
- }
- break;
- }
-
- return 0;
-}
-
-#endif /* CONFIG_PM_RUNTIME */
-
-static struct notifier_block platform_bus_notifier = {
- .notifier_call = platform_bus_notify
-};
-
-static int __init sh_pm_runtime_init(void)
-{
- bus_register_notifier(&platform_bus_type, &platform_bus_notifier);
- return 0;
-}
-core_initcall(sh_pm_runtime_init);
diff --git a/trunk/arch/arm/mach-tegra/board-harmony.c b/trunk/arch/arm/mach-tegra/board-harmony.c
index 9e305de56be9..05e78dd9b50c 100644
--- a/trunk/arch/arm/mach-tegra/board-harmony.c
+++ b/trunk/arch/arm/mach-tegra/board-harmony.c
@@ -91,8 +91,10 @@ static void __init tegra_harmony_fixup(struct machine_desc *desc,
{
mi->nr_banks = 2;
mi->bank[0].start = PHYS_OFFSET;
+ mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET);
mi->bank[0].size = 448 * SZ_1M;
mi->bank[1].start = SZ_512M;
+ mi->bank[1].node = PHYS_TO_NID(SZ_512M);
mi->bank[1].size = SZ_512M;
}
diff --git a/trunk/arch/arm/mach-tegra/include/mach/vmalloc.h b/trunk/arch/arm/mach-tegra/include/mach/vmalloc.h
index fd6aa65b2dc6..267a141730d9 100644
--- a/trunk/arch/arm/mach-tegra/include/mach/vmalloc.h
+++ b/trunk/arch/arm/mach-tegra/include/mach/vmalloc.h
@@ -23,6 +23,6 @@
#include
-#define VMALLOC_END 0xFE000000UL
+#define VMALLOC_END 0xFE000000
#endif
diff --git a/trunk/arch/arm/mm/Kconfig b/trunk/arch/arm/mm/Kconfig
index a0a2928ae4dd..33c3f570aaa0 100644
--- a/trunk/arch/arm/mm/Kconfig
+++ b/trunk/arch/arm/mm/Kconfig
@@ -398,7 +398,7 @@ config CPU_V6
# ARMv6k
config CPU_32v6K
bool "Support ARM V6K processor extensions" if !SMP
- depends on CPU_V6 || CPU_V7
+ depends on CPU_V6
default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
help
Say Y here if your ARMv6 processor supports the 'K' extension.
diff --git a/trunk/arch/arm/mm/dma-mapping.c b/trunk/arch/arm/mm/dma-mapping.c
index 4bc43e535d3b..c704eed63c5d 100644
--- a/trunk/arch/arm/mm/dma-mapping.c
+++ b/trunk/arch/arm/mm/dma-mapping.c
@@ -229,8 +229,6 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
}
} while (size -= PAGE_SIZE);
- dsb();
-
return (void *)c->vm_start;
}
return NULL;
diff --git a/trunk/arch/arm/plat-mxc/Kconfig b/trunk/arch/arm/plat-mxc/Kconfig
index 6785db4179b8..0527e65318f4 100644
--- a/trunk/arch/arm/plat-mxc/Kconfig
+++ b/trunk/arch/arm/plat-mxc/Kconfig
@@ -43,7 +43,6 @@ config ARCH_MXC91231
config ARCH_MX5
bool "MX5-based"
select CPU_V7
- select ARM_L1_CACHE_SHIFT_6
help
This enables support for systems based on the Freescale i.MX51 family
diff --git a/trunk/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h b/trunk/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h
index 656acb45d434..634e3f4c454d 100644
--- a/trunk/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h
+++ b/trunk/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h
@@ -37,9 +37,9 @@
* mach-mx5/eukrea_mbimx51-baseboard.c for cpuimx51
*/
-extern void eukrea_mbimxsd25_baseboard_init(void);
+extern void eukrea_mbimx25_baseboard_init(void);
extern void eukrea_mbimx27_baseboard_init(void);
-extern void eukrea_mbimxsd35_baseboard_init(void);
+extern void eukrea_mbimx35_baseboard_init(void);
extern void eukrea_mbimx51_baseboard_init(void);
#endif
diff --git a/trunk/arch/arm/plat-mxc/tzic.c b/trunk/arch/arm/plat-mxc/tzic.c
index 3703ab28257f..b3da9aad4295 100644
--- a/trunk/arch/arm/plat-mxc/tzic.c
+++ b/trunk/arch/arm/plat-mxc/tzic.c
@@ -164,9 +164,8 @@ int tzic_enable_wake(int is_idle)
return -EAGAIN;
for (i = 0; i < 4; i++) {
- v = is_idle ? __raw_readl(tzic_base + TZIC_ENSET0(i)) :
- wakeup_intr[i];
- __raw_writel(v, tzic_base + TZIC_WAKEUP0(i));
+ v = is_idle ? __raw_readl(TZIC_ENSET0(i)) : wakeup_intr[i];
+ __raw_writel(v, TZIC_WAKEUP0(i));
}
return 0;
diff --git a/trunk/arch/arm/plat-omap/include/plat/smp.h b/trunk/arch/arm/plat-omap/include/plat/smp.h
index 5177a9c5a25a..6a3ff65c0303 100644
--- a/trunk/arch/arm/plat-omap/include/plat/smp.h
+++ b/trunk/arch/arm/plat-omap/include/plat/smp.h
@@ -19,6 +19,13 @@
#include
+/*
+ * set_event() is used to wake up secondary core from wfe using sev. ROM
+ * code puts the second core into wfe(standby).
+ *
+ */
+#define set_event() __asm__ __volatile__ ("sev" : : : "memory")
+
/* Needed for secondary core boot */
extern void omap_secondary_startup(void);
extern u32 omap_modify_auxcoreboot0(u32 set_mask, u32 clear_mask);
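The set_event() macro added above pairs with the dsb() call in the omap-smp.c hunk earlier: the write of the secondary boot address must be drained before the event is signalled to the core parked in WFE. A bare sketch of that pairing, which only builds for an ARMv7 target and uses an invented wrapper name:

/*
 * DSB + SEV wake-up pairing, written as plain GCC inline assembly.
 * ARMv7 only; wake_secondary() is a made-up illustration, not the
 * kernel's wakeup_secondary().
 */
#define dsb()       __asm__ __volatile__ ("dsb" : : : "memory")
#define set_event() __asm__ __volatile__ ("sev" : : : "memory")

static inline void wake_secondary(void)
{
	/*
	 * Ensure the boot address written for the secondary core is
	 * visible to it before the event is signalled.
	 */
	dsb();
	set_event();
}
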
diff --git a/trunk/arch/arm/plat-pxa/pwm.c b/trunk/arch/arm/plat-pxa/pwm.c
index ef32686feef9..0732c6c8d511 100644
--- a/trunk/arch/arm/plat-pxa/pwm.c
+++ b/trunk/arch/arm/plat-pxa/pwm.c
@@ -176,7 +176,7 @@ static inline void __add_pwm(struct pwm_device *pwm)
static int __devinit pwm_probe(struct platform_device *pdev)
{
- const struct platform_device_id *id = platform_get_device_id(pdev);
+ struct platform_device_id *id = platform_get_device_id(pdev);
struct pwm_device *pwm, *secondary = NULL;
struct resource *r;
int ret = 0;
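The pwm.c hunk above is a const-qualification change on the pointer returned by the device-id lookup. A minimal example of the underlying C rule, with a stand-in lookup function rather than the real platform API:

/*
 * A pointer to const data can be stored in a const-qualified pointer
 * without a cast; storing it in a plain pointer discards the qualifier
 * and draws a compiler diagnostic.
 */
struct device_id { const char *name; unsigned long driver_data; };

static const struct device_id ids[] = {
	{ "pwm-a", 0 },
	{ "pwm-b", 1 },
};

static const struct device_id *get_device_id(int index)
{
	return &ids[index];
}

int main(void)
{
	const struct device_id *id = get_device_id(0);	/* clean */
	/* struct device_id *bad = get_device_id(0); */	/* would warn: discards const */
	(void)id;
	return 0;
}
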
diff --git a/trunk/arch/arm/plat-s5p/include/plat/map-s5p.h b/trunk/arch/arm/plat-s5p/include/plat/map-s5p.h
index c4ff88bf6477..54e9fb9d315e 100644
--- a/trunk/arch/arm/plat-s5p/include/plat/map-s5p.h
+++ b/trunk/arch/arm/plat-s5p/include/plat/map-s5p.h
@@ -17,7 +17,6 @@
#define S5P_VA_GPIO S3C_ADDR(0x00500000)
#define S5P_VA_SYSTIMER S3C_ADDR(0x01200000)
#define S5P_VA_SROMC S3C_ADDR(0x01100000)
-#define S5P_VA_SYSRAM S3C_ADDR(0x01180000)
#define S5P_VA_COMBINER_BASE S3C_ADDR(0x00600000)
#define S5P_VA_COMBINER(x) (S5P_VA_COMBINER_BASE + ((x) >> 2) * 0x10)
@@ -30,7 +29,6 @@
#define S5P_VA_GIC_DIST S5P_VA_COREPERI(0x1000)
#define S5P_VA_L2CC S3C_ADDR(0x00900000)
-#define S5P_VA_CMU S3C_ADDR(0x00920000)
#define S5P_VA_UART(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET))
#define S5P_VA_UART0 S5P_VA_UART(0)
diff --git a/trunk/arch/arm/plat-samsung/dev-hsmmc.c b/trunk/arch/arm/plat-samsung/dev-hsmmc.c
index 9d2be0941410..b0f93f11e281 100644
--- a/trunk/arch/arm/plat-samsung/dev-hsmmc.c
+++ b/trunk/arch/arm/plat-samsung/dev-hsmmc.c
@@ -70,6 +70,4 @@ void s3c_sdhci0_set_platdata(struct s3c_sdhci_platdata *pd)
set->cfg_gpio = pd->cfg_gpio;
if (pd->cfg_card)
set->cfg_card = pd->cfg_card;
- if (pd->host_caps)
- set->host_caps = pd->host_caps;
}
diff --git a/trunk/arch/arm/plat-samsung/dev-hsmmc1.c b/trunk/arch/arm/plat-samsung/dev-hsmmc1.c
index a6c8295840af..1504fd802865 100644
--- a/trunk/arch/arm/plat-samsung/dev-hsmmc1.c
+++ b/trunk/arch/arm/plat-samsung/dev-hsmmc1.c
@@ -70,6 +70,4 @@ void s3c_sdhci1_set_platdata(struct s3c_sdhci_platdata *pd)
set->cfg_gpio = pd->cfg_gpio;
if (pd->cfg_card)
set->cfg_card = pd->cfg_card;
- if (pd->host_caps)
- set->host_caps = pd->host_caps;
}
diff --git a/trunk/arch/arm/plat-samsung/dev-hsmmc2.c b/trunk/arch/arm/plat-samsung/dev-hsmmc2.c
index cb0d7143381a..b28ef173444d 100644
--- a/trunk/arch/arm/plat-samsung/dev-hsmmc2.c
+++ b/trunk/arch/arm/plat-samsung/dev-hsmmc2.c
@@ -71,6 +71,4 @@ void s3c_sdhci2_set_platdata(struct s3c_sdhci_platdata *pd)
set->cfg_gpio = pd->cfg_gpio;
if (pd->cfg_card)
set->cfg_card = pd->cfg_card;
- if (pd->host_caps)
- set->host_caps = pd->host_caps;
}
diff --git a/trunk/arch/arm/tools/mach-types b/trunk/arch/arm/tools/mach-types
index 55590a4d87c9..48cbdcb6bbd4 100644
--- a/trunk/arch/arm/tools/mach-types
+++ b/trunk/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
#
# http://www.arm.linux.org.uk/developer/machines/?action=new
#
-# Last update: Thu Sep 9 22:43:01 2010
+# Last update: Mon Jul 12 21:10:14 2010
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
@@ -2622,7 +2622,7 @@ kraken MACH_KRAKEN KRAKEN 2634
gw2388 MACH_GW2388 GW2388 2635
jadecpu MACH_JADECPU JADECPU 2636
carlisle MACH_CARLISLE CARLISLE 2637
-lux_sf9 MACH_LUX_SF9 LUX_SF9 2638
+lux_sf9 MACH_LUX_SFT9 LUX_SFT9 2638
nemid_tb MACH_NEMID_TB NEMID_TB 2639
terrier MACH_TERRIER TERRIER 2640
turbot MACH_TURBOT TURBOT 2641
@@ -2950,97 +2950,3 @@ davinci_dm365_dvr MACH_DAVINCI_DM365_DVR DAVINCI_DM365_DVR 2963
netviz MACH_NETVIZ NETVIZ 2964
flexibity MACH_FLEXIBITY FLEXIBITY 2965
wlan_computer MACH_WLAN_COMPUTER WLAN_COMPUTER 2966
-lpc24xx MACH_LPC24XX LPC24XX 2967
-spica MACH_SPICA SPICA 2968
-gpsdisplay MACH_GPSDISPLAY GPSDISPLAY 2969
-bipnet MACH_BIPNET BIPNET 2970
-overo_ctu_inertial MACH_OVERO_CTU_INERTIAL OVERO_CTU_INERTIAL 2971
-davinci_dm355_mmm MACH_DAVINCI_DM355_MMM DAVINCI_DM355_MMM 2972
-pc9260_v2 MACH_PC9260_V2 PC9260_V2 2973
-ptx7545 MACH_PTX7545 PTX7545 2974
-tm_efdc MACH_TM_EFDC TM_EFDC 2975
-omap3_waldo1 MACH_OMAP3_WALDO1 OMAP3_WALDO1 2977
-flyer MACH_FLYER FLYER 2978
-tornado3240 MACH_TORNADO3240 TORNADO3240 2979
-soli_01 MACH_SOLI_01 SOLI_01 2980
-omapl138_europalc MACH_OMAPL138_EUROPALC OMAPL138_EUROPALC 2981
-helios_v1 MACH_HELIOS_V1 HELIOS_V1 2982
-netspace_lite_v2 MACH_NETSPACE_LITE_V2 NETSPACE_LITE_V2 2983
-ssc MACH_SSC SSC 2984
-premierwave_en MACH_PREMIERWAVE_EN PREMIERWAVE_EN 2985
-wasabi MACH_WASABI WASABI 2986
-vivow MACH_VIVOW VIVOW 2987
-mx50_rdp MACH_MX50_RDP MX50_RDP 2988
-universal MACH_UNIVERSAL UNIVERSAL 2989
-real6410 MACH_REAL6410 REAL6410 2990
-spx_sakura MACH_SPX_SAKURA SPX_SAKURA 2991
-ij3k_2440 MACH_IJ3K_2440 IJ3K_2440 2992
-omap3_bc10 MACH_OMAP3_BC10 OMAP3_BC10 2993
-thebe MACH_THEBE THEBE 2994
-rv082 MACH_RV082 RV082 2995
-armlguest MACH_ARMLGUEST ARMLGUEST 2996
-tjinc1000 MACH_TJINC1000 TJINC1000 2997
-dockstar MACH_DOCKSTAR DOCKSTAR 2998
-ax8008 MACH_AX8008 AX8008 2999
-gnet_sgce MACH_GNET_SGCE GNET_SGCE 3000
-pxwnas_500_1000 MACH_PXWNAS_500_1000 PXWNAS_500_1000 3001
-ea20 MACH_EA20 EA20 3002
-awm2 MACH_AWM2 AWM2 3003
-ti8148evm MACH_TI8148EVM TI8148EVM 3004
-tegra_seaboard MACH_TEGRA_SEABOARD TEGRA_SEABOARD 3005
-linkstation_chlv2 MACH_LINKSTATION_CHLV2 LINKSTATION_CHLV2 3006
-tera_pro2_rack MACH_TERA_PRO2_RACK TERA_PRO2_RACK 3007
-rubys MACH_RUBYS RUBYS 3008
-aquarius MACH_AQUARIUS AQUARIUS 3009
-mx53_ard MACH_MX53_ARD MX53_ARD 3010
-mx53_smd MACH_MX53_SMD MX53_SMD 3011
-lswxl MACH_LSWXL LSWXL 3012
-dove_avng_v3 MACH_DOVE_AVNG_V3 DOVE_AVNG_V3 3013
-sdi_ess_9263 MACH_SDI_ESS_9263 SDI_ESS_9263 3014
-jocpu550 MACH_JOCPU550 JOCPU550 3015
-msm8x60_rumi3 MACH_MSM8X60_RUMI3 MSM8X60_RUMI3 3016
-msm8x60_ffa MACH_MSM8X60_FFA MSM8X60_FFA 3017
-yanomami MACH_YANOMAMI YANOMAMI 3018
-gta04 MACH_GTA04 GTA04 3019
-cm_a510 MACH_CM_A510 CM_A510 3020
-omap3_rfs200 MACH_OMAP3_RFS200 OMAP3_RFS200 3021
-kx33xx MACH_KX33XX KX33XX 3022
-ptx7510 MACH_PTX7510 PTX7510 3023
-top9000 MACH_TOP9000 TOP9000 3024
-teenote MACH_TEENOTE TEENOTE 3025
-ts3 MACH_TS3 TS3 3026
-a0 MACH_A0 A0 3027
-fsm9xxx_surf MACH_FSM9XXX_SURF FSM9XXX_SURF 3028
-fsm9xxx_ffa MACH_FSM9XXX_FFA FSM9XXX_FFA 3029
-frrhwcdma60w MACH_FRRHWCDMA60W FRRHWCDMA60W 3030
-remus MACH_REMUS REMUS 3031
-at91cap7xdk MACH_AT91CAP7XDK AT91CAP7XDK 3032
-at91cap7stk MACH_AT91CAP7STK AT91CAP7STK 3033
-kt_sbc_sam9_1 MACH_KT_SBC_SAM9_1 KT_SBC_SAM9_1 3034
-oratisrouter MACH_ORATISROUTER ORATISROUTER 3035
-armada_xp_db MACH_ARMADA_XP_DB ARMADA_XP_DB 3036
-spdm MACH_SPDM SPDM 3037
-gtib MACH_GTIB GTIB 3038
-dgm3240 MACH_DGM3240 DGM3240 3039
-atlas_i_lpe MACH_ATLAS_I_LPE ATLAS_I_LPE 3040
-htcmega MACH_HTCMEGA HTCMEGA 3041
-tricorder MACH_TRICORDER TRICORDER 3042
-tx28 MACH_TX28 TX28 3043
-bstbrd MACH_BSTBRD BSTBRD 3044
-pwb3090 MACH_PWB3090 PWB3090 3045
-idea6410 MACH_IDEA6410 IDEA6410 3046
-qbc9263 MACH_QBC9263 QBC9263 3047
-borabora MACH_BORABORA BORABORA 3048
-valdez MACH_VALDEZ VALDEZ 3049
-ls9g20 MACH_LS9G20 LS9G20 3050
-mios_v1 MACH_MIOS_V1 MIOS_V1 3051
-s5pc110_crespo MACH_S5PC110_CRESPO S5PC110_CRESPO 3052
-controltek9g20 MACH_CONTROLTEK9G20 CONTROLTEK9G20 3053
-tin307 MACH_TIN307 TIN307 3054
-tin510 MACH_TIN510 TIN510 3055
-bluecheese MACH_BLUECHEESE BLUECHEESE 3057
-tem3x30 MACH_TEM3X30 TEM3X30 3058
-harvest_desoto MACH_HARVEST_DESOTO HARVEST_DESOTO 3059
-msm8x60_qrdc MACH_MSM8X60_QRDC MSM8X60_QRDC 3060
-spear900 MACH_SPEAR900 SPEAR900 3061
-pcontrol_g20 MACH_PCONTROL_G20 PCONTROL_G20 3062
diff --git a/trunk/arch/avr32/kernel/process.c b/trunk/arch/avr32/kernel/process.c
index 9c46aaad11ce..e5daddff397d 100644
--- a/trunk/arch/avr32/kernel/process.c
+++ b/trunk/arch/avr32/kernel/process.c
@@ -384,9 +384,8 @@ asmlinkage int sys_vfork(struct pt_regs *regs)
}
asmlinkage int sys_execve(const char __user *ufilename,
- const char __user *const __user *uargv,
- const char __user *const __user *uenvp,
- struct pt_regs *regs)
+ char __user *__user *uargv,
+ char __user *__user *uenvp, struct pt_regs *regs)
{
int error;
char *filename;
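The execve prototype churn in the hunk above is about const-correctness of the argv/envp pointer arrays. A small userspace sketch of the same point, with a stand-in function rather than the real syscall:

/*
 * An argv built from string literals is naturally const char *const [];
 * const-correct parameters accept it without a cast.
 */
#include <stdio.h>

static int fake_execve(const char *file,
		       const char *const *argv,
		       const char *const *envp)
{
	(void)envp;
	printf("exec %s, argv[0]=%s\n", file, argv[0]);
	return 0;
}

int main(void)
{
	const char *const argv[] = { "/bin/true", (const char *)0 };
	const char *const envp[] = { (const char *)0 };

	/* No cast needed because the parameter types are const-correct. */
	return fake_execve("/bin/true", argv, envp);
}
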
diff --git a/trunk/arch/avr32/kernel/sys_avr32.c b/trunk/arch/avr32/kernel/sys_avr32.c
index 62635a09ae3e..459349b5ed5a 100644
--- a/trunk/arch/avr32/kernel/sys_avr32.c
+++ b/trunk/arch/avr32/kernel/sys_avr32.c
@@ -7,9 +7,7 @@
*/
#include
-int kernel_execve(const char *file,
- const char *const *argv,
- const char *const *envp)
+int kernel_execve(const char *file, char **argv, char **envp)
{
register long scno asm("r8") = __NR_execve;
register long sc1 asm("r12") = (long)file;
diff --git a/trunk/arch/blackfin/include/asm/bfin_sport.h b/trunk/arch/blackfin/include/asm/bfin_sport.h
index d27600c262c2..9626cf7e4251 100644
--- a/trunk/arch/blackfin/include/asm/bfin_sport.h
+++ b/trunk/arch/blackfin/include/asm/bfin_sport.h
@@ -115,6 +115,12 @@ struct sport_register {
#endif
+/* Workaround defBF*.h SPORT MMRs till they get cleansed */
+#undef DTYPE_NORM
+#undef SLEN
+#undef SP_WOFF
+#undef SP_WSIZE
+
/* SPORT_TCR1 Masks */
#define TSPEN 0x0001 /* TX enable */
#define ITCLK 0x0002 /* Internal TX Clock Select */
diff --git a/trunk/arch/blackfin/include/asm/bitops.h b/trunk/arch/blackfin/include/asm/bitops.h
index 3f7ef4d97791..d5872cd967ab 100644
--- a/trunk/arch/blackfin/include/asm/bitops.h
+++ b/trunk/arch/blackfin/include/asm/bitops.h
@@ -22,9 +22,7 @@
#include
#include
-#include
#include
-
#include
#include
#include
@@ -117,7 +115,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
* of bits set) of a N-bit word
*/
-static inline unsigned int __arch_hweight32(unsigned int w)
+static inline unsigned int hweight32(unsigned int w)
{
unsigned int res;
@@ -127,20 +125,19 @@ static inline unsigned int __arch_hweight32(unsigned int w)
return res;
}
-static inline unsigned int __arch_hweight64(__u64 w)
+static inline unsigned int hweight64(__u64 w)
{
- return __arch_hweight32((unsigned int)(w >> 32)) +
- __arch_hweight32((unsigned int)w);
+ return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
}
-static inline unsigned int __arch_hweight16(unsigned int w)
+static inline unsigned int hweight16(unsigned int w)
{
- return __arch_hweight32(w & 0xffff);
+ return hweight32(w & 0xffff);
}
-static inline unsigned int __arch_hweight8(unsigned int w)
+static inline unsigned int hweight8(unsigned int w)
{
- return __arch_hweight32(w & 0xff);
+ return hweight32(w & 0xff);
}
#endif /* _BLACKFIN_BITOPS_H */
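The bitops.h hunk above expresses the 64/16/8-bit population counts in terms of the 32-bit one. A host-side sketch of that layering, with a portable bit-twiddling hweight32 standing in for the processor-specific count the Blackfin version uses:

/*
 * SWAR population count plus the derived 64/16/8-bit helpers.
 */
#include <assert.h>
#include <stdint.h>

static unsigned int hweight32(uint32_t w)
{
	w = w - ((w >> 1) & 0x55555555u);
	w = (w & 0x33333333u) + ((w >> 2) & 0x33333333u);
	w = (w + (w >> 4)) & 0x0F0F0F0Fu;
	return (w * 0x01010101u) >> 24;
}

static unsigned int hweight64(uint64_t w)
{
	return hweight32((uint32_t)(w >> 32)) + hweight32((uint32_t)w);
}

static unsigned int hweight16(unsigned int w)
{
	return hweight32(w & 0xffff);
}

static unsigned int hweight8(unsigned int w)
{
	return hweight32(w & 0xff);
}

int main(void)
{
	assert(hweight32(0) == 0);
	assert(hweight32(0xffffffffu) == 32);
	assert(hweight64(0x8000000000000001ull) == 2);
	assert(hweight16(0xf0f0) == 8);
	assert(hweight8(0x81) == 2);
	return 0;
}
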
diff --git a/trunk/arch/blackfin/include/asm/unistd.h b/trunk/arch/blackfin/include/asm/unistd.h
index 14fcd254b185..22886cbdae7a 100644
--- a/trunk/arch/blackfin/include/asm/unistd.h
+++ b/trunk/arch/blackfin/include/asm/unistd.h
@@ -389,11 +389,8 @@
#define __NR_rt_tgsigqueueinfo 368
#define __NR_perf_event_open 369
#define __NR_recvmmsg 370
-#define __NR_fanotify_init 371
-#define __NR_fanotify_mark 372
-#define __NR_prlimit64 373
-#define __NR_syscall 374
+#define __NR_syscall 371
#define NR_syscalls __NR_syscall
/* Old optional stuff no one actually uses */
diff --git a/trunk/arch/blackfin/kernel/process.c b/trunk/arch/blackfin/kernel/process.c
index 01f98cb964d2..a566f61c002a 100644
--- a/trunk/arch/blackfin/kernel/process.c
+++ b/trunk/arch/blackfin/kernel/process.c
@@ -209,9 +209,7 @@ copy_thread(unsigned long clone_flags,
/*
* sys_execve() executes a new program.
*/
-asmlinkage int sys_execve(const char __user *name,
- const char __user *const __user *argv,
- const char __user *const __user *envp)
+asmlinkage int sys_execve(const char __user *name, char __user * __user *argv, char __user * __user *envp)
{
int error;
char *filename;
diff --git a/trunk/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h b/trunk/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
index 037a51fd8e93..2bc8f4f98011 100644
--- a/trunk/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
+++ b/trunk/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
@@ -913,6 +913,88 @@
#define PH6 0x0040
#define PH7 0x0080
+
+/* ******************* SERIAL PORT MASKS **************************************/
+/* SPORTx_TCR1 Masks */
+#define TSPEN 0x0001 /* Transmit Enable */
+#define ITCLK 0x0002 /* Internal Transmit Clock Select */
+#define DTYPE_NORM 0x0004 /* Data Format Normal */
+#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
+#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
+#define TLSBIT 0x0010 /* Transmit Bit Order */
+#define ITFS 0x0200 /* Internal Transmit Frame Sync Select */
+#define TFSR 0x0400 /* Transmit Frame Sync Required Select */
+#define DITFS 0x0800 /* Data-Independent Transmit Frame Sync Select */
+#define LTFS 0x1000 /* Low Transmit Frame Sync Select */
+#define LATFS 0x2000 /* Late Transmit Frame Sync Select */
+#define TCKFE 0x4000 /* Clock Falling Edge Select */
+
+/* SPORTx_TCR2 Masks and Macro */
+#define SLEN(x) ((x)&0x1F) /* SPORT TX Word Length (2 - 31) */
+#define TXSE 0x0100 /* TX Secondary Enable */
+#define TSFSE 0x0200 /* Transmit Stereo Frame Sync Enable */
+#define TRFST 0x0400 /* Left/Right Order (1 = Right Channel 1st) */
+
+/* SPORTx_RCR1 Masks */
+#define RSPEN 0x0001 /* Receive Enable */
+#define IRCLK 0x0002 /* Internal Receive Clock Select */
+#define DTYPE_NORM 0x0004 /* Data Format Normal */
+#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
+#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
+#define RLSBIT 0x0010 /* Receive Bit Order */
+#define IRFS 0x0200 /* Internal Receive Frame Sync Select */
+#define RFSR 0x0400 /* Receive Frame Sync Required Select */
+#define LRFS 0x1000 /* Low Receive Frame Sync Select */
+#define LARFS 0x2000 /* Late Receive Frame Sync Select */
+#define RCKFE 0x4000 /* Clock Falling Edge Select */
+
+/* SPORTx_RCR2 Masks */
+#define SLEN(x) ((x)&0x1F) /* SPORT RX Word Length (2 - 31) */
+#define RXSE 0x0100 /* RX Secondary Enable */
+#define RSFSE 0x0200 /* RX Stereo Frame Sync Enable */
+#define RRFST 0x0400 /* Right-First Data Order */
+
+/* SPORTx_STAT Masks */
+#define RXNE 0x0001 /* Receive FIFO Not Empty Status */
+#define RUVF 0x0002 /* Sticky Receive Underflow Status */
+#define ROVF 0x0004 /* Sticky Receive Overflow Status */
+#define TXF 0x0008 /* Transmit FIFO Full Status */
+#define TUVF 0x0010 /* Sticky Transmit Underflow Status */
+#define TOVF 0x0020 /* Sticky Transmit Overflow Status */
+#define TXHRE 0x0040 /* Transmit Hold Register Empty */
+
+/* SPORTx_MCMC1 Macros */
+#define SP_WOFF(x) ((x) & 0x3FF) /* Multichannel Window Offset Field */
+
+/* Only use WSIZE Macro With Logic OR While Setting Lower Order Bits */
+#define SP_WSIZE(x) (((((x)>>0x3)-1)&0xF) << 0xC) /* Multichannel Window Size = (x/8)-1 */
+
+/* SPORTx_MCMC2 Masks */
+#define REC_BYPASS 0x0000 /* Bypass Mode (No Clock Recovery) */
+#define REC_2FROM4 0x0002 /* Recover 2 MHz Clock from 4 MHz Clock */
+#define REC_8FROM16 0x0003 /* Recover 8 MHz Clock from 16 MHz Clock */
+#define MCDTXPE 0x0004 /* Multichannel DMA Transmit Packing */
+#define MCDRXPE 0x0008 /* Multichannel DMA Receive Packing */
+#define MCMEN 0x0010 /* Multichannel Frame Mode Enable */
+#define FSDR 0x0080 /* Multichannel Frame Sync to Data Relationship */
+#define MFD_0 0x0000 /* Multichannel Frame Delay = 0 */
+#define MFD_1 0x1000 /* Multichannel Frame Delay = 1 */
+#define MFD_2 0x2000 /* Multichannel Frame Delay = 2 */
+#define MFD_3 0x3000 /* Multichannel Frame Delay = 3 */
+#define MFD_4 0x4000 /* Multichannel Frame Delay = 4 */
+#define MFD_5 0x5000 /* Multichannel Frame Delay = 5 */
+#define MFD_6 0x6000 /* Multichannel Frame Delay = 6 */
+#define MFD_7 0x7000 /* Multichannel Frame Delay = 7 */
+#define MFD_8 0x8000 /* Multichannel Frame Delay = 8 */
+#define MFD_9 0x9000 /* Multichannel Frame Delay = 9 */
+#define MFD_10 0xA000 /* Multichannel Frame Delay = 10 */
+#define MFD_11 0xB000 /* Multichannel Frame Delay = 11 */
+#define MFD_12 0xC000 /* Multichannel Frame Delay = 12 */
+#define MFD_13 0xD000 /* Multichannel Frame Delay = 13 */
+#define MFD_14 0xE000 /* Multichannel Frame Delay = 14 */
+#define MFD_15 0xF000 /* Multichannel Frame Delay = 15 */
+
+
/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/
/* EBIU_AMGCTL Masks */
#define AMCKEN 0x0001 /* Enable CLKOUT */
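The SPORT macros added above pack fields into the MCMC registers; in particular SP_WSIZE(x) stores (x/8 - 1) in bits [15:12] and SP_WOFF(x) masks the offset to 10 bits. A quick standalone check of that arithmetic, using local copies of the two macros:

/*
 * Exercise the window-size and window-offset encodings.
 */
#include <assert.h>

#define SP_WOFF(x)  ((x) & 0x3FF)
#define SP_WSIZE(x) (((((x) >> 0x3) - 1) & 0xF) << 0xC)

int main(void)
{
	assert(SP_WSIZE(8)   == (0x0 << 12));	/* x = 8   -> field 0  */
	assert(SP_WSIZE(16)  == (0x1 << 12));	/* x = 16  -> field 1  */
	assert(SP_WSIZE(128) == (0xF << 12));	/* x = 128 -> field 15 */
	assert(SP_WOFF(0x7FF) == 0x3FF);	/* offset masked to 10 bits */
	return 0;
}
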
diff --git a/trunk/arch/blackfin/mach-bf527/boards/cm_bf527.c b/trunk/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 645ba5c8077b..f392af641657 100644
--- a/trunk/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/trunk/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -145,6 +145,7 @@ static struct mtd_partition partition_info[] = {
};
static struct bf5xx_nand_platform bf5xx_nand_platform = {
+ .page_size = NFC_PG_SIZE_256,
.data_width = NFC_NWIDTH_8,
.partitions = partition_info,
.nr_partitions = ARRAY_SIZE(partition_info),
diff --git a/trunk/arch/blackfin/mach-bf527/boards/ezbrd.c b/trunk/arch/blackfin/mach-bf527/boards/ezbrd.c
index c975fe88eba3..606eb36b9d6e 100644
--- a/trunk/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/trunk/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -149,6 +149,7 @@ static struct mtd_partition partition_info[] = {
};
static struct bf5xx_nand_platform bf5xx_nand_platform = {
+ .page_size = NFC_PG_SIZE_256,
.data_width = NFC_NWIDTH_8,
.partitions = partition_info,
.nr_partitions = ARRAY_SIZE(partition_info),
diff --git a/trunk/arch/blackfin/mach-bf527/boards/ezkit.c b/trunk/arch/blackfin/mach-bf527/boards/ezkit.c
index 87b41e994ba3..a05c967a24cf 100644
--- a/trunk/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/trunk/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -234,6 +234,7 @@ static struct mtd_partition partition_info[] = {
};
static struct bf5xx_nand_platform bf5xx_nand_platform = {
+ .page_size = NFC_PG_SIZE_256,
.data_width = NFC_NWIDTH_8,
.partitions = partition_info,
.nr_partitions = ARRAY_SIZE(partition_info),
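
The three board files above each gain the same .page_size field. A minimal sketch of such platform data, assuming the NFC_* constants from the bf5xx_nand driver header and a purely hypothetical partition layout:

static struct mtd_partition example_partitions[] = {
        {
                .name   = "bootloader",         /* hypothetical layout */
                .offset = 0,
                .size   = 0x40000,
        }, {
                .name   = "rootfs",
                .offset = MTDPART_OFS_APPEND,
                .size   = MTDPART_SIZ_FULL,
        },
};

static struct bf5xx_nand_platform example_nand_platform = {
        .page_size     = NFC_PG_SIZE_256,
        .data_width    = NFC_NWIDTH_8,
        .partitions    = example_partitions,
        .nr_partitions = ARRAY_SIZE(example_partitions),
};
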
diff --git a/trunk/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h b/trunk/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h
index 3e000756aacd..5f97f01fcda6 100644
--- a/trunk/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h
+++ b/trunk/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h
@@ -922,6 +922,88 @@
#define PH14 0x4000
#define PH15 0x8000
+
+/* ******************* SERIAL PORT MASKS **************************************/
+/* SPORTx_TCR1 Masks */
+#define TSPEN 0x0001 /* Transmit Enable */
+#define ITCLK 0x0002 /* Internal Transmit Clock Select */
+#define DTYPE_NORM 0x0004 /* Data Format Normal */
+#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
+#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
+#define TLSBIT 0x0010 /* Transmit Bit Order */
+#define ITFS 0x0200 /* Internal Transmit Frame Sync Select */
+#define TFSR 0x0400 /* Transmit Frame Sync Required Select */
+#define DITFS 0x0800 /* Data-Independent Transmit Frame Sync Select */
+#define LTFS 0x1000 /* Low Transmit Frame Sync Select */
+#define LATFS 0x2000 /* Late Transmit Frame Sync Select */
+#define TCKFE 0x4000 /* Clock Falling Edge Select */
+
+/* SPORTx_TCR2 Masks and Macro */
+#define SLEN(x) ((x)&0x1F) /* SPORT TX Word Length (2 - 31) */
+#define TXSE 0x0100 /* TX Secondary Enable */
+#define TSFSE 0x0200 /* Transmit Stereo Frame Sync Enable */
+#define TRFST 0x0400 /* Left/Right Order (1 = Right Channel 1st) */
+
+/* SPORTx_RCR1 Masks */
+#define RSPEN 0x0001 /* Receive Enable */
+#define IRCLK 0x0002 /* Internal Receive Clock Select */
+#define DTYPE_NORM 0x0004 /* Data Format Normal */
+#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
+#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
+#define RLSBIT 0x0010 /* Receive Bit Order */
+#define IRFS 0x0200 /* Internal Receive Frame Sync Select */
+#define RFSR 0x0400 /* Receive Frame Sync Required Select */
+#define LRFS 0x1000 /* Low Receive Frame Sync Select */
+#define LARFS 0x2000 /* Late Receive Frame Sync Select */
+#define RCKFE 0x4000 /* Clock Falling Edge Select */
+
+/* SPORTx_RCR2 Masks */
+#define SLEN(x) ((x)&0x1F) /* SPORT RX Word Length (2 - 31) */
+#define RXSE 0x0100 /* RX Secondary Enable */
+#define RSFSE 0x0200 /* RX Stereo Frame Sync Enable */
+#define RRFST 0x0400 /* Right-First Data Order */
+
+/* SPORTx_STAT Masks */
+#define RXNE 0x0001 /* Receive FIFO Not Empty Status */
+#define RUVF 0x0002 /* Sticky Receive Underflow Status */
+#define ROVF 0x0004 /* Sticky Receive Overflow Status */
+#define TXF 0x0008 /* Transmit FIFO Full Status */
+#define TUVF 0x0010 /* Sticky Transmit Underflow Status */
+#define TOVF 0x0020 /* Sticky Transmit Overflow Status */
+#define TXHRE 0x0040 /* Transmit Hold Register Empty */
+
+/* SPORTx_MCMC1 Macros */
+#define SP_WOFF(x) ((x) & 0x3FF) /* Multichannel Window Offset Field */
+
+/* Only use WSIZE Macro With Logic OR While Setting Lower Order Bits */
+#define SP_WSIZE(x) (((((x)>>0x3)-1)&0xF) << 0xC) /* Multichannel Window Size = (x/8)-1 */
+
+/* SPORTx_MCMC2 Masks */
+#define REC_BYPASS 0x0000 /* Bypass Mode (No Clock Recovery) */
+#define REC_2FROM4 0x0002 /* Recover 2 MHz Clock from 4 MHz Clock */
+#define REC_8FROM16 0x0003 /* Recover 8 MHz Clock from 16 MHz Clock */
+#define MCDTXPE 0x0004 /* Multichannel DMA Transmit Packing */
+#define MCDRXPE 0x0008 /* Multichannel DMA Receive Packing */
+#define MCMEN 0x0010 /* Multichannel Frame Mode Enable */
+#define FSDR 0x0080 /* Multichannel Frame Sync to Data Relationship */
+#define MFD_0 0x0000 /* Multichannel Frame Delay = 0 */
+#define MFD_1 0x1000 /* Multichannel Frame Delay = 1 */
+#define MFD_2 0x2000 /* Multichannel Frame Delay = 2 */
+#define MFD_3 0x3000 /* Multichannel Frame Delay = 3 */
+#define MFD_4 0x4000 /* Multichannel Frame Delay = 4 */
+#define MFD_5 0x5000 /* Multichannel Frame Delay = 5 */
+#define MFD_6 0x6000 /* Multichannel Frame Delay = 6 */
+#define MFD_7 0x7000 /* Multichannel Frame Delay = 7 */
+#define MFD_8 0x8000 /* Multichannel Frame Delay = 8 */
+#define MFD_9 0x9000 /* Multichannel Frame Delay = 9 */
+#define MFD_10 0xA000 /* Multichannel Frame Delay = 10 */
+#define MFD_11 0xB000 /* Multichannel Frame Delay = 11 */
+#define MFD_12 0xC000 /* Multichannel Frame Delay = 12 */
+#define MFD_13 0xD000 /* Multichannel Frame Delay = 13 */
+#define MFD_14 0xE000 /* Multichannel Frame Delay = 14 */
+#define MFD_15 0xF000 /* Multichannel Frame Delay = 15 */
+
+
/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/
/* EBIU_AMGCTL Masks */
#define AMCKEN 0x0001 /* Enable CLKOUT */
diff --git a/trunk/arch/blackfin/mach-bf533/include/mach/defBF532.h b/trunk/arch/blackfin/mach-bf533/include/mach/defBF532.h
index 04acf1ed10f9..e9ff491c0953 100644
--- a/trunk/arch/blackfin/mach-bf533/include/mach/defBF532.h
+++ b/trunk/arch/blackfin/mach-bf533/include/mach/defBF532.h
@@ -509,6 +509,98 @@
#define IREN_P 0x01
#define UCEN_P 0x00
+/* ********** SERIAL PORT MASKS ********************** */
+
+/* SPORTx_TCR1 Masks */
+#define TSPEN 0x0001 /* TX enable */
+#define ITCLK 0x0002 /* Internal TX Clock Select */
+#define TDTYPE 0x000C /* TX Data Formatting Select */
+#define DTYPE_NORM 0x0000 /* Data Format Normal */
+#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
+#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
+#define TLSBIT 0x0010 /* TX Bit Order */
+#define ITFS 0x0200 /* Internal TX Frame Sync Select */
+#define TFSR 0x0400 /* TX Frame Sync Required Select */
+#define DITFS 0x0800 /* Data Independent TX Frame Sync Select */
+#define LTFS 0x1000 /* Low TX Frame Sync Select */
+#define LATFS 0x2000 /* Late TX Frame Sync Select */
+#define TCKFE 0x4000 /* TX Clock Falling Edge Select */
+
+/* SPORTx_TCR2 Masks */
+#if defined(__ADSPBF531__) || defined(__ADSPBF532__) || \
+ defined(__ADSPBF533__)
+# define SLEN 0x001F /*TX Word Length */
+#else
+# define SLEN(x) ((x)&0x1F) /* SPORT TX Word Length (2 - 31) */
+#endif
+#define TXSE 0x0100 /*TX Secondary Enable */
+#define TSFSE 0x0200 /*TX Stereo Frame Sync Enable */
+#define TRFST 0x0400 /*TX Right-First Data Order */
+
+/* SPORTx_RCR1 Masks */
+#define RSPEN 0x0001 /* RX enable */
+#define IRCLK 0x0002 /* Internal RX Clock Select */
+#define RDTYPE 0x000C /* RX Data Formatting Select */
+#define DTYPE_NORM 0x0000 /* no companding */
+#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
+#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
+#define RLSBIT 0x0010 /* RX Bit Order */
+#define IRFS 0x0200 /* Internal RX Frame Sync Select */
+#define RFSR 0x0400 /* RX Frame Sync Required Select */
+#define LRFS 0x1000 /* Low RX Frame Sync Select */
+#define LARFS 0x2000 /* Late RX Frame Sync Select */
+#define RCKFE 0x4000 /* RX Clock Falling Edge Select */
+
+/* SPORTx_RCR2 Masks */
+/* SLEN defined above */
+#define RXSE 0x0100 /*RX Secondary Enable */
+#define RSFSE 0x0200 /*RX Stereo Frame Sync Enable */
+#define RRFST 0x0400 /*Right-First Data Order */
+
+/*SPORTx_STAT Masks */
+#define RXNE 0x0001 /*RX FIFO Not Empty Status */
+#define RUVF 0x0002 /*RX Underflow Status */
+#define ROVF 0x0004 /*RX Overflow Status */
+#define TXF 0x0008 /*TX FIFO Full Status */
+#define TUVF 0x0010 /*TX Underflow Status */
+#define TOVF 0x0020 /*TX Overflow Status */
+#define TXHRE 0x0040 /*TX Hold Register Empty */
+
+/*SPORTx_MCMC1 Masks */
+#define SP_WSIZE 0x0000F000 /*Multichannel Window Size Field */
+#define SP_WOFF 0x000003FF /*Multichannel Window Offset Field */
+/* SPORTx_MCMC1 Macros */
+#define SET_SP_WOFF(x) ((x) & 0x3FF) /* Multichannel Window Offset Field */
+/* Only use SET_WSIZE Macro With Logic OR While Setting Lower Order Bits */
+#define SET_SP_WSIZE(x) (((((x)>>0x3)-1)&0xF) << 0xC) /* Multichannel Window Size = (x/8)-1 */
+
+/*SPORTx_MCMC2 Masks */
+#define MCCRM 0x00000003 /*Multichannel Clock Recovery Mode */
+#define REC_BYPASS 0x0000 /* Bypass Mode (No Clock Recovery) */
+#define REC_2FROM4 0x0002 /* Recover 2 MHz Clock from 4 MHz Clock */
+#define REC_8FROM16 0x0003 /* Recover 8 MHz Clock from 16 MHz Clock */
+#define MCDTXPE 0x00000004 /*Multichannel DMA Transmit Packing */
+#define MCDRXPE 0x00000008 /*Multichannel DMA Receive Packing */
+#define MCMEN 0x00000010 /*Multichannel Frame Mode Enable */
+#define FSDR 0x00000080 /*Multichannel Frame Sync to Data Relationship */
+#define MFD 0x0000F000 /*Multichannel Frame Delay */
+#define MFD_0 0x0000 /* Multichannel Frame Delay = 0 */
+#define MFD_1 0x1000 /* Multichannel Frame Delay = 1 */
+#define MFD_2 0x2000 /* Multichannel Frame Delay = 2 */
+#define MFD_3 0x3000 /* Multichannel Frame Delay = 3 */
+#define MFD_4 0x4000 /* Multichannel Frame Delay = 4 */
+#define MFD_5 0x5000 /* Multichannel Frame Delay = 5 */
+#define MFD_6 0x6000 /* Multichannel Frame Delay = 6 */
+#define MFD_7 0x7000 /* Multichannel Frame Delay = 7 */
+#define MFD_8 0x8000 /* Multichannel Frame Delay = 8 */
+#define MFD_9 0x9000 /* Multichannel Frame Delay = 9 */
+#define MFD_10 0xA000 /* Multichannel Frame Delay = 10 */
+#define MFD_11 0xB000 /* Multichannel Frame Delay = 11 */
+#define MFD_12 0xC000 /* Multichannel Frame Delay = 12 */
+#define MFD_13 0xD000 /* Multichannel Frame Delay = 13 */
+#define MFD_14 0xE000 /* Multichannel Frame Delay = 14 */
+#define MFD_15 0xF000 /* Multichannel Frame Delay = 15 */
+
/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */
/* PPI_CONTROL Masks */
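
Note that the hunk above defines SLEN as the plain mask 0x001F on BF531/532/533 but as a function-like macro on the other parts. A hypothetical helper (not in the tree) showing how a caller copes with both spellings:

static inline unsigned short sport_set_tx_len(unsigned short tcr2, unsigned int len)
{
#if defined(__ADSPBF531__) || defined(__ADSPBF532__) || defined(__ADSPBF533__)
        return (tcr2 & ~SLEN) | (len & SLEN);   /* SLEN is the mask 0x001F */
#else
        return (tcr2 & ~0x1F) | SLEN(len);      /* SLEN(x) is ((x) & 0x1F) */
#endif
}
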
diff --git a/trunk/arch/blackfin/mach-bf537/include/mach/defBF534.h b/trunk/arch/blackfin/mach-bf537/include/mach/defBF534.h
index 6f56907a18c0..aad61b887373 100644
--- a/trunk/arch/blackfin/mach-bf537/include/mach/defBF534.h
+++ b/trunk/arch/blackfin/mach-bf537/include/mach/defBF534.h
@@ -1241,6 +1241,86 @@
#define PH14 0x4000
#define PH15 0x8000
+/* ******************* SERIAL PORT MASKS **************************************/
+/* SPORTx_TCR1 Masks */
+#define TSPEN 0x0001 /* Transmit Enable */
+#define ITCLK 0x0002 /* Internal Transmit Clock Select */
+#define DTYPE_NORM 0x0004 /* Data Format Normal */
+#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
+#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
+#define TLSBIT 0x0010 /* Transmit Bit Order */
+#define ITFS 0x0200 /* Internal Transmit Frame Sync Select */
+#define TFSR 0x0400 /* Transmit Frame Sync Required Select */
+#define DITFS 0x0800 /* Data-Independent Transmit Frame Sync Select */
+#define LTFS 0x1000 /* Low Transmit Frame Sync Select */
+#define LATFS 0x2000 /* Late Transmit Frame Sync Select */
+#define TCKFE 0x4000 /* Clock Falling Edge Select */
+
+/* SPORTx_TCR2 Masks and Macro */
+#define SLEN(x) ((x)&0x1F) /* SPORT TX Word Length (2 - 31) */
+#define TXSE 0x0100 /* TX Secondary Enable */
+#define TSFSE 0x0200 /* Transmit Stereo Frame Sync Enable */
+#define TRFST 0x0400 /* Left/Right Order (1 = Right Channel 1st) */
+
+/* SPORTx_RCR1 Masks */
+#define RSPEN 0x0001 /* Receive Enable */
+#define IRCLK 0x0002 /* Internal Receive Clock Select */
+#define DTYPE_NORM 0x0004 /* Data Format Normal */
+#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
+#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
+#define RLSBIT 0x0010 /* Receive Bit Order */
+#define IRFS 0x0200 /* Internal Receive Frame Sync Select */
+#define RFSR 0x0400 /* Receive Frame Sync Required Select */
+#define LRFS 0x1000 /* Low Receive Frame Sync Select */
+#define LARFS 0x2000 /* Late Receive Frame Sync Select */
+#define RCKFE 0x4000 /* Clock Falling Edge Select */
+
+/* SPORTx_RCR2 Masks */
+#define SLEN(x) ((x)&0x1F) /* SPORT RX Word Length (2 - 31) */
+#define RXSE 0x0100 /* RX Secondary Enable */
+#define RSFSE 0x0200 /* RX Stereo Frame Sync Enable */
+#define RRFST 0x0400 /* Right-First Data Order */
+
+/* SPORTx_STAT Masks */
+#define RXNE 0x0001 /* Receive FIFO Not Empty Status */
+#define RUVF 0x0002 /* Sticky Receive Underflow Status */
+#define ROVF 0x0004 /* Sticky Receive Overflow Status */
+#define TXF 0x0008 /* Transmit FIFO Full Status */
+#define TUVF 0x0010 /* Sticky Transmit Underflow Status */
+#define TOVF 0x0020 /* Sticky Transmit Overflow Status */
+#define TXHRE 0x0040 /* Transmit Hold Register Empty */
+
+/* SPORTx_MCMC1 Macros */
+#define SP_WOFF(x) ((x) & 0x3FF) /* Multichannel Window Offset Field */
+
+/* Only use WSIZE Macro With Logic OR While Setting Lower Order Bits */
+#define SP_WSIZE(x) (((((x)>>0x3)-1)&0xF) << 0xC) /* Multichannel Window Size = (x/8)-1 */
+
+/* SPORTx_MCMC2 Masks */
+#define REC_BYPASS 0x0000 /* Bypass Mode (No Clock Recovery) */
+#define REC_2FROM4 0x0002 /* Recover 2 MHz Clock from 4 MHz Clock */
+#define REC_8FROM16 0x0003 /* Recover 8 MHz Clock from 16 MHz Clock */
+#define MCDTXPE 0x0004 /* Multichannel DMA Transmit Packing */
+#define MCDRXPE 0x0008 /* Multichannel DMA Receive Packing */
+#define MCMEN 0x0010 /* Multichannel Frame Mode Enable */
+#define FSDR 0x0080 /* Multichannel Frame Sync to Data Relationship */
+#define MFD_0 0x0000 /* Multichannel Frame Delay = 0 */
+#define MFD_1 0x1000 /* Multichannel Frame Delay = 1 */
+#define MFD_2 0x2000 /* Multichannel Frame Delay = 2 */
+#define MFD_3 0x3000 /* Multichannel Frame Delay = 3 */
+#define MFD_4 0x4000 /* Multichannel Frame Delay = 4 */
+#define MFD_5 0x5000 /* Multichannel Frame Delay = 5 */
+#define MFD_6 0x6000 /* Multichannel Frame Delay = 6 */
+#define MFD_7 0x7000 /* Multichannel Frame Delay = 7 */
+#define MFD_8 0x8000 /* Multichannel Frame Delay = 8 */
+#define MFD_9 0x9000 /* Multichannel Frame Delay = 9 */
+#define MFD_10 0xA000 /* Multichannel Frame Delay = 10 */
+#define MFD_11 0xB000 /* Multichannel Frame Delay = 11 */
+#define MFD_12 0xC000 /* Multichannel Frame Delay = 12 */
+#define MFD_13 0xD000 /* Multichannel Frame Delay = 13 */
+#define MFD_14 0xE000 /* Multichannel Frame Delay = 14 */
+#define MFD_15 0xF000 /* Multichannel Frame Delay = 15 */
+
/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/
/* EBIU_AMGCTL Masks */
#define AMCKEN 0x0001 /* Enable CLKOUT */
diff --git a/trunk/arch/blackfin/mach-bf538/include/mach/defBF539.h b/trunk/arch/blackfin/mach-bf538/include/mach/defBF539.h
index fe43062b4975..b674a1c4aef1 100644
--- a/trunk/arch/blackfin/mach-bf538/include/mach/defBF539.h
+++ b/trunk/arch/blackfin/mach-bf538/include/mach/defBF539.h
@@ -1610,6 +1610,113 @@
#define UCEN_P 0x00
+/* ********** SERIAL PORT MASKS ********************** */
+/* SPORTx_TCR1 Masks */
+#define TSPEN 0x0001 /* TX enable */
+#define ITCLK 0x0002 /* Internal TX Clock Select */
+#define TDTYPE 0x000C /* TX Data Formatting Select */
+#define DTYPE_NORM 0x0000 /* Data Format Normal */
+#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
+#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
+#define TLSBIT 0x0010 /* TX Bit Order */
+#define ITFS 0x0200 /* Internal TX Frame Sync Select */
+#define TFSR 0x0400 /* TX Frame Sync Required Select */
+#define DITFS 0x0800 /* Data Independent TX Frame Sync Select */
+#define LTFS 0x1000 /* Low TX Frame Sync Select */
+#define LATFS 0x2000 /* Late TX Frame Sync Select */
+#define TCKFE 0x4000 /* TX Clock Falling Edge Select */
+/* SPORTx_RCR1 Deprecated Masks */
+#define TULAW DTYPE_ULAW /* Compand Using u-Law */
+#define TALAW DTYPE_ALAW /* Compand Using A-Law */
+
+/* SPORTx_TCR2 Masks */
+#ifdef _MISRA_RULES
+#define SLEN(x) ((x)&0x1Fu) /* SPORT TX Word Length (2 - 31) */
+#else
+#define SLEN(x) ((x)&0x1F) /* SPORT TX Word Length (2 - 31) */
+#endif /* _MISRA_RULES */
+#define TXSE 0x0100 /*TX Secondary Enable */
+#define TSFSE 0x0200 /*TX Stereo Frame Sync Enable */
+#define TRFST 0x0400 /*TX Right-First Data Order */
+
+/* SPORTx_RCR1 Masks */
+#define RSPEN 0x0001 /* RX enable */
+#define IRCLK 0x0002 /* Internal RX Clock Select */
+#define RDTYPE 0x000C /* RX Data Formatting Select */
+#define DTYPE_NORM 0x0000 /* no companding */
+#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
+#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
+#define RLSBIT 0x0010 /* RX Bit Order */
+#define IRFS 0x0200 /* Internal RX Frame Sync Select */
+#define RFSR 0x0400 /* RX Frame Sync Required Select */
+#define LRFS 0x1000 /* Low RX Frame Sync Select */
+#define LARFS 0x2000 /* Late RX Frame Sync Select */
+#define RCKFE 0x4000 /* RX Clock Falling Edge Select */
+/* SPORTx_RCR1 Deprecated Masks */
+#define RULAW DTYPE_ULAW /* Compand Using u-Law */
+#define RALAW DTYPE_ALAW /* Compand Using A-Law */
+
+/* SPORTx_RCR2 Masks */
+#ifdef _MISRA_RULES
+#define SLEN(x) ((x)&0x1Fu) /* SPORT RX Word Length (2 - 31) */
+#else
+#define SLEN(x) ((x)&0x1F) /* SPORT RX Word Length (2 - 31) */
+#endif /* _MISRA_RULES */
+#define RXSE 0x0100 /*RX Secondary Enable */
+#define RSFSE 0x0200 /*RX Stereo Frame Sync Enable */
+#define RRFST 0x0400 /*Right-First Data Order */
+
+/*SPORTx_STAT Masks */
+#define RXNE 0x0001 /*RX FIFO Not Empty Status */
+#define RUVF 0x0002 /*RX Underflow Status */
+#define ROVF 0x0004 /*RX Overflow Status */
+#define TXF 0x0008 /*TX FIFO Full Status */
+#define TUVF 0x0010 /*TX Underflow Status */
+#define TOVF 0x0020 /*TX Overflow Status */
+#define TXHRE 0x0040 /*TX Hold Register Empty */
+
+/*SPORTx_MCMC1 Masks */
+#define WOFF 0x000003FF /*Multichannel Window Offset Field */
+/* SPORTx_MCMC1 Macros */
+#ifdef _MISRA_RULES
+#define SET_WOFF(x) ((x) & 0x3FFu) /* Multichannel Window Offset Field */
+/* Only use SET_WSIZE Macro With Logic OR While Setting Lower Order Bits */
+#define SET_WSIZE(x) (((((x)>>0x3)-1u)&0xFu) << 0xC) /* Multichannel Window Size = (x/8)-1 */
+#else
+#define SET_WOFF(x) ((x) & 0x3FF) /* Multichannel Window Offset Field */
+/* Only use SET_WSIZE Macro With Logic OR While Setting Lower Order Bits */
+#define SET_WSIZE(x) (((((x)>>0x3)-1)&0xF) << 0xC) /* Multichannel Window Size = (x/8)-1 */
+#endif /* _MISRA_RULES */
+
+
+/*SPORTx_MCMC2 Masks */
+#define MCCRM 0x0003 /*Multichannel Clock Recovery Mode */
+#define REC_BYPASS 0x0000 /* Bypass Mode (No Clock Recovery) */
+#define REC_2FROM4 0x0002 /* Recover 2 MHz Clock from 4 MHz Clock */
+#define REC_8FROM16 0x0003 /* Recover 8 MHz Clock from 16 MHz Clock */
+#define MCDTXPE 0x0004 /*Multichannel DMA Transmit Packing */
+#define MCDRXPE 0x0008 /*Multichannel DMA Receive Packing */
+#define MCMEN 0x0010 /*Multichannel Frame Mode Enable */
+#define FSDR 0x0080 /*Multichannel Frame Sync to Data Relationship */
+#define MFD 0xF000 /*Multichannel Frame Delay */
+#define MFD_0 0x0000 /* Multichannel Frame Delay = 0 */
+#define MFD_1 0x1000 /* Multichannel Frame Delay = 1 */
+#define MFD_2 0x2000 /* Multichannel Frame Delay = 2 */
+#define MFD_3 0x3000 /* Multichannel Frame Delay = 3 */
+#define MFD_4 0x4000 /* Multichannel Frame Delay = 4 */
+#define MFD_5 0x5000 /* Multichannel Frame Delay = 5 */
+#define MFD_6 0x6000 /* Multichannel Frame Delay = 6 */
+#define MFD_7 0x7000 /* Multichannel Frame Delay = 7 */
+#define MFD_8 0x8000 /* Multichannel Frame Delay = 8 */
+#define MFD_9 0x9000 /* Multichannel Frame Delay = 9 */
+#define MFD_10 0xA000 /* Multichannel Frame Delay = 10 */
+#define MFD_11 0xB000 /* Multichannel Frame Delay = 11 */
+#define MFD_12 0xC000 /* Multichannel Frame Delay = 12 */
+#define MFD_13 0xD000 /* Multichannel Frame Delay = 13 */
+#define MFD_14 0xE000 /* Multichannel Frame Delay = 14 */
+#define MFD_15 0xF000 /* Multichannel Frame Delay = 15 */
+
+
/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */
/* PPI_CONTROL Masks */
#define PORT_EN 0x0001 /* PPI Port Enable */
diff --git a/trunk/arch/blackfin/mach-bf548/boards/cm_bf548.c b/trunk/arch/blackfin/mach-bf548/boards/cm_bf548.c
index 0c38eec9ade1..dbb6b1d83f6d 100644
--- a/trunk/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/trunk/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -706,6 +706,7 @@ static struct mtd_partition partition_info[] = {
};
static struct bf5xx_nand_platform bf5xx_nand_platform = {
+ .page_size = NFC_PG_SIZE_256,
.data_width = NFC_NWIDTH_8,
.partitions = partition_info,
.nr_partitions = ARRAY_SIZE(partition_info),
diff --git a/trunk/arch/blackfin/mach-bf548/boards/ezkit.c b/trunk/arch/blackfin/mach-bf548/boards/ezkit.c
index 56682a36e42d..6fcfb9187c35 100644
--- a/trunk/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/trunk/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -849,6 +849,7 @@ static struct mtd_partition partition_info[] = {
};
static struct bf5xx_nand_platform bf5xx_nand_platform = {
+ .page_size = NFC_PG_SIZE_256,
.data_width = NFC_NWIDTH_8,
.partitions = partition_info,
.nr_partitions = ARRAY_SIZE(partition_info),
diff --git a/trunk/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h b/trunk/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
index 7866197f5485..95ff44601fd1 100644
--- a/trunk/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
+++ b/trunk/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
@@ -2221,6 +2221,73 @@
#define RCVDATA16 0xffff /* Receive FIFO 16-Bit Data */
+/* Bit masks for SPORTx_TCR1 */
+
+#define TCKFE 0x4000 /* Clock Falling Edge Select */
+#define LATFS 0x2000 /* Late Transmit Frame Sync */
+#define LTFS 0x1000 /* Low Transmit Frame Sync Select */
+#define DITFS 0x800 /* Data-Independent Transmit Frame Sync Select */
+#define TFSR 0x400 /* Transmit Frame Sync Required Select */
+#define ITFS 0x200 /* Internal Transmit Frame Sync Select */
+#define TLSBIT 0x10 /* Transmit Bit Order */
+#define TDTYPE 0xc /* Data Formatting Type Select */
+#define ITCLK 0x2 /* Internal Transmit Clock Select */
+#define TSPEN 0x1 /* Transmit Enable */
+
+/* Bit masks for SPORTx_TCR2 */
+
+#define TRFST 0x400 /* Left/Right Order */
+#define TSFSE 0x200 /* Transmit Stereo Frame Sync Enable */
+#define TXSE 0x100 /* TxSEC Enable */
+#define SLEN_T 0x1f /* SPORT Word Length */
+
+/* Bit masks for SPORTx_RCR1 */
+
+#define RCKFE 0x4000 /* Clock Falling Edge Select */
+#define LARFS 0x2000 /* Late Receive Frame Sync */
+#define LRFS 0x1000 /* Low Receive Frame Sync Select */
+#define RFSR 0x400 /* Receive Frame Sync Required Select */
+#define IRFS 0x200 /* Internal Receive Frame Sync Select */
+#define RLSBIT 0x10 /* Receive Bit Order */
+#define RDTYPE 0xc /* Data Formatting Type Select */
+#define IRCLK 0x2 /* Internal Receive Clock Select */
+#define RSPEN 0x1 /* Receive Enable */
+
+/* Bit masks for SPORTx_RCR2 */
+
+#define RRFST 0x400 /* Left/Right Order */
+#define RSFSE 0x200 /* Receive Stereo Frame Sync Enable */
+#define RXSE 0x100 /* RxSEC Enable */
+#define SLEN_R 0x1f /* SPORT Word Length */
+
+/* Bit masks for SPORTx_STAT */
+
+#define TXHRE 0x40 /* Transmit Hold Register Empty */
+#define TOVF 0x20 /* Sticky Transmit Overflow Status */
+#define TUVF 0x10 /* Sticky Transmit Underflow Status */
+#define TXF 0x8 /* Transmit FIFO Full Status */
+#define ROVF 0x4 /* Sticky Receive Overflow Status */
+#define RUVF 0x2 /* Sticky Receive Underflow Status */
+#define RXNE 0x1 /* Receive FIFO Not Empty Status */
+
+/* Bit masks for SPORTx_MCMC1 */
+
+#define SP_WSIZE 0xf000 /* Window Size */
+#define SP_WOFF 0x3ff /* Windows Offset */
+
+/* Bit masks for SPORTx_MCMC2 */
+
+#define MFD 0xf000 /* Multi channel Frame Delay */
+#define FSDR 0x80 /* Frame Sync to Data Relationship */
+#define MCMEN 0x10 /* Multi channel Frame Mode Enable */
+#define MCDRXPE 0x8 /* Multi channel DMA Receive Packing */
+#define MCDTXPE 0x4 /* Multi channel DMA Transmit Packing */
+#define MCCRM 0x3 /* 2X Clock Recovery Mode */
+
+/* Bit masks for SPORTx_CHNL */
+
+#define CUR_CHNL 0x3ff /* Current Channel Indicator */
+
/* Bit masks for UARTx_LCR */
#if 0
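
Unlike the other parts, the BF54x block above exposes the word-length field through plain masks (SLEN_T, SLEN_R) rather than an SLEN(x) macro. A sketch with an arbitrary illustrative value:

unsigned short tcr2 = (0x0F & SLEN_T) | TSFSE;  /* word-length field plus stereo TX frame sync */
unsigned short rcr2 = (0x0F & SLEN_R) | RSFSE;  /* same idea on the receive side */
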
diff --git a/trunk/arch/blackfin/mach-bf561/include/mach/defBF561.h b/trunk/arch/blackfin/mach-bf561/include/mach/defBF561.h
index 2674f0097576..4c8e36b7fb33 100644
--- a/trunk/arch/blackfin/mach-bf561/include/mach/defBF561.h
+++ b/trunk/arch/blackfin/mach-bf561/include/mach/defBF561.h
@@ -1007,6 +1007,66 @@
#define IREN_P 0x01
#define UCEN_P 0x00
+/* ********** SERIAL PORT MASKS ********************** */
+
+/* SPORTx_TCR1 Masks */
+#define TSPEN 0x0001 /* TX enable */
+#define ITCLK 0x0002 /* Internal TX Clock Select */
+#define TDTYPE 0x000C /* TX Data Formatting Select */
+#define TLSBIT 0x0010 /* TX Bit Order */
+#define ITFS 0x0200 /* Internal TX Frame Sync Select */
+#define TFSR 0x0400 /* TX Frame Sync Required Select */
+#define DITFS 0x0800 /* Data Independent TX Frame Sync Select */
+#define LTFS 0x1000 /* Low TX Frame Sync Select */
+#define LATFS 0x2000 /* Late TX Frame Sync Select */
+#define TCKFE 0x4000 /* TX Clock Falling Edge Select */
+
+/* SPORTx_TCR2 Masks */
+#define SLEN 0x001F /*TX Word Length */
+#define TXSE 0x0100 /*TX Secondary Enable */
+#define TSFSE 0x0200 /*TX Stereo Frame Sync Enable */
+#define TRFST 0x0400 /*TX Right-First Data Order */
+
+/* SPORTx_RCR1 Masks */
+#define RSPEN 0x0001 /* RX enable */
+#define IRCLK 0x0002 /* Internal RX Clock Select */
+#define RDTYPE 0x000C /* RX Data Formatting Select */
+#define RULAW 0x0008 /* u-Law enable */
+#define RALAW 0x000C /* A-Law enable */
+#define RLSBIT 0x0010 /* RX Bit Order */
+#define IRFS 0x0200 /* Internal RX Frame Sync Select */
+#define RFSR 0x0400 /* RX Frame Sync Required Select */
+#define LRFS 0x1000 /* Low RX Frame Sync Select */
+#define LARFS 0x2000 /* Late RX Frame Sync Select */
+#define RCKFE 0x4000 /* RX Clock Falling Edge Select */
+
+/* SPORTx_RCR2 Masks */
+#define SLEN 0x001F /*RX Word Length */
+#define RXSE 0x0100 /*RX Secondary Enable */
+#define RSFSE 0x0200 /*RX Stereo Frame Sync Enable */
+#define RRFST 0x0400 /*Right-First Data Order */
+
+/*SPORTx_STAT Masks */
+#define RXNE 0x0001 /*RX FIFO Not Empty Status */
+#define RUVF 0x0002 /*RX Underflow Status */
+#define ROVF 0x0004 /*RX Overflow Status */
+#define TXF 0x0008 /*TX FIFO Full Status */
+#define TUVF 0x0010 /*TX Underflow Status */
+#define TOVF 0x0020 /*TX Overflow Status */
+#define TXHRE 0x0040 /*TX Hold Register Empty */
+
+/*SPORTx_MCMC1 Masks */
+#define SP_WSIZE 0x0000F000 /*Multichannel Window Size Field */
+#define SP_WOFF 0x000003FF /*Multichannel Window Offset Field */
+
+/*SPORTx_MCMC2 Masks */
+#define MCCRM 0x00000003 /*Multichannel Clock Recovery Mode */
+#define MCDTXPE 0x00000004 /*Multichannel DMA Transmit Packing */
+#define MCDRXPE 0x00000008 /*Multichannel DMA Receive Packing */
+#define MCMEN 0x00000010 /*Multichannel Frame Mode Enable */
+#define FSDR 0x00000080 /*Multichannel Frame Sync to Data Relationship */
+#define MFD 0x0000F000 /*Multichannel Frame Delay */
+
/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */
/* PPI_CONTROL Masks */
diff --git a/trunk/arch/blackfin/mach-common/entry.S b/trunk/arch/blackfin/mach-common/entry.S
index af1bffa21dc1..a5847f5d67c7 100644
--- a/trunk/arch/blackfin/mach-common/entry.S
+++ b/trunk/arch/blackfin/mach-common/entry.S
@@ -1628,9 +1628,6 @@ ENTRY(_sys_call_table)
.long _sys_rt_tgsigqueueinfo
.long _sys_perf_event_open
.long _sys_recvmmsg /* 370 */
- .long _sys_fanotify_init
- .long _sys_fanotify_mark
- .long _sys_prlimit64
.rept NR_syscalls-(.-_sys_call_table)/4
.long _sys_ni_syscall
diff --git a/trunk/arch/cris/arch-v10/kernel/process.c b/trunk/arch/cris/arch-v10/kernel/process.c
index 9a57db6907f5..93f0f64b1326 100644
--- a/trunk/arch/cris/arch-v10/kernel/process.c
+++ b/trunk/arch/cris/arch-v10/kernel/process.c
@@ -204,9 +204,7 @@ asmlinkage int sys_vfork(long r10, long r11, long r12, long r13, long mof, long
/*
* sys_execve() executes a new program.
*/
-asmlinkage int sys_execve(const char *fname,
- const char *const *argv,
- const char *const *envp,
+asmlinkage int sys_execve(const char *fname, char **argv, char **envp,
long r13, long mof, long srp,
struct pt_regs *regs)
{
diff --git a/trunk/arch/cris/arch-v32/kernel/process.c b/trunk/arch/cris/arch-v32/kernel/process.c
index 562f84718906..2661a9529d70 100644
--- a/trunk/arch/cris/arch-v32/kernel/process.c
+++ b/trunk/arch/cris/arch-v32/kernel/process.c
@@ -218,10 +218,8 @@ sys_vfork(long r10, long r11, long r12, long r13, long mof, long srp,
/* sys_execve() executes a new program. */
asmlinkage int
-sys_execve(const char *fname,
- const char *const *argv,
- const char *const *envp, long r13, long mof, long srp,
- struct pt_regs *regs)
+sys_execve(const char *fname, char **argv, char **envp, long r13, long mof, long srp,
+ struct pt_regs *regs)
{
int error;
char *filename;
diff --git a/trunk/arch/frv/kernel/process.c b/trunk/arch/frv/kernel/process.c
index 2b63b0191f52..428931cf2f0c 100644
--- a/trunk/arch/frv/kernel/process.c
+++ b/trunk/arch/frv/kernel/process.c
@@ -250,9 +250,8 @@ int copy_thread(unsigned long clone_flags,
/*
* sys_execve() executes a new program.
*/
-asmlinkage int sys_execve(const char __user *name,
- const char __user *const __user *argv,
- const char __user *const __user *envp)
+asmlinkage int sys_execve(const char __user *name, char __user * __user *argv,
+ char __user * __user *envp)
{
int error;
char * filename;
diff --git a/trunk/arch/h8300/include/asm/atomic.h b/trunk/arch/h8300/include/asm/atomic.h
index 984221abb66d..e936804b7508 100644
--- a/trunk/arch/h8300/include/asm/atomic.h
+++ b/trunk/arch/h8300/include/asm/atomic.h
@@ -18,8 +18,7 @@
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
- unsigned long flags;
- int ret;
+ int ret,flags;
local_irq_save(flags);
ret = v->counter += i;
local_irq_restore(flags);
@@ -31,8 +30,7 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
- unsigned long flags;
- int ret;
+ int ret,flags;
local_irq_save(flags);
ret = v->counter -= i;
local_irq_restore(flags);
@@ -44,8 +42,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
static __inline__ int atomic_inc_return(atomic_t *v)
{
- unsigned long flags;
- int ret;
+ int ret,flags;
local_irq_save(flags);
v->counter++;
ret = v->counter;
@@ -67,8 +64,7 @@ static __inline__ int atomic_inc_return(atomic_t *v)
static __inline__ int atomic_dec_return(atomic_t *v)
{
- unsigned long flags;
- int ret;
+ int ret,flags;
local_irq_save(flags);
--v->counter;
ret = v->counter;
@@ -80,8 +76,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
- unsigned long flags;
- int ret;
+ int ret,flags;
local_irq_save(flags);
--v->counter;
ret = v->counter;
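
The atomic.h hunks above shrink the flags local from unsigned long to int. For reference, the pattern these helpers implement is a plain read-modify-write bracketed by local_irq_save()/local_irq_restore(); a sketch with a hypothetical name, keeping the original unsigned long flags:

static inline int example_atomic_add_return(int i, atomic_t *v)
{
        unsigned long flags;            /* local_irq_save() expects unsigned long */
        int ret;

        local_irq_save(flags);          /* mask interrupts around the update */
        ret = v->counter += i;          /* non-atomic RMW is safe with IRQs off */
        local_irq_restore(flags);       /* restore the previous interrupt state */
        return ret;
}
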
diff --git a/trunk/arch/h8300/include/asm/system.h b/trunk/arch/h8300/include/asm/system.h
index 16bf1560ff68..d98d97685f06 100644
--- a/trunk/arch/h8300/include/asm/system.h
+++ b/trunk/arch/h8300/include/asm/system.h
@@ -3,8 +3,6 @@
#include
-struct pt_regs;
-
/*
* switch_to(n) should switch tasks to task ptr, first checking that
* ptr isn't the current task, in which case it does nothing. This
@@ -157,6 +155,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
#define arch_align_stack(x) (x)
-extern void die(const char *str, struct pt_regs *fp, unsigned long err);
+void die(char *str, struct pt_regs *fp, unsigned long err);
#endif /* _H8300_SYSTEM_H */
diff --git a/trunk/arch/h8300/kernel/process.c b/trunk/arch/h8300/kernel/process.c
index 97478138e361..8b7b78d77d5c 100644
--- a/trunk/arch/h8300/kernel/process.c
+++ b/trunk/arch/h8300/kernel/process.c
@@ -212,10 +212,7 @@ int copy_thread(unsigned long clone_flags,
/*
* sys_execve() executes a new program.
*/
-asmlinkage int sys_execve(const char *name,
- const char *const *argv,
- const char *const *envp,
- int dummy, ...)
+asmlinkage int sys_execve(const char *name, char **argv, char **envp,int dummy,...)
{
int error;
char * filename;
diff --git a/trunk/arch/h8300/kernel/sys_h8300.c b/trunk/arch/h8300/kernel/sys_h8300.c
index aaf5e5a48f93..f9b3f44da69f 100644
--- a/trunk/arch/h8300/kernel/sys_h8300.c
+++ b/trunk/arch/h8300/kernel/sys_h8300.c
@@ -51,13 +51,11 @@ asmlinkage void syscall_print(void *dummy,...)
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
*/
-int kernel_execve(const char *filename,
- const char *const argv[],
- const char *const envp[])
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
register long res __asm__("er0");
- register const char *const *_c __asm__("er3") = envp;
- register const char *const *_b __asm__("er2") = argv;
+ register char *const *_c __asm__("er3") = envp;
+ register char *const *_b __asm__("er2") = argv;
register const char * _a __asm__("er1") = filename;
__asm__ __volatile__ ("mov.l %1,er0\n\t"
"trapa #0\n\t"
diff --git a/trunk/arch/h8300/kernel/traps.c b/trunk/arch/h8300/kernel/traps.c
index dfa05bd908b6..3c0b66bc669e 100644
--- a/trunk/arch/h8300/kernel/traps.c
+++ b/trunk/arch/h8300/kernel/traps.c
@@ -96,7 +96,7 @@ static void dump(struct pt_regs *fp)
printk("\n\n");
}
-void die(const char *str, struct pt_regs *fp, unsigned long err)
+void die(char *str, struct pt_regs *fp, unsigned long err)
{
static int diecount;
diff --git a/trunk/arch/ia64/hp/sim/simserial.c b/trunk/arch/ia64/hp/sim/simserial.c
index 1e8d71ad93ef..2bef5261d96d 100644
--- a/trunk/arch/ia64/hp/sim/simserial.c
+++ b/trunk/arch/ia64/hp/sim/simserial.c
@@ -149,7 +149,7 @@ static void receive_chars(struct tty_struct *tty)
ch = ia64_ssc(0, 0, 0, 0,
SSC_GETCHAR);
while (!ch);
- handle_sysrq(ch);
+ handle_sysrq(ch, NULL);
}
#endif
seen_esc = 0;
diff --git a/trunk/arch/ia64/include/asm/unistd.h b/trunk/arch/ia64/include/asm/unistd.h
index 954d398a54b4..87f1bd1efc82 100644
--- a/trunk/arch/ia64/include/asm/unistd.h
+++ b/trunk/arch/ia64/include/asm/unistd.h
@@ -356,6 +356,8 @@ asmlinkage unsigned long sys_mmap2(
int fd, long pgoff);
struct pt_regs;
struct sigaction;
+long sys_execve(const char __user *filename, char __user * __user *argv,
+ char __user * __user *envp, struct pt_regs *regs);
asmlinkage long sys_ia64_pipe(void);
asmlinkage long sys_rt_sigaction(int sig,
const struct sigaction __user *act,
diff --git a/trunk/arch/ia64/kernel/process.c b/trunk/arch/ia64/kernel/process.c
index 16f1c7b04c69..a879c03b7f1c 100644
--- a/trunk/arch/ia64/kernel/process.c
+++ b/trunk/arch/ia64/kernel/process.c
@@ -633,9 +633,7 @@ dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
}
long
-sys_execve (const char __user *filename,
- const char __user *const __user *argv,
- const char __user *const __user *envp,
+sys_execve (const char __user *filename, char __user * __user *argv, char __user * __user *envp,
struct pt_regs *regs)
{
char *fname;
diff --git a/trunk/arch/m32r/kernel/process.c b/trunk/arch/m32r/kernel/process.c
index 422bea9f1dbc..8665a4d868ec 100644
--- a/trunk/arch/m32r/kernel/process.c
+++ b/trunk/arch/m32r/kernel/process.c
@@ -289,8 +289,8 @@ asmlinkage int sys_vfork(unsigned long r0, unsigned long r1, unsigned long r2,
* sys_execve() executes a new program.
*/
asmlinkage int sys_execve(const char __user *ufilename,
- const char __user *const __user *uargv,
- const char __user *const __user *uenvp,
+ char __user * __user *uargv,
+ char __user * __user *uenvp,
unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, struct pt_regs regs)
{
diff --git a/trunk/arch/m32r/kernel/sys_m32r.c b/trunk/arch/m32r/kernel/sys_m32r.c
index d841fb6cc703..0a00f467edfa 100644
--- a/trunk/arch/m32r/kernel/sys_m32r.c
+++ b/trunk/arch/m32r/kernel/sys_m32r.c
@@ -93,9 +93,7 @@ asmlinkage int sys_cachectl(char *addr, int nbytes, int op)
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
*/
-int kernel_execve(const char *filename,
- const char *const argv[],
- const char *const envp[])
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
register long __scno __asm__ ("r7") = __NR_execve;
register long __arg3 __asm__ ("r2") = (long)(envp);
diff --git a/trunk/arch/m68k/include/asm/ide.h b/trunk/arch/m68k/include/asm/ide.h
index 492fee8a1ab2..3958726664ba 100644
--- a/trunk/arch/m68k/include/asm/ide.h
+++ b/trunk/arch/m68k/include/asm/ide.h
@@ -1,4 +1,6 @@
/*
+ * linux/include/asm-m68k/ide.h
+ *
* Copyright (C) 1994-1996 Linus Torvalds & authors
*/
@@ -32,8 +34,6 @@
#include
#include
-#ifdef CONFIG_MMU
-
/*
* Get rid of defs from io.h - ide has its private and conflicting versions
* Since so far no single m68k platform uses ISA/PCI I/O space for IDE, we
@@ -53,14 +53,5 @@
#define __ide_mm_outsw(port, addr, n) raw_outsw((u16 *)port, addr, n)
#define __ide_mm_outsl(port, addr, n) raw_outsl((u32 *)port, addr, n)
-#else
-
-#define __ide_mm_insw(port, addr, n) io_insw((unsigned int)port, addr, n)
-#define __ide_mm_insl(port, addr, n) io_insl((unsigned int)port, addr, n)
-#define __ide_mm_outsw(port, addr, n) io_outsw((unsigned int)port, addr, n)
-#define __ide_mm_outsl(port, addr, n) io_outsl((unsigned int)port, addr, n)
-
-#endif /* CONFIG_MMU */
-
#endif /* __KERNEL__ */
#endif /* _M68K_IDE_H */
diff --git a/trunk/arch/m68k/kernel/process.c b/trunk/arch/m68k/kernel/process.c
index 18732ab23292..221d0b71ce39 100644
--- a/trunk/arch/m68k/kernel/process.c
+++ b/trunk/arch/m68k/kernel/process.c
@@ -315,9 +315,7 @@ EXPORT_SYMBOL(dump_fpu);
/*
* sys_execve() executes a new program.
*/
-asmlinkage int sys_execve(const char __user *name,
- const char __user *const __user *argv,
- const char __user *const __user *envp)
+asmlinkage int sys_execve(const char __user *name, char __user * __user *argv, char __user * __user *envp)
{
int error;
char * filename;
diff --git a/trunk/arch/m68k/kernel/sys_m68k.c b/trunk/arch/m68k/kernel/sys_m68k.c
index 2f431ece7b5f..77896692eb0a 100644
--- a/trunk/arch/m68k/kernel/sys_m68k.c
+++ b/trunk/arch/m68k/kernel/sys_m68k.c
@@ -459,9 +459,7 @@ asmlinkage int sys_getpagesize(void)
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
*/
-int kernel_execve(const char *filename,
- const char *const argv[],
- const char *const envp[])
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
register long __res asm ("%d0") = __NR_execve;
register long __a asm ("%d1") = (long)(filename);
diff --git a/trunk/arch/m68knommu/kernel/process.c b/trunk/arch/m68knommu/kernel/process.c
index 6d3390590e5b..6350f68cd026 100644
--- a/trunk/arch/m68knommu/kernel/process.c
+++ b/trunk/arch/m68knommu/kernel/process.c
@@ -316,14 +316,14 @@ void dump(struct pt_regs *fp)
fp->d0, fp->d1, fp->d2, fp->d3);
printk(KERN_EMERG "d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
fp->d4, fp->d5, fp->a0, fp->a1);
- printk(KERN_EMERG "\nUSP: %08x TRAPFRAME: %p\n",
- (unsigned int) rdusp(), fp);
+ printk(KERN_EMERG "\nUSP: %08x TRAPFRAME: %08x\n",
+ (unsigned int) rdusp(), (unsigned int) fp);
printk(KERN_EMERG "\nCODE:");
tp = ((unsigned char *) fp->pc) - 0x20;
for (sp = (unsigned long *) tp, i = 0; (i < 0x40); i += 4) {
if ((i % 0x10) == 0)
- printk(KERN_EMERG "%p: ", tp + i);
+ printk(KERN_EMERG "%08x: ", (int) (tp + i));
printk("%08x ", (int) *sp++);
}
printk(KERN_EMERG "\n");
@@ -332,7 +332,7 @@ void dump(struct pt_regs *fp)
tp = ((unsigned char *) fp) - 0x40;
for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) {
if ((i % 0x10) == 0)
- printk(KERN_EMERG "%p: ", tp + i);
+ printk(KERN_EMERG "%08x: ", (int) (tp + i));
printk("%08x ", (int) *sp++);
}
printk(KERN_EMERG "\n");
@@ -341,7 +341,7 @@ void dump(struct pt_regs *fp)
tp = (unsigned char *) (rdusp() - 0x10);
for (sp = (unsigned long *) tp, i = 0; (i < 0x80); i += 4) {
if ((i % 0x10) == 0)
- printk(KERN_EMERG "%p: ", tp + i);
+ printk(KERN_EMERG "%08x: ", (int) (tp + i));
printk("%08x ", (int) *sp++);
}
printk(KERN_EMERG "\n");
@@ -350,9 +350,7 @@ void dump(struct pt_regs *fp)
/*
* sys_execve() executes a new program.
*/
-asmlinkage int sys_execve(const char *name,
- const char *const *argv,
- const char *const *envp)
+asmlinkage int sys_execve(const char *name, char **argv, char **envp)
{
int error;
char * filename;
diff --git a/trunk/arch/m68knommu/kernel/sys_m68k.c b/trunk/arch/m68knommu/kernel/sys_m68k.c
index 68488ae47f0a..d65e9c4c930c 100644
--- a/trunk/arch/m68knommu/kernel/sys_m68k.c
+++ b/trunk/arch/m68knommu/kernel/sys_m68k.c
@@ -44,9 +44,7 @@ asmlinkage int sys_getpagesize(void)
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
*/
-int kernel_execve(const char *filename,
- const char *const argv[],
- const char *const envp[])
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
register long __res asm ("%d0") = __NR_execve;
register long __a asm ("%d1") = (long)(filename);
diff --git a/trunk/arch/m68knommu/kernel/vmlinux.lds.S b/trunk/arch/m68knommu/kernel/vmlinux.lds.S
index ef332136f96d..a91b2713451d 100644
--- a/trunk/arch/m68knommu/kernel/vmlinux.lds.S
+++ b/trunk/arch/m68knommu/kernel/vmlinux.lds.S
@@ -150,8 +150,6 @@ SECTIONS {
_sdata = . ;
DATA_DATA
CACHELINE_ALIGNED_DATA(32)
- PAGE_ALIGNED_DATA(PAGE_SIZE)
- *(.data..shared_aligned)
INIT_TASK_DATA(THREAD_SIZE)
_edata = . ;
} > DATA
diff --git a/trunk/arch/microblaze/kernel/prom_parse.c b/trunk/arch/microblaze/kernel/prom_parse.c
index 99d9b61cccb5..d33ba17601fa 100644
--- a/trunk/arch/microblaze/kernel/prom_parse.c
+++ b/trunk/arch/microblaze/kernel/prom_parse.c
@@ -73,7 +73,7 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
/* We can only get here if we hit a P2P bridge with no node,
* let's do standard swizzling and try again
*/
- lspec = pci_swizzle_interrupt_pin(pdev, lspec);
+ lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec);
pdev = ppdev;
}
diff --git a/trunk/arch/microblaze/kernel/sys_microblaze.c b/trunk/arch/microblaze/kernel/sys_microblaze.c
index 2250fe9d269b..6abab6ebedbe 100644
--- a/trunk/arch/microblaze/kernel/sys_microblaze.c
+++ b/trunk/arch/microblaze/kernel/sys_microblaze.c
@@ -47,10 +47,8 @@ asmlinkage long microblaze_clone(int flags, unsigned long stack, struct pt_regs
return do_fork(flags, stack, regs, 0, NULL, NULL);
}
-asmlinkage long microblaze_execve(const char __user *filenamei,
- const char __user *const __user *argv,
- const char __user *const __user *envp,
- struct pt_regs *regs)
+asmlinkage long microblaze_execve(const char __user *filenamei, char __user *__user *argv,
+ char __user *__user *envp, struct pt_regs *regs)
{
int error;
char *filename;
@@ -79,9 +77,7 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
*/
-int kernel_execve(const char *filename,
- const char *const argv[],
- const char *const envp[])
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
register const char *__a __asm__("r5") = filename;
register const void *__b __asm__("r6") = argv;
diff --git a/trunk/arch/microblaze/pci/pci-common.c b/trunk/arch/microblaze/pci/pci-common.c
index 55ef532f32be..23be25fec4d6 100644
--- a/trunk/arch/microblaze/pci/pci-common.c
+++ b/trunk/arch/microblaze/pci/pci-common.c
@@ -27,11 +27,10 @@
#include
#include
#include
-#include
-#include
#include
#include
+#include
#include
#include
@@ -1078,7 +1077,7 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
struct dev_archdata *sd = &dev->dev.archdata;
/* Setup OF node pointer in archdata */
- dev->dev.of_node = pci_device_to_OF_node(dev);
+ sd->of_node = pci_device_to_OF_node(dev);
/* Fixup NUMA node as it may not be setup yet by the generic
* code and is needed by the DMA init
diff --git a/trunk/arch/microblaze/pci/xilinx_pci.c b/trunk/arch/microblaze/pci/xilinx_pci.c
index 0687a42a5bd4..7869a41b0f94 100644
--- a/trunk/arch/microblaze/pci/xilinx_pci.c
+++ b/trunk/arch/microblaze/pci/xilinx_pci.c
@@ -16,7 +16,6 @@
#include
#include
-#include
#include
#include
diff --git a/trunk/arch/mips/kernel/syscall.c b/trunk/arch/mips/kernel/syscall.c
index 1dc6edff45e0..bddce0bca195 100644
--- a/trunk/arch/mips/kernel/syscall.c
+++ b/trunk/arch/mips/kernel/syscall.c
@@ -258,10 +258,8 @@ asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs)
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
- error = do_execve(filename,
- (const char __user *const __user *) (long)regs.regs[5],
- (const char __user *const __user *) (long)regs.regs[6],
- &regs);
+ error = do_execve(filename, (char __user *__user *) (long)regs.regs[5],
+ (char __user *__user *) (long)regs.regs[6], &regs);
putname(filename);
out:
@@ -438,9 +436,7 @@ asmlinkage void bad_stack(void)
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
*/
-int kernel_execve(const char *filename,
- const char *const argv[],
- const char *const envp[])
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
register unsigned long __a0 asm("$4") = (unsigned long) filename;
register unsigned long __a1 asm("$5") = (unsigned long) argv;
diff --git a/trunk/arch/mn10300/kernel/process.c b/trunk/arch/mn10300/kernel/process.c
index f48373e2bc1c..762eb325b949 100644
--- a/trunk/arch/mn10300/kernel/process.c
+++ b/trunk/arch/mn10300/kernel/process.c
@@ -269,8 +269,8 @@ asmlinkage long sys_vfork(void)
}
asmlinkage long sys_execve(const char __user *name,
- const char __user *const __user *argv,
- const char __user *const __user *envp)
+ char __user * __user *argv,
+ char __user * __user *envp)
{
char *filename;
int error;
diff --git a/trunk/arch/mn10300/mm/dma-alloc.c b/trunk/arch/mn10300/mm/dma-alloc.c
index 159acb02cfd4..4e34880bea03 100644
--- a/trunk/arch/mn10300/mm/dma-alloc.c
+++ b/trunk/arch/mn10300/mm/dma-alloc.c
@@ -25,8 +25,7 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
unsigned long addr;
void *ret;
- pr_debug("dma_alloc_coherent(%s,%zu,%x)\n",
- dev ? dev_name(dev) : "?", size, gfp);
+ printk("dma_alloc_coherent(%s,%zu,,%x)\n", dev_name(dev), size, gfp);
if (0xbe000000 - pci_sram_allocated >= size) {
size = (size + 255) & ~255;
diff --git a/trunk/arch/parisc/hpux/fs.c b/trunk/arch/parisc/hpux/fs.c
index 0dc8543acb4f..1444875a7611 100644
--- a/trunk/arch/parisc/hpux/fs.c
+++ b/trunk/arch/parisc/hpux/fs.c
@@ -41,10 +41,8 @@ int hpux_execve(struct pt_regs *regs)
if (IS_ERR(filename))
goto out;
- error = do_execve(filename,
- (const char __user *const __user *) regs->gr[25],
- (const char __user *const __user *) regs->gr[24],
- regs);
+ error = do_execve(filename, (char __user * __user *) regs->gr[25],
+ (char __user * __user *) regs->gr[24], regs);
putname(filename);
diff --git a/trunk/arch/parisc/kernel/process.c b/trunk/arch/parisc/kernel/process.c
index 4b4b9181a1a0..76332dadc6e9 100644
--- a/trunk/arch/parisc/kernel/process.c
+++ b/trunk/arch/parisc/kernel/process.c
@@ -348,22 +348,17 @@ asmlinkage int sys_execve(struct pt_regs *regs)
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
- error = do_execve(filename,
- (const char __user *const __user *) regs->gr[25],
- (const char __user *const __user *) regs->gr[24],
- regs);
+ error = do_execve(filename, (char __user * __user *) regs->gr[25],
+ (char __user * __user *) regs->gr[24], regs);
putname(filename);
out:
return error;
}
-extern int __execve(const char *filename,
- const char *const argv[],
- const char *const envp[], struct task_struct *task);
-int kernel_execve(const char *filename,
- const char *const argv[],
- const char *const envp[])
+extern int __execve(const char *filename, char *const argv[],
+ char *const envp[], struct task_struct *task);
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
return __execve(filename, argv, envp, current);
}
diff --git a/trunk/arch/powerpc/Makefile b/trunk/arch/powerpc/Makefile
index b7212b619c52..e3ea151c9597 100644
--- a/trunk/arch/powerpc/Makefile
+++ b/trunk/arch/powerpc/Makefile
@@ -164,7 +164,7 @@ drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
all: zImage
# With make 3.82 we cannot mix normal and wildcard targets
-BOOT_TARGETS1 := zImage zImage.initrd uImage
+BOOT_TARGETS1 := zImage zImage.initrd uImaged
BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2)
diff --git a/trunk/arch/powerpc/boot/dts/canyonlands.dts b/trunk/arch/powerpc/boot/dts/canyonlands.dts
index a30370396250..5806ef0b860b 100644
--- a/trunk/arch/powerpc/boot/dts/canyonlands.dts
+++ b/trunk/arch/powerpc/boot/dts/canyonlands.dts
@@ -163,14 +163,6 @@
interrupts = <0x1e 4>;
};
- SATA0: sata@bffd1000 {
- compatible = "amcc,sata-460ex";
- reg = <4 0xbffd1000 0x800 4 0xbffd0800 0x400>;
- interrupt-parent = <&UIC3>;
- interrupts = <0x0 0x4 /* SATA */
- 0x5 0x4>; /* AHBDMA */
- };
-
POB0: opb {
compatible = "ibm,opb-460ex", "ibm,opb";
#address-cells = <1>;
diff --git a/trunk/arch/powerpc/include/asm/fsldma.h b/trunk/arch/powerpc/include/asm/fsldma.h
index debc5ed96d6e..a67aeed17d40 100644
--- a/trunk/arch/powerpc/include/asm/fsldma.h
+++ b/trunk/arch/powerpc/include/asm/fsldma.h
@@ -11,7 +11,6 @@
#ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
#define __ARCH_POWERPC_ASM_FSLDMA_H__
-#include
#include
/*
diff --git a/trunk/arch/powerpc/include/asm/mmu-hash64.h b/trunk/arch/powerpc/include/asm/mmu-hash64.h
index acac35d5b382..0e398cfee2c8 100644
--- a/trunk/arch/powerpc/include/asm/mmu-hash64.h
+++ b/trunk/arch/powerpc/include/asm/mmu-hash64.h
@@ -433,7 +433,7 @@ typedef struct {
* with. However gcc is not clever enough to compute the
* modulus (2^n-1) without a second multiply.
*/
-#define vsid_scramble(protovsid, size) \
+#define vsid_scrample(protovsid, size) \
((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))
#else /* 1 */
diff --git a/trunk/arch/powerpc/include/asm/reg.h b/trunk/arch/powerpc/include/asm/reg.h
index ff0005eec7dd..d8be016d2ede 100644
--- a/trunk/arch/powerpc/include/asm/reg.h
+++ b/trunk/arch/powerpc/include/asm/reg.h
@@ -951,14 +951,7 @@
#ifdef CONFIG_PPC64
extern void ppc64_runlatch_on(void);
-extern void __ppc64_runlatch_off(void);
-
-#define ppc64_runlatch_off() \
- do { \
- if (cpu_has_feature(CPU_FTR_CTRL) && \
- test_thread_flag(TIF_RUNLATCH)) \
- __ppc64_runlatch_off(); \
- } while (0)
+extern void ppc64_runlatch_off(void);
extern unsigned long scom970_read(unsigned int address);
extern void scom970_write(unsigned int address, unsigned long value);
diff --git a/trunk/arch/powerpc/include/asm/rwsem.h b/trunk/arch/powerpc/include/asm/rwsem.h
index 8447d89fbe72..24cd9281ec37 100644
--- a/trunk/arch/powerpc/include/asm/rwsem.h
+++ b/trunk/arch/powerpc/include/asm/rwsem.h
@@ -21,20 +21,15 @@
/*
* the semaphore definition
*/
-#ifdef CONFIG_PPC64
-# define RWSEM_ACTIVE_MASK 0xffffffffL
-#else
-# define RWSEM_ACTIVE_MASK 0x0000ffffL
-#endif
-
-#define RWSEM_UNLOCKED_VALUE 0x00000000L
-#define RWSEM_ACTIVE_BIAS 0x00000001L
-#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
+struct rw_semaphore {
+ /* XXX this should be able to be an atomic_t -- paulus */
+ signed int count;
+#define RWSEM_UNLOCKED_VALUE 0x00000000
+#define RWSEM_ACTIVE_BIAS 0x00000001
+#define RWSEM_ACTIVE_MASK 0x0000ffff
+#define RWSEM_WAITING_BIAS (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-struct rw_semaphore {
- long count;
spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -48,13 +43,9 @@ struct rw_semaphore {
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif
-#define __RWSEM_INITIALIZER(name) \
-{ \
- RWSEM_UNLOCKED_VALUE, \
- __SPIN_LOCK_UNLOCKED((name).wait_lock), \
- LIST_HEAD_INIT((name).wait_list) \
- __RWSEM_DEP_MAP_INIT(name) \
-}
+#define __RWSEM_INITIALIZER(name) \
+ { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+ LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -79,13 +70,13 @@ extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
*/
static inline void __down_read(struct rw_semaphore *sem)
{
- if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
+ if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
rwsem_down_read_failed(sem);
}
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
- long tmp;
+ int tmp;
while ((tmp = sem->count) >= 0) {
if (tmp == cmpxchg(&sem->count, tmp,
@@ -101,10 +92,10 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
*/
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
- long tmp;
+ int tmp;
- tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
- (atomic_long_t *)&sem->count);
+ tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+ (atomic_t *)(&sem->count));
if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
rwsem_down_write_failed(sem);
}
@@ -116,7 +107,7 @@ static inline void __down_write(struct rw_semaphore *sem)
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
- long tmp;
+ int tmp;
tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
RWSEM_ACTIVE_WRITE_BIAS);
@@ -128,9 +119,9 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
*/
static inline void __up_read(struct rw_semaphore *sem)
{
- long tmp;
+ int tmp;
- tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
+ tmp = atomic_dec_return((atomic_t *)(&sem->count));
if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
rwsem_wake(sem);
}
@@ -140,17 +131,17 @@ static inline void __up_read(struct rw_semaphore *sem)
*/
static inline void __up_write(struct rw_semaphore *sem)
{
- if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
- (atomic_long_t *)&sem->count) < 0))
+ if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+ (atomic_t *)(&sem->count)) < 0))
rwsem_wake(sem);
}
/*
* implement atomic add functionality
*/
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
- atomic_long_add(delta, (atomic_long_t *)&sem->count);
+ atomic_add(delta, (atomic_t *)(&sem->count));
}
/*
@@ -158,10 +149,9 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
*/
static inline void __downgrade_write(struct rw_semaphore *sem)
{
- long tmp;
+ int tmp;
- tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
- (atomic_long_t *)&sem->count);
+ tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
if (tmp < 0)
rwsem_downgrade_wake(sem);
}
@@ -169,14 +159,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
/*
* implement exchange and add functionality
*/
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
+static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
- return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
+ return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
- return sem->count != 0;
+ return (sem->count != 0);
}
#endif /* __KERNEL__ */
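
For context, the helpers above sit underneath the generic rwsem API. A short consumer-side sketch using the DECLARE_RWSEM() initializer from this header; the names are hypothetical:

static DECLARE_RWSEM(example_sem);

static void example_reader(void)
{
        down_read(&example_sem);        /* ends up in __down_read() above */
        /* ... read the shared state ... */
        up_read(&example_sem);          /* ends up in __up_read() above */
}
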
diff --git a/trunk/arch/powerpc/include/asm/systbl.h b/trunk/arch/powerpc/include/asm/systbl.h
index 3d212669a130..a5ee345b6a5c 100644
--- a/trunk/arch/powerpc/include/asm/systbl.h
+++ b/trunk/arch/powerpc/include/asm/systbl.h
@@ -326,6 +326,3 @@ SYSCALL_SPU(perf_event_open)
COMPAT_SYS_SPU(preadv)
COMPAT_SYS_SPU(pwritev)
COMPAT_SYS(rt_tgsigqueueinfo)
-SYSCALL(fanotify_init)
-COMPAT_SYS(fanotify_mark)
-SYSCALL_SPU(prlimit64)
diff --git a/trunk/arch/powerpc/include/asm/unistd.h b/trunk/arch/powerpc/include/asm/unistd.h
index 597e6f9d094a..f0a10266e7f7 100644
--- a/trunk/arch/powerpc/include/asm/unistd.h
+++ b/trunk/arch/powerpc/include/asm/unistd.h
@@ -345,13 +345,10 @@
#define __NR_preadv 320
#define __NR_pwritev 321
#define __NR_rt_tgsigqueueinfo 322
-#define __NR_fanotify_init 323
-#define __NR_fanotify_mark 324
-#define __NR_prlimit64 325
#ifdef __KERNEL__
-#define __NR_syscalls 326
+#define __NR_syscalls 323
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
diff --git a/trunk/arch/powerpc/kernel/cputable.c b/trunk/arch/powerpc/kernel/cputable.c
index 1f9123f412ec..65e2b4e10f97 100644
--- a/trunk/arch/powerpc/kernel/cputable.c
+++ b/trunk/arch/powerpc/kernel/cputable.c
@@ -1826,6 +1826,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_47X,
.cpu_user_features = COMMON_USER_BOOKE |
PPC_FEATURE_HAS_FPU,
+ .cpu_user_features = COMMON_USER_BOOKE,
.mmu_features = MMU_FTR_TYPE_47x |
MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
.icache_bsize = 32,
diff --git a/trunk/arch/powerpc/kernel/crash.c b/trunk/arch/powerpc/kernel/crash.c
index 4457382f8667..417f7b05a9ce 100644
--- a/trunk/arch/powerpc/kernel/crash.c
+++ b/trunk/arch/powerpc/kernel/crash.c
@@ -402,18 +402,6 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
*/
hard_irq_disable();
- /*
- * Make a note of crashing cpu. Will be used in machine_kexec
- * such that another IPI will not be sent.
- */
- crashing_cpu = smp_processor_id();
- crash_save_cpu(regs, crashing_cpu);
- crash_kexec_prepare_cpus(crashing_cpu);
- cpu_set(crashing_cpu, cpus_in_crash);
-#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
- crash_kexec_wait_realmode(crashing_cpu);
-#endif
-
for_each_irq(i) {
struct irq_desc *desc = irq_to_desc(i);
@@ -450,8 +438,18 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
crash_shutdown_cpu = -1;
__debugger_fault_handler = old_handler;
+ /*
+ * Make a note of crashing cpu. Will be used in machine_kexec
+ * such that another IPI will not be sent.
+ */
+ crashing_cpu = smp_processor_id();
+ crash_save_cpu(regs, crashing_cpu);
+ crash_kexec_prepare_cpus(crashing_cpu);
+ cpu_set(crashing_cpu, cpus_in_crash);
crash_kexec_stop_spus();
-
+#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
+ crash_kexec_wait_realmode(crashing_cpu);
+#endif
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 0);
}
diff --git a/trunk/arch/powerpc/kernel/head_44x.S b/trunk/arch/powerpc/kernel/head_44x.S
index 562305b40a8e..5ab484ef06a7 100644
--- a/trunk/arch/powerpc/kernel/head_44x.S
+++ b/trunk/arch/powerpc/kernel/head_44x.S
@@ -113,10 +113,6 @@ _ENTRY(_start);
stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */
stw r6, 0(r5)
- /* Clear the Machine Check Syndrome Register */
- li r0,0
- mtspr SPRN_MCSR,r0
-
/* Let's move on */
lis r4,start_kernel@h
ori r4,r4,start_kernel@l
diff --git a/trunk/arch/powerpc/kernel/head_64.S b/trunk/arch/powerpc/kernel/head_64.S
index c571cd3c1453..844a44b64472 100644
--- a/trunk/arch/powerpc/kernel/head_64.S
+++ b/trunk/arch/powerpc/kernel/head_64.S
@@ -572,21 +572,15 @@ __secondary_start:
/* Set thread priority to MEDIUM */
HMT_MEDIUM
- /* Initialize the kernel stack. Just a repeat for iSeries. */
- LOAD_REG_ADDR(r3, current_set)
- sldi r28,r24,3 /* get current_set[cpu#] */
- ldx r14,r3,r28
- addi r14,r14,THREAD_SIZE-STACK_FRAME_OVERHEAD
- std r14,PACAKSAVE(r13)
-
/* Do early setup for that CPU (stab, slb, hash table pointer) */
bl .early_setup_secondary
- /*
- * setup the new stack pointer, but *don't* use this until
- * translation is on.
- */
- mr r1, r14
+ /* Initialize the kernel stack. Just a repeat for iSeries. */
+ LOAD_REG_ADDR(r3, current_set)
+ sldi r28,r24,3 /* get current_set[cpu#] */
+ ldx r1,r3,r28
+ addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+ std r1,PACAKSAVE(r13)
/* Clear backchain so we get nice backtraces */
li r7,0
diff --git a/trunk/arch/powerpc/kernel/idle.c b/trunk/arch/powerpc/kernel/idle.c
index 39a2baa6ad58..049dda60e475 100644
--- a/trunk/arch/powerpc/kernel/idle.c
+++ b/trunk/arch/powerpc/kernel/idle.c
@@ -94,9 +94,9 @@ void cpu_idle(void)
HMT_medium();
ppc64_runlatch_on();
tick_nohz_restart_sched_tick();
- preempt_enable_no_resched();
if (cpu_should_die())
cpu_die();
+ preempt_enable_no_resched();
schedule();
preempt_disable();
}
diff --git a/trunk/arch/powerpc/kernel/irq.c b/trunk/arch/powerpc/kernel/irq.c
index 4a65386995d7..d3ce67cf03be 100644
--- a/trunk/arch/powerpc/kernel/irq.c
+++ b/trunk/arch/powerpc/kernel/irq.c
@@ -67,7 +67,6 @@
#include
#include
#include
-#include
#ifdef CONFIG_PPC64
#include
@@ -447,23 +446,22 @@ struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
void exc_lvl_ctx_init(void)
{
struct thread_info *tp;
- int i, hw_cpu;
+ int i;
for_each_possible_cpu(i) {
- hw_cpu = get_hard_smp_processor_id(i);
- memset((void *)critirq_ctx[hw_cpu], 0, THREAD_SIZE);
- tp = critirq_ctx[hw_cpu];
+ memset((void *)critirq_ctx[i], 0, THREAD_SIZE);
+ tp = critirq_ctx[i];
tp->cpu = i;
tp->preempt_count = 0;
#ifdef CONFIG_BOOKE
- memset((void *)dbgirq_ctx[hw_cpu], 0, THREAD_SIZE);
- tp = dbgirq_ctx[hw_cpu];
+ memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE);
+ tp = dbgirq_ctx[i];
tp->cpu = i;
tp->preempt_count = 0;
- memset((void *)mcheckirq_ctx[hw_cpu], 0, THREAD_SIZE);
- tp = mcheckirq_ctx[hw_cpu];
+ memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE);
+ tp = mcheckirq_ctx[i];
tp->cpu = i;
tp->preempt_count = HARDIRQ_OFFSET;
#endif
diff --git a/trunk/arch/powerpc/kernel/misc_32.S b/trunk/arch/powerpc/kernel/misc_32.S
index a7a570dcdd57..6bbd7a604d24 100644
--- a/trunk/arch/powerpc/kernel/misc_32.S
+++ b/trunk/arch/powerpc/kernel/misc_32.S
@@ -810,9 +810,6 @@ relocate_new_kernel:
isync
sync
- mfspr r3, SPRN_PIR /* current core we are running on */
- mr r4, r5 /* load physical address of chunk called */
-
/* jump to the entry point, usually the setup routine */
mtlr r5
blrl
diff --git a/trunk/arch/powerpc/kernel/pci_of_scan.c b/trunk/arch/powerpc/kernel/pci_of_scan.c
index e751506323b4..6ddb795f83e8 100644
--- a/trunk/arch/powerpc/kernel/pci_of_scan.c
+++ b/trunk/arch/powerpc/kernel/pci_of_scan.c
@@ -336,7 +336,7 @@ static void __devinit __of_scan_bus(struct device_node *node,
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
struct device_node *child = pci_device_to_OF_node(dev);
- if (child)
+ if (dev)
of_scan_pci_bridge(child, dev);
}
}
diff --git a/trunk/arch/powerpc/kernel/process.c b/trunk/arch/powerpc/kernel/process.c
index b1c648a36b03..feacfb789686 100644
--- a/trunk/arch/powerpc/kernel/process.c
+++ b/trunk/arch/powerpc/kernel/process.c
@@ -728,7 +728,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
p->thread.regs = childregs;
if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
- if (!is_32bit_task())
+ if (!test_thread_flag(TIF_32BIT))
childregs->gpr[13] = childregs->gpr[6];
else
#endif
@@ -823,7 +823,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
regs->nip = start;
regs->msr = MSR_USER;
#else
- if (!is_32bit_task()) {
+ if (!test_thread_flag(TIF_32BIT)) {
unsigned long entry, toc;
/* start is a relocated pointer to the function descriptor for
@@ -995,7 +995,7 @@ int sys_clone(unsigned long clone_flags, unsigned long usp,
if (usp == 0)
usp = regs->gpr[1]; /* stack pointer for child */
#ifdef CONFIG_PPC64
- if (is_32bit_task()) {
+ if (test_thread_flag(TIF_32BIT)) {
parent_tidp = TRUNC_PTR(parent_tidp);
child_tidp = TRUNC_PTR(child_tidp);
}
@@ -1034,9 +1034,8 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
flush_fp_to_thread(current);
flush_altivec_to_thread(current);
flush_spe_to_thread(current);
- error = do_execve(filename,
- (const char __user *const __user *) a1,
- (const char __user *const __user *) a2, regs);
+ error = do_execve(filename, (char __user * __user *) a1,
+ (char __user * __user *) a2, regs);
putname(filename);
out:
return error;
@@ -1199,17 +1198,19 @@ void ppc64_runlatch_on(void)
}
}
-void __ppc64_runlatch_off(void)
+void ppc64_runlatch_off(void)
{
unsigned long ctrl;
- HMT_medium();
+ if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) {
+ HMT_medium();
- clear_thread_flag(TIF_RUNLATCH);
+ clear_thread_flag(TIF_RUNLATCH);
- ctrl = mfspr(SPRN_CTRLF);
- ctrl &= ~CTRL_RUNLATCH;
- mtspr(SPRN_CTRLT, ctrl);
+ ctrl = mfspr(SPRN_CTRLF);
+ ctrl &= ~CTRL_RUNLATCH;
+ mtspr(SPRN_CTRLT, ctrl);
+ }
}
#endif
diff --git a/trunk/arch/powerpc/kernel/setup_32.c b/trunk/arch/powerpc/kernel/setup_32.c
index 93666f9cabf1..a10ffc85ada7 100644
--- a/trunk/arch/powerpc/kernel/setup_32.c
+++ b/trunk/arch/powerpc/kernel/setup_32.c
@@ -258,18 +258,17 @@ static void __init irqstack_early_init(void)
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
static void __init exc_lvl_early_init(void)
{
- unsigned int i, hw_cpu;
+ unsigned int i;
/* interrupt stacks must be in lowmem, we get that for free on ppc32
* as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
for_each_possible_cpu(i) {
- hw_cpu = get_hard_smp_processor_id(i);
- critirq_ctx[hw_cpu] = (struct thread_info *)
+ critirq_ctx[i] = (struct thread_info *)
__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
#ifdef CONFIG_BOOKE
- dbgirq_ctx[hw_cpu] = (struct thread_info *)
+ dbgirq_ctx[i] = (struct thread_info *)
__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
- mcheckirq_ctx[hw_cpu] = (struct thread_info *)
+ mcheckirq_ctx[i] = (struct thread_info *)
__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
#endif
}
diff --git a/trunk/arch/powerpc/kernel/setup_64.c b/trunk/arch/powerpc/kernel/setup_64.c
index e72690ec9b87..1bee4b68fa45 100644
--- a/trunk/arch/powerpc/kernel/setup_64.c
+++ b/trunk/arch/powerpc/kernel/setup_64.c
@@ -95,7 +95,7 @@ int ucache_bsize;
#ifdef CONFIG_SMP
-static char *smt_enabled_cmdline;
+static int smt_enabled_cmdline;
/* Look for ibm,smt-enabled OF option */
static void check_smt_enabled(void)
@@ -103,46 +103,37 @@ static void check_smt_enabled(void)
struct device_node *dn;
const char *smt_option;
- /* Default to enabling all threads */
- smt_enabled_at_boot = threads_per_core;
-
/* Allow the command line to overrule the OF option */
- if (smt_enabled_cmdline) {
- if (!strcmp(smt_enabled_cmdline, "on"))
- smt_enabled_at_boot = threads_per_core;
- else if (!strcmp(smt_enabled_cmdline, "off"))
- smt_enabled_at_boot = 0;
- else {
- long smt;
- int rc;
-
- rc = strict_strtol(smt_enabled_cmdline, 10, &smt);
- if (!rc)
- smt_enabled_at_boot =
- min(threads_per_core, (int)smt);
- }
- } else {
- dn = of_find_node_by_path("/options");
- if (dn) {
- smt_option = of_get_property(dn, "ibm,smt-enabled",
- NULL);
-
- if (smt_option) {
- if (!strcmp(smt_option, "on"))
- smt_enabled_at_boot = threads_per_core;
- else if (!strcmp(smt_option, "off"))
- smt_enabled_at_boot = 0;
- }
-
- of_node_put(dn);
- }
- }
+ if (smt_enabled_cmdline)
+ return;
+
+ dn = of_find_node_by_path("/options");
+
+ if (dn) {
+ smt_option = of_get_property(dn, "ibm,smt-enabled", NULL);
+
+ if (smt_option) {
+ if (!strcmp(smt_option, "on"))
+ smt_enabled_at_boot = 1;
+ else if (!strcmp(smt_option, "off"))
+ smt_enabled_at_boot = 0;
+ }
+ }
}
/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
- smt_enabled_cmdline = p;
+ smt_enabled_cmdline = 1;
+
+ if (!p)
+ return 0;
+
+ if (!strcmp(p, "on") || !strcmp(p, "1"))
+ smt_enabled_at_boot = 1;
+ else if (!strcmp(p, "off") || !strcmp(p, "0"))
+ smt_enabled_at_boot = 0;
+
return 0;
}
early_param("smt-enabled", early_smt_enabled);
@@ -389,8 +380,8 @@ void __init setup_system(void)
*/
xmon_setup();
- smp_setup_cpu_maps();
check_smt_enabled();
+ smp_setup_cpu_maps();
#ifdef CONFIG_SMP
/* Release secondary cpus out of their spinloops at 0x60 now that
diff --git a/trunk/arch/powerpc/kernel/smp.c b/trunk/arch/powerpc/kernel/smp.c
index 0008bc58e826..a61b3ddd7bb3 100644
--- a/trunk/arch/powerpc/kernel/smp.c
+++ b/trunk/arch/powerpc/kernel/smp.c
@@ -427,11 +427,11 @@ int __cpuinit __cpu_up(unsigned int cpu)
#endif
if (!cpu_callin_map[cpu]) {
- printk(KERN_ERR "Processor %u is stuck.\n", cpu);
+ printk("Processor %u is stuck.\n", cpu);
return -ENOENT;
}
- DBG("Processor %u found.\n", cpu);
+ printk("Processor %u found.\n", cpu);
if (smp_ops->give_timebase)
smp_ops->give_timebase();
diff --git a/trunk/arch/powerpc/kernel/sys_ppc32.c b/trunk/arch/powerpc/kernel/sys_ppc32.c
index b1b6043a56c4..20fd701a686a 100644
--- a/trunk/arch/powerpc/kernel/sys_ppc32.c
+++ b/trunk/arch/powerpc/kernel/sys_ppc32.c
@@ -616,11 +616,3 @@ asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags,
return sys_sync_file_range(fd, offset, nbytes, flags);
}
-
-asmlinkage long compat_sys_fanotify_mark(int fanotify_fd, unsigned int flags,
- unsigned mask_hi, unsigned mask_lo,
- int dfd, const char __user *pathname)
-{
- u64 mask = ((u64)mask_hi << 32) | mask_lo;
- return sys_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
-}
diff --git a/trunk/arch/powerpc/kernel/time.c b/trunk/arch/powerpc/kernel/time.c
index 8533b3b83f5d..ce53dfa7130d 100644
--- a/trunk/arch/powerpc/kernel/time.c
+++ b/trunk/arch/powerpc/kernel/time.c
@@ -577,11 +577,20 @@ void timer_interrupt(struct pt_regs * regs)
* some CPUs will continue to take decrementer exceptions */
set_dec(DECREMENTER_MAX);
-#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
+#ifdef CONFIG_PPC32
if (atomic_read(&ppc_n_lost_interrupts) != 0)
do_IRQ(regs);
#endif
+ now = get_tb_or_rtc();
+ if (now < decrementer->next_tb) {
+ /* not time for this event yet */
+ now = decrementer->next_tb - now;
+ if (now <= DECREMENTER_MAX)
+ set_dec((int)now);
+ trace_timer_interrupt_exit(regs);
+ return;
+ }
old_regs = set_irq_regs(regs);
irq_enter();
@@ -597,16 +606,8 @@ void timer_interrupt(struct pt_regs * regs)
get_lppaca()->int_dword.fields.decr_int = 0;
#endif
- now = get_tb_or_rtc();
- if (now >= decrementer->next_tb) {
- decrementer->next_tb = ~(u64)0;
- if (evt->event_handler)
- evt->event_handler(evt);
- } else {
- now = decrementer->next_tb - now;
- if (now <= DECREMENTER_MAX)
- set_dec((int)now);
- }
+ if (evt->event_handler)
+ evt->event_handler(evt);
#ifdef CONFIG_PPC_ISERIES
if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
diff --git a/trunk/arch/powerpc/kernel/vio.c b/trunk/arch/powerpc/kernel/vio.c
index fa3469ddaef8..00b9436f7652 100644
--- a/trunk/arch/powerpc/kernel/vio.c
+++ b/trunk/arch/powerpc/kernel/vio.c
@@ -1059,7 +1059,7 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
if (!dma_window)
return NULL;
- tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+ tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
if (tbl == NULL)
return NULL;
@@ -1072,7 +1072,6 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
tbl->it_busno = 0;
tbl->it_type = TCE_VB;
- tbl->it_blocksize = 16;
return iommu_init_table(tbl, -1);
}
diff --git a/trunk/arch/powerpc/mm/init_64.c b/trunk/arch/powerpc/mm/init_64.c
index ace85fa74b29..71f1415e2472 100644
--- a/trunk/arch/powerpc/mm/init_64.c
+++ b/trunk/arch/powerpc/mm/init_64.c
@@ -79,9 +79,7 @@
#endif /* CONFIG_PPC_STD_MMU_64 */
phys_addr_t memstart_addr = ~0;
-EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
-EXPORT_SYMBOL_GPL(kernstart_addr);
void free_initmem(void)
{
diff --git a/trunk/arch/powerpc/mm/tlb_nohash_low.S b/trunk/arch/powerpc/mm/tlb_nohash_low.S
index b9d9fed8f36e..cfa768203d08 100644
--- a/trunk/arch/powerpc/mm/tlb_nohash_low.S
+++ b/trunk/arch/powerpc/mm/tlb_nohash_low.S
@@ -200,7 +200,6 @@ _GLOBAL(_tlbivax_bcast)
rlwimi r5,r4,0,16,31
wrteei 0
mtspr SPRN_MMUCR,r5
- isync
/* tlbivax 0,r3 - use .long to avoid binutils deps */
.long 0x7c000624 | (r3 << 11)
isync
diff --git a/trunk/arch/powerpc/platforms/83xx/mpc837x_mds.c b/trunk/arch/powerpc/platforms/83xx/mpc837x_mds.c
index 83068322abd1..f9751c8905be 100644
--- a/trunk/arch/powerpc/platforms/83xx/mpc837x_mds.c
+++ b/trunk/arch/powerpc/platforms/83xx/mpc837x_mds.c
@@ -48,10 +48,8 @@ static int mpc837xmds_usb_cfg(void)
return -1;
np = of_find_node_by_name(NULL, "usb");
- if (!np) {
- ret = -ENODEV;
- goto out;
- }
+ if (!np)
+ return -ENODEV;
phy_type = of_get_property(np, "phy_type", NULL);
if (phy_type && !strcmp(phy_type, "ulpi")) {
clrbits8(bcsr_regs + 12, BCSR12_USB_SER_PIN);
@@ -67,9 +65,8 @@ static int mpc837xmds_usb_cfg(void)
}
of_node_put(np);
-out:
iounmap(bcsr_regs);
- return ret;
+ return 0;
}
/* ************************************************************************
diff --git a/trunk/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/trunk/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index aa34cac4eb5c..da64be19d099 100644
--- a/trunk/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/trunk/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -357,7 +357,6 @@ static void __init mpc85xx_mds_setup_arch(void)
{
#ifdef CONFIG_PCI
struct pci_controller *hose;
- struct device_node *np;
#endif
dma_addr_t max = 0xffffffff;
diff --git a/trunk/arch/powerpc/platforms/85xx/p1022_ds.c b/trunk/arch/powerpc/platforms/85xx/p1022_ds.c
index 34e00902ce86..e1467c937450 100644
--- a/trunk/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/trunk/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -19,7 +19,7 @@
#include
#include
-#include <linux/memblock.h>
+#include <linux/lmb.h>
#include
#include
@@ -97,7 +97,7 @@ static void __init p1022_ds_setup_arch(void)
#endif
#ifdef CONFIG_SWIOTLB
- if (memblock_end_of_DRAM() > max) {
+ if (lmb_end_of_DRAM() > max) {
ppc_swiotlb_enable = 1;
set_pci_dma_ops(&swiotlb_dma_ops);
ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
diff --git a/trunk/arch/powerpc/platforms/Kconfig b/trunk/arch/powerpc/platforms/Kconfig
index 81c9208025fa..d1663db7810f 100644
--- a/trunk/arch/powerpc/platforms/Kconfig
+++ b/trunk/arch/powerpc/platforms/Kconfig
@@ -106,7 +106,8 @@ config MMIO_NVRAM
config MPIC_U3_HT_IRQS
bool
- default n
+ depends on PPC_MAPLE
+ default y
config MPIC_BROKEN_REGREAD
bool
diff --git a/trunk/arch/powerpc/platforms/cell/iommu.c b/trunk/arch/powerpc/platforms/cell/iommu.c
index 26a067122a54..58b13ce3847e 100644
--- a/trunk/arch/powerpc/platforms/cell/iommu.c
+++ b/trunk/arch/powerpc/platforms/cell/iommu.c
@@ -477,7 +477,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
ioid = cell_iommu_get_ioid(np);
- window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
+ window = kmalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
BUG_ON(window == NULL);
window->offset = offset;
diff --git a/trunk/arch/powerpc/platforms/iseries/iommu.c b/trunk/arch/powerpc/platforms/iseries/iommu.c
index d8b76335bd13..ce61cea0afb5 100644
--- a/trunk/arch/powerpc/platforms/iseries/iommu.c
+++ b/trunk/arch/powerpc/platforms/iseries/iommu.c
@@ -184,7 +184,7 @@ static void pci_dma_dev_setup_iseries(struct pci_dev *pdev)
BUG_ON(lsn == NULL);
- tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
+ tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
iommu_table_getparms_iSeries(pdn->busno, *lsn, 0, tbl);
diff --git a/trunk/arch/powerpc/platforms/powermac/feature.c b/trunk/arch/powerpc/platforms/powermac/feature.c
index df423993f175..39df6ab1735a 100644
--- a/trunk/arch/powerpc/platforms/powermac/feature.c
+++ b/trunk/arch/powerpc/platforms/powermac/feature.c
@@ -2873,11 +2873,12 @@ set_initial_features(void)
/* Switch airport off */
for_each_node_by_name(np, "radio") {
- if (np->parent == macio_chips[0].of_node) {
+ if (np && np->parent == macio_chips[0].of_node) {
macio_chips[0].flags |= MACIO_FLAG_AIRPORT_ON;
core99_airport_enable(np, 0, 0);
}
}
+ of_node_put(np);
}
/* On all machines that support sound PM, switch sound off */
diff --git a/trunk/arch/powerpc/platforms/powermac/pci.c b/trunk/arch/powerpc/platforms/powermac/pci.c
index 3bc075c788ef..ab2027cdf893 100644
--- a/trunk/arch/powerpc/platforms/powermac/pci.c
+++ b/trunk/arch/powerpc/platforms/powermac/pci.c
@@ -1155,11 +1155,13 @@ void __init pmac_pcibios_after_init(void)
pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, nd, 0, 0);
}
}
+ of_node_put(nd);
for_each_node_by_name(nd, "ethernet") {
if (nd->parent && of_device_is_compatible(nd, "gmac")
&& of_device_is_compatible(nd->parent, "uni-north"))
pmac_call_feature(PMAC_FTR_GMAC_ENABLE, nd, 0, 0);
}
+ of_node_put(nd);
}
void pmac_pci_fixup_cardbus(struct pci_dev* dev)
diff --git a/trunk/arch/powerpc/platforms/pseries/dlpar.c b/trunk/arch/powerpc/platforms/pseries/dlpar.c
index 72d8054fa739..227c1c3d585e 100644
--- a/trunk/arch/powerpc/platforms/pseries/dlpar.c
+++ b/trunk/arch/powerpc/platforms/pseries/dlpar.c
@@ -129,35 +129,20 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
struct property *property;
struct property *last_property = NULL;
struct cc_workarea *ccwa;
- char *data_buf;
int cc_token;
- int rc = -1;
+ int rc;
cc_token = rtas_token("ibm,configure-connector");
if (cc_token == RTAS_UNKNOWN_SERVICE)
return NULL;
- data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
- if (!data_buf)
- return NULL;
-
- ccwa = (struct cc_workarea *)&data_buf[0];
+ spin_lock(&rtas_data_buf_lock);
+ ccwa = (struct cc_workarea *)&rtas_data_buf[0];
ccwa->drc_index = drc_index;
ccwa->zero = 0;
- do {
- /* Since we release the rtas_data_buf lock between configure
- * connector calls we want to re-populate the rtas_data_buffer
- * with the contents of the previous call.
- */
- spin_lock(&rtas_data_buf_lock);
-
- memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
- rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
- memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);
-
- spin_unlock(&rtas_data_buf_lock);
-
+ rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
+ while (rc) {
switch (rc) {
case NEXT_SIBLING:
dn = dlpar_parse_cc_node(ccwa);
@@ -212,19 +197,18 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
"returned from configure-connector\n", rc);
goto cc_error;
}
- } while (rc);
-cc_error:
- kfree(data_buf);
-
- if (rc) {
- if (first_dn)
- dlpar_free_cc_nodes(first_dn);
-
- return NULL;
+ rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
}
+ spin_unlock(&rtas_data_buf_lock);
return first_dn;
+
+cc_error:
+ if (first_dn)
+ dlpar_free_cc_nodes(first_dn);
+ spin_unlock(&rtas_data_buf_lock);
+ return NULL;
}
static struct device_node *derive_parent(const char *path)
diff --git a/trunk/arch/powerpc/platforms/pseries/iommu.c b/trunk/arch/powerpc/platforms/pseries/iommu.c
index a77bcaed80af..395848e30c52 100644
--- a/trunk/arch/powerpc/platforms/pseries/iommu.c
+++ b/trunk/arch/powerpc/platforms/pseries/iommu.c
@@ -403,7 +403,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
pci->phb->dma_window_size = 0x8000000ul;
pci->phb->dma_window_base_cur = 0x8000000ul;
- tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+ tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
pci->phb->node);
iommu_table_setparms(pci->phb, dn, tbl);
@@ -448,7 +448,7 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
pdn->full_name, ppci->iommu_table);
if (!ppci->iommu_table) {
- tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+ tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
ppci->phb->node);
iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window,
bus->number);
@@ -478,7 +478,7 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
struct pci_controller *phb = PCI_DN(dn)->phb;
pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
- tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+ tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
phb->node);
iommu_table_setparms(phb, dn, tbl);
PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
@@ -544,7 +544,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
pci = PCI_DN(pdn);
if (!pci->iommu_table) {
- tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+ tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
pci->phb->node);
iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window,
pci->phb->bus->number);
diff --git a/trunk/arch/powerpc/platforms/pseries/smp.c b/trunk/arch/powerpc/platforms/pseries/smp.c
index 0317cce877c6..3b1bf61c45be 100644
--- a/trunk/arch/powerpc/platforms/pseries/smp.c
+++ b/trunk/arch/powerpc/platforms/pseries/smp.c
@@ -182,13 +182,10 @@ static int smp_pSeries_cpu_bootable(unsigned int nr)
/* Special case - we inhibit secondary thread startup
* during boot if the user requests it.
*/
- if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
- if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
- return 0;
- if (smt_enabled_at_boot
- && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
- return 0;
- }
+ if (system_state < SYSTEM_RUNNING &&
+ cpu_has_feature(CPU_FTR_SMT) &&
+ !smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
+ return 0;
return 1;
}
diff --git a/trunk/arch/powerpc/platforms/pseries/xics.c b/trunk/arch/powerpc/platforms/pseries/xics.c
index 93834b0d8272..5b22b07c8f67 100644
--- a/trunk/arch/powerpc/platforms/pseries/xics.c
+++ b/trunk/arch/powerpc/platforms/pseries/xics.c
@@ -928,10 +928,8 @@ void xics_migrate_irqs_away(void)
if (xics_status[0] != hw_cpu)
goto unlock;
- /* This is expected during cpu offline. */
- if (cpu_online(cpu))
- printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
- virq, cpu);
+ printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
+ virq, cpu);
/* Reset affinity to all cpus */
cpumask_setall(irq_to_desc(virq)->affinity);
diff --git a/trunk/arch/powerpc/sysdev/fsl_pci.c b/trunk/arch/powerpc/sysdev/fsl_pci.c
index 4ae933225251..209384b6e039 100644
--- a/trunk/arch/powerpc/sysdev/fsl_pci.c
+++ b/trunk/arch/powerpc/sysdev/fsl_pci.c
@@ -399,8 +399,6 @@ DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013E, quirk_fsl_pcie_header);
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013, quirk_fsl_pcie_header);
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020E, quirk_fsl_pcie_header);
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021E, quirk_fsl_pcie_header);
-DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021, quirk_fsl_pcie_header);
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022E, quirk_fsl_pcie_header);
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022, quirk_fsl_pcie_header);
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header);
diff --git a/trunk/arch/powerpc/sysdev/fsl_rio.c b/trunk/arch/powerpc/sysdev/fsl_rio.c
index 3017532319c8..6425abe5b7db 100644
--- a/trunk/arch/powerpc/sysdev/fsl_rio.c
+++ b/trunk/arch/powerpc/sysdev/fsl_rio.c
@@ -240,13 +240,12 @@ struct rio_priv {
static void __iomem *rio_regs_win;
-#ifdef CONFIG_E500
static int (*saved_mcheck_exception)(struct pt_regs *regs);
static int fsl_rio_mcheck_exception(struct pt_regs *regs)
{
const struct exception_table_entry *entry = NULL;
- unsigned long reason = mfspr(SPRN_MCSR);
+ unsigned long reason = (mfspr(SPRN_MCSR) & MCSR_MASK);
if (reason & MCSR_BUS_RBERR) {
reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
@@ -270,7 +269,6 @@ static int fsl_rio_mcheck_exception(struct pt_regs *regs)
else
return cur_cpu_spec->machine_check(regs);
}
-#endif
/**
* fsl_rio_doorbell_send - Send a MPC85xx doorbell message
@@ -1519,10 +1517,8 @@ int fsl_rio_setup(struct platform_device *dev)
fsl_rio_doorbell_init(port);
fsl_rio_port_write_init(port);
-#ifdef CONFIG_E500
saved_mcheck_exception = ppc_md.machine_check_exception;
ppc_md.machine_check_exception = fsl_rio_mcheck_exception;
-#endif
/* Ensure that RFXE is set */
mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000));
diff --git a/trunk/arch/powerpc/sysdev/qe_lib/qe.c b/trunk/arch/powerpc/sysdev/qe_lib/qe.c
index 90020de4dcf2..3da8014931c9 100644
--- a/trunk/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/trunk/arch/powerpc/sysdev/qe_lib/qe.c
@@ -640,7 +640,6 @@ unsigned int qe_get_num_of_snums(void)
if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) {
/* No QE ever has fewer than 28 SNUMs */
pr_err("QE: number of snum is invalid\n");
- of_node_put(qe);
return -EINVAL;
}
}
diff --git a/trunk/arch/powerpc/xmon/xmon.c b/trunk/arch/powerpc/xmon/xmon.c
index d17d04cfb2cd..0554445200bf 100644
--- a/trunk/arch/powerpc/xmon/xmon.c
+++ b/trunk/arch/powerpc/xmon/xmon.c
@@ -2880,14 +2880,15 @@ static void xmon_init(int enable)
}
#ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_xmon(int key)
+static void sysrq_handle_xmon(int key, struct tty_struct *tty)
{
/* ensure xmon is enabled */
xmon_init(1);
debugger(get_irq_regs());
}
-static struct sysrq_key_op sysrq_xmon_op = {
+static struct sysrq_key_op sysrq_xmon_op =
+{
.handler = sysrq_handle_xmon,
.help_msg = "Xmon",
.action_msg = "Entering xmon",
diff --git a/trunk/arch/s390/include/asm/hugetlb.h b/trunk/arch/s390/include/asm/hugetlb.h
index bb8343d157bc..670a1d1745d2 100644
--- a/trunk/arch/s390/include/asm/hugetlb.h
+++ b/trunk/arch/s390/include/asm/hugetlb.h
@@ -97,7 +97,6 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
{
pte_t pte = huge_ptep_get(ptep);
- mm->context.flush_mm = 1;
pmd_clear((pmd_t *) ptep);
return pte;
}
@@ -168,8 +167,7 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
({ \
pte_t __pte = huge_ptep_get(__ptep); \
if (pte_write(__pte)) { \
- (__mm)->context.flush_mm = 1; \
- if (atomic_read(&(__mm)->context.attach_count) > 1 || \
+ if (atomic_read(&(__mm)->mm_users) > 1 || \
(__mm) != current->active_mm) \
huge_ptep_invalidate(__mm, __addr, __ptep); \
set_huge_pte_at(__mm, __addr, __ptep, \
diff --git a/trunk/arch/s390/include/asm/mmu.h b/trunk/arch/s390/include/asm/mmu.h
index 78522cdefdd4..99e3409102b9 100644
--- a/trunk/arch/s390/include/asm/mmu.h
+++ b/trunk/arch/s390/include/asm/mmu.h
@@ -2,8 +2,6 @@
#define __MMU_H
typedef struct {
- atomic_t attach_count;
- unsigned int flush_mm;
spinlock_t list_lock;
struct list_head crst_list;
struct list_head pgtable_list;
diff --git a/trunk/arch/s390/include/asm/mmu_context.h b/trunk/arch/s390/include/asm/mmu_context.h
index a6f0e7cc9cde..976e273988c2 100644
--- a/trunk/arch/s390/include/asm/mmu_context.h
+++ b/trunk/arch/s390/include/asm/mmu_context.h
@@ -11,14 +11,11 @@
#include
#include
-#include <asm/tlbflush.h>
#include
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
- atomic_set(&mm->context.attach_count, 0);
- mm->context.flush_mm = 0;
mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
#ifdef CONFIG_64BIT
mm->context.asce_bits |= _ASCE_TYPE_REGION3;
@@ -79,12 +76,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
update_mm(next, tsk);
- atomic_dec(&prev->context.attach_count);
- WARN_ON(atomic_read(&prev->context.attach_count) < 0);
- atomic_inc(&next->context.attach_count);
- /* Check for TLBs not flushed yet */
- if (next->context.flush_mm)
- __tlb_flush_mm(next);
}
#define enter_lazy_tlb(mm,tsk) do { } while (0)
diff --git a/trunk/arch/s390/include/asm/pgtable.h b/trunk/arch/s390/include/asm/pgtable.h
index 3157441ee1da..89a504c3f12e 100644
--- a/trunk/arch/s390/include/asm/pgtable.h
+++ b/trunk/arch/s390/include/asm/pgtable.h
@@ -880,8 +880,7 @@ static inline void ptep_invalidate(struct mm_struct *mm,
#define ptep_get_and_clear(__mm, __address, __ptep) \
({ \
pte_t __pte = *(__ptep); \
- (__mm)->context.flush_mm = 1; \
- if (atomic_read(&(__mm)->context.attach_count) > 1 || \
+ if (atomic_read(&(__mm)->mm_users) > 1 || \
(__mm) != current->active_mm) \
ptep_invalidate(__mm, __address, __ptep); \
else \
@@ -924,8 +923,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
({ \
pte_t __pte = *(__ptep); \
if (pte_write(__pte)) { \
- (__mm)->context.flush_mm = 1; \
- if (atomic_read(&(__mm)->context.attach_count) > 1 || \
+ if (atomic_read(&(__mm)->mm_users) > 1 || \
(__mm) != current->active_mm) \
ptep_invalidate(__mm, __addr, __ptep); \
set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
diff --git a/trunk/arch/s390/include/asm/tlb.h b/trunk/arch/s390/include/asm/tlb.h
index fd1c00d08bf5..81150b053689 100644
--- a/trunk/arch/s390/include/asm/tlb.h
+++ b/trunk/arch/s390/include/asm/tlb.h
@@ -50,7 +50,8 @@ static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
tlb->mm = mm;
- tlb->fullmm = full_mm_flush;
+ tlb->fullmm = full_mm_flush || (num_online_cpus() == 1) ||
+ (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm);
tlb->nr_ptes = 0;
tlb->nr_pxds = TLB_NR_PTRS;
if (tlb->fullmm)
diff --git a/trunk/arch/s390/include/asm/tlbflush.h b/trunk/arch/s390/include/asm/tlbflush.h
index 29d5d6d4becc..304cffa623e1 100644
--- a/trunk/arch/s390/include/asm/tlbflush.h
+++ b/trunk/arch/s390/include/asm/tlbflush.h
@@ -94,12 +94,8 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
{
- spin_lock(&mm->page_table_lock);
- if (mm->context.flush_mm) {
+ if (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm)
__tlb_flush_mm(mm);
- mm->context.flush_mm = 0;
- }
- spin_unlock(&mm->page_table_lock);
}
/*
diff --git a/trunk/arch/s390/kernel/entry.h b/trunk/arch/s390/kernel/entry.h
index ff579b6bde06..403fb430a896 100644
--- a/trunk/arch/s390/kernel/entry.h
+++ b/trunk/arch/s390/kernel/entry.h
@@ -42,8 +42,8 @@ long sys_clone(unsigned long newsp, unsigned long clone_flags,
int __user *parent_tidptr, int __user *child_tidptr);
long sys_vfork(void);
void execve_tail(void);
-long sys_execve(const char __user *name, const char __user *const __user *argv,
- const char __user *const __user *envp);
+long sys_execve(const char __user *name, char __user * __user *argv,
+ char __user * __user *envp);
long sys_sigsuspend(int history0, int history1, old_sigset_t mask);
long sys_sigaction(int sig, const struct old_sigaction __user *act,
struct old_sigaction __user *oact);
diff --git a/trunk/arch/s390/kernel/process.c b/trunk/arch/s390/kernel/process.c
index d3a2d1c6438e..7eafaf2662b9 100644
--- a/trunk/arch/s390/kernel/process.c
+++ b/trunk/arch/s390/kernel/process.c
@@ -267,9 +267,8 @@ asmlinkage void execve_tail(void)
/*
* sys_execve() executes a new program.
*/
-SYSCALL_DEFINE3(execve, const char __user *, name,
- const char __user *const __user *, argv,
- const char __user *const __user *, envp)
+SYSCALL_DEFINE3(execve, const char __user *, name, char __user * __user *, argv,
+ char __user * __user *, envp)
{
struct pt_regs *regs = task_pt_regs(current);
char *filename;
diff --git a/trunk/arch/s390/kernel/smp.c b/trunk/arch/s390/kernel/smp.c
index 8127ebd59c4d..541053ed234e 100644
--- a/trunk/arch/s390/kernel/smp.c
+++ b/trunk/arch/s390/kernel/smp.c
@@ -583,7 +583,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
sf->gprs[9] = (unsigned long) sf;
cpu_lowcore->save_area[15] = (unsigned long) sf;
__ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
- atomic_inc(&init_mm.context.attach_count);
asm volatile(
" stam 0,15,0(%0)"
: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
@@ -660,7 +659,6 @@ void __cpu_die(unsigned int cpu)
while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
udelay(10);
smp_free_lowcore(cpu);
- atomic_dec(&init_mm.context.attach_count);
pr_info("Processor %d stopped\n", cpu);
}
diff --git a/trunk/arch/s390/mm/init.c b/trunk/arch/s390/mm/init.c
index 30eb6d02ddb8..acc91c75bc94 100644
--- a/trunk/arch/s390/mm/init.c
+++ b/trunk/arch/s390/mm/init.c
@@ -74,8 +74,6 @@ void __init paging_init(void)
__ctl_load(S390_lowcore.kernel_asce, 13, 13);
__raw_local_irq_ssm(ssm_mask);
- atomic_set(&init_mm.context.attach_count, 1);
-
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init();
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
diff --git a/trunk/arch/score/kernel/sys_score.c b/trunk/arch/score/kernel/sys_score.c
index e478bf9a7e91..651096ff8db4 100644
--- a/trunk/arch/score/kernel/sys_score.c
+++ b/trunk/arch/score/kernel/sys_score.c
@@ -99,10 +99,8 @@ score_execve(struct pt_regs *regs)
if (IS_ERR(filename))
return error;
- error = do_execve(filename,
- (const char __user *const __user *)regs->regs[5],
- (const char __user *const __user *)regs->regs[6],
- regs);
+ error = do_execve(filename, (char __user *__user*)regs->regs[5],
+ (char __user *__user *) regs->regs[6], regs);
putname(filename);
return error;
@@ -112,9 +110,7 @@ score_execve(struct pt_regs *regs)
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
*/
-int kernel_execve(const char *filename,
- const char *const argv[],
- const char *const envp[])
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
register unsigned long __r4 asm("r4") = (unsigned long) filename;
register unsigned long __r5 asm("r5") = (unsigned long) argv;
diff --git a/trunk/arch/sh/kernel/process_32.c b/trunk/arch/sh/kernel/process_32.c
index 762a13984bbd..052981972ae6 100644
--- a/trunk/arch/sh/kernel/process_32.c
+++ b/trunk/arch/sh/kernel/process_32.c
@@ -296,10 +296,9 @@ asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
/*
* sys_execve() executes a new program.
*/
-asmlinkage int sys_execve(const char __user *ufilename,
- const char __user *const __user *uargv,
- const char __user *const __user *uenvp,
- unsigned long r7, struct pt_regs __regs)
+asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv,
+ char __user * __user *uenvp, unsigned long r7,
+ struct pt_regs __regs)
{
struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
int error;
diff --git a/trunk/arch/sh/kernel/process_64.c b/trunk/arch/sh/kernel/process_64.c
index 210c1cabcb7f..68d128d651b3 100644
--- a/trunk/arch/sh/kernel/process_64.c
+++ b/trunk/arch/sh/kernel/process_64.c
@@ -497,8 +497,8 @@ asmlinkage int sys_execve(const char *ufilename, char **uargv,
goto out;
error = do_execve(filename,
- (const char __user *const __user *)uargv,
- (const char __user *const __user *)uenvp,
+ (char __user * __user *)uargv,
+ (char __user * __user *)uenvp,
pregs);
putname(filename);
out:
diff --git a/trunk/arch/sh/kernel/sys_sh32.c b/trunk/arch/sh/kernel/sys_sh32.c
index f56b6fe5c5d0..eb68bfdd86e6 100644
--- a/trunk/arch/sh/kernel/sys_sh32.c
+++ b/trunk/arch/sh/kernel/sys_sh32.c
@@ -71,9 +71,7 @@ asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
*/
-int kernel_execve(const char *filename,
- const char *const argv[],
- const char *const envp[])
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
register long __sc0 __asm__ ("r3") = __NR_execve;
register long __sc4 __asm__ ("r4") = (long) filename;
diff --git a/trunk/arch/sh/kernel/sys_sh64.c b/trunk/arch/sh/kernel/sys_sh64.c
index c5a38c4bf410..287235768bc5 100644
--- a/trunk/arch/sh/kernel/sys_sh64.c
+++ b/trunk/arch/sh/kernel/sys_sh64.c
@@ -33,9 +33,7 @@
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
*/
-int kernel_execve(const char *filename,
- const char *const argv[],
- const char *const envp[])
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_execve);
register unsigned long __sc2 __asm__ ("r2") = (unsigned long) filename;
diff --git a/trunk/arch/sparc/kernel/process_32.c b/trunk/arch/sparc/kernel/process_32.c
index 17529298c50a..40e29fc8a4d6 100644
--- a/trunk/arch/sparc/kernel/process_32.c
+++ b/trunk/arch/sparc/kernel/process_32.c
@@ -633,10 +633,8 @@ asmlinkage int sparc_execve(struct pt_regs *regs)
if(IS_ERR(filename))
goto out;
error = do_execve(filename,
- (const char __user *const __user *)
- regs->u_regs[base + UREG_I1],
- (const char __user *const __user *)
- regs->u_regs[base + UREG_I2],
+ (char __user * __user *)regs->u_regs[base + UREG_I1],
+ (char __user * __user *)regs->u_regs[base + UREG_I2],
regs);
putname(filename);
out:
diff --git a/trunk/arch/sparc/kernel/process_64.c b/trunk/arch/sparc/kernel/process_64.c
index c158a95ec664..dbe81a368b45 100644
--- a/trunk/arch/sparc/kernel/process_64.c
+++ b/trunk/arch/sparc/kernel/process_64.c
@@ -303,7 +303,7 @@ void arch_trigger_all_cpu_backtrace(void)
#ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_globreg(int key)
+static void sysrq_handle_globreg(int key, struct tty_struct *tty)
{
arch_trigger_all_cpu_backtrace();
}
@@ -739,9 +739,9 @@ asmlinkage int sparc_execve(struct pt_regs *regs)
if (IS_ERR(filename))
goto out;
error = do_execve(filename,
- (const char __user *const __user *)
+ (char __user * __user *)
regs->u_regs[base + UREG_I1],
- (const char __user *const __user *)
+ (char __user * __user *)
regs->u_regs[base + UREG_I2], regs);
putname(filename);
if (!error) {
diff --git a/trunk/arch/sparc/kernel/sys_sparc_32.c b/trunk/arch/sparc/kernel/sys_sparc_32.c
index 50794137d710..de45de4851e9 100644
--- a/trunk/arch/sparc/kernel/sys_sparc_32.c
+++ b/trunk/arch/sparc/kernel/sys_sparc_32.c
@@ -166,7 +166,6 @@ sparc_breakpoint (struct pt_regs *regs)
{
siginfo_t info;
- lock_kernel();
#ifdef DEBUG_SPARC_BREAKPOINT
printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc);
#endif
@@ -180,7 +179,6 @@ sparc_breakpoint (struct pt_regs *regs)
#ifdef DEBUG_SPARC_BREAKPOINT
printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc);
#endif
- unlock_kernel();
}
asmlinkage int
@@ -282,9 +280,7 @@ asmlinkage int sys_getdomainname(char __user *name, int len)
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
*/
-int kernel_execve(const char *filename,
- const char *const argv[],
- const char *const envp[])
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
long __res;
register long __g1 __asm__ ("g1") = __NR_execve;
diff --git a/trunk/arch/sparc/kernel/sys_sparc_64.c b/trunk/arch/sparc/kernel/sys_sparc_64.c
index f836f4e93afe..3d435c42e6db 100644
--- a/trunk/arch/sparc/kernel/sys_sparc_64.c
+++ b/trunk/arch/sparc/kernel/sys_sparc_64.c
@@ -758,9 +758,7 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
*/
-int kernel_execve(const char *filename,
- const char *const argv[],
- const char *const envp[])
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
long __res;
register long __g1 __asm__ ("g1") = __NR_execve;
diff --git a/trunk/arch/sparc/kernel/unaligned_32.c b/trunk/arch/sparc/kernel/unaligned_32.c
index f8514e291e15..12b9f352595f 100644
--- a/trunk/arch/sparc/kernel/unaligned_32.c
+++ b/trunk/arch/sparc/kernel/unaligned_32.c
@@ -323,7 +323,6 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
enum direction dir;
- lock_kernel();
if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
(((insn >> 30) & 3) != 3))
goto kill_user;
@@ -377,5 +376,5 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
kill_user:
user_mna_trap_fault(regs, insn);
out:
- unlock_kernel();
+ ;
}
diff --git a/trunk/arch/sparc/kernel/windows.c b/trunk/arch/sparc/kernel/windows.c
index f24d298bda29..b351770cbdd6 100644
--- a/trunk/arch/sparc/kernel/windows.c
+++ b/trunk/arch/sparc/kernel/windows.c
@@ -112,7 +112,6 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who)
struct thread_info *tp = current_thread_info();
int window;
- lock_kernel();
flush_user_windows();
for(window = 0; window < tp->w_saved; window++) {
unsigned long sp = tp->rwbuf_stkptrs[window];
@@ -123,5 +122,4 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who)
do_exit(SIGILL);
}
tp->w_saved = 0;
- unlock_kernel();
}
diff --git a/trunk/arch/tile/kernel/process.c b/trunk/arch/tile/kernel/process.c
index 985cc28c74c5..ed590ad0acdc 100644
--- a/trunk/arch/tile/kernel/process.c
+++ b/trunk/arch/tile/kernel/process.c
@@ -543,9 +543,8 @@ long _sys_vfork(struct pt_regs *regs)
/*
* sys_execve() executes a new program.
*/
-long _sys_execve(const char __user *path,
- const char __user *const __user *argv,
- const char __user *const __user *envp, struct pt_regs *regs)
+long _sys_execve(char __user *path, char __user *__user *argv,
+ char __user *__user *envp, struct pt_regs *regs)
{
long error;
char *filename;
diff --git a/trunk/arch/um/drivers/mconsole_kern.c b/trunk/arch/um/drivers/mconsole_kern.c
index ebc680717e59..de317d0c3294 100644
--- a/trunk/arch/um/drivers/mconsole_kern.c
+++ b/trunk/arch/um/drivers/mconsole_kern.c
@@ -690,7 +690,7 @@ static void with_console(struct mc_request *req, void (*proc)(void *),
static void sysrq_proc(void *arg)
{
char *op = arg;
- handle_sysrq(*op);
+ handle_sysrq(*op, NULL);
}
void mconsole_sysrq(struct mc_request *req)
diff --git a/trunk/arch/um/include/asm/dma-mapping.h b/trunk/arch/um/include/asm/dma-mapping.h
index 1f469e80fdd3..17a2cb5a4178 100644
--- a/trunk/arch/um/include/asm/dma-mapping.h
+++ b/trunk/arch/um/include/asm/dma-mapping.h
@@ -95,6 +95,13 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+static inline int
+dma_get_cache_alignment(void)
+{
+ BUG();
+ return(0);
+}
+
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
diff --git a/trunk/arch/um/kernel/exec.c b/trunk/arch/um/kernel/exec.c
index cd145eda3579..59b20d93b6d4 100644
--- a/trunk/arch/um/kernel/exec.c
+++ b/trunk/arch/um/kernel/exec.c
@@ -44,9 +44,8 @@ void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
PT_REGS_SP(regs) = esp;
}
-static long execve1(const char *file,
- const char __user *const __user *argv,
- const char __user *const __user *env)
+static long execve1(const char *file, char __user * __user *argv,
+ char __user *__user *env)
{
long error;
diff --git a/trunk/arch/um/kernel/syscall.c b/trunk/arch/um/kernel/syscall.c
index 5ddb246626db..7427c0b1930c 100644
--- a/trunk/arch/um/kernel/syscall.c
+++ b/trunk/arch/um/kernel/syscall.c
@@ -51,9 +51,7 @@ long old_mmap(unsigned long addr, unsigned long len,
return err;
}
-int kernel_execve(const char *filename,
- const char *const argv[],
- const char *const envp[])
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
mm_segment_t fs;
int ret;
diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig
index cea0cd9a316f..a84fc34c8f77 100644
--- a/trunk/arch/x86/Kconfig
+++ b/trunk/arch/x86/Kconfig
@@ -245,11 +245,6 @@ config ARCH_HWEIGHT_CFLAGS
config KTIME_SCALAR
def_bool X86_32
-
-config ARCH_CPU_PROBE_RELEASE
- def_bool y
- depends on HOTPLUG_CPU
-
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -754,11 +749,11 @@ config IOMMU_API
def_bool (AMD_IOMMU || DMAR)
config MAXSMP
- bool "Enable Maximum number of SMP Processors and NUMA Nodes"
+ bool "Configure Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
select CPUMASK_OFFSTACK
---help---
- Enable maximum number of CPUS and NUMA Nodes for this architecture.
+ Configure maximum number of CPUS and NUMA Nodes for this architecture.
If unsure, say N.
config NR_CPUS
diff --git a/trunk/arch/x86/include/asm/iomap.h b/trunk/arch/x86/include/asm/iomap.h
index c4191b3b7056..f35eb45d6576 100644
--- a/trunk/arch/x86/include/asm/iomap.h
+++ b/trunk/arch/x86/include/asm/iomap.h
@@ -26,11 +26,11 @@
#include
#include
-void __iomem *
+void *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type);
+iounmap_atomic(void *kvaddr, enum km_type type);
int
iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
diff --git a/trunk/arch/x86/include/asm/kvm_emulate.h b/trunk/arch/x86/include/asm/kvm_emulate.h
index 1f99ecfc48e1..51cfd730ac5d 100644
--- a/trunk/arch/x86/include/asm/kvm_emulate.h
+++ b/trunk/arch/x86/include/asm/kvm_emulate.h
@@ -152,14 +152,9 @@ struct x86_emulate_ops {
struct operand {
enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type;
unsigned int bytes;
- union {
- unsigned long orig_val;
- u64 orig_val64;
- };
- unsigned long *ptr;
+ unsigned long orig_val, *ptr;
union {
unsigned long val;
- u64 val64;
char valptr[sizeof(unsigned long) + 2];
};
};
diff --git a/trunk/arch/x86/include/asm/pci.h b/trunk/arch/x86/include/asm/pci.h
index d395540ff894..404a880ea325 100644
--- a/trunk/arch/x86/include/asm/pci.h
+++ b/trunk/arch/x86/include/asm/pci.h
@@ -27,9 +27,6 @@ extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops,
int node);
extern struct pci_bus *pci_scan_bus_with_sysdata(int busno);
-#ifdef CONFIG_PCI
-
-#ifdef CONFIG_PCI_DOMAINS
static inline int pci_domain_nr(struct pci_bus *bus)
{
struct pci_sysdata *sd = bus->sysdata;
@@ -40,12 +37,13 @@ static inline int pci_proc_domain(struct pci_bus *bus)
{
return pci_domain_nr(bus);
}
-#endif
+
/* Can be used to override the logic in pci_scan_bus for skipping
already-configured bus numbers - to be used for buggy BIOSes
or architectures with incomplete PCI setup by the loader */
+#ifdef CONFIG_PCI
extern unsigned int pcibios_assign_all_busses(void);
extern int pci_legacy_init(void);
# ifdef CONFIG_ACPI
diff --git a/trunk/arch/x86/include/asm/pgtable_32.h b/trunk/arch/x86/include/asm/pgtable_32.h
index f686f49e8b7b..2984a25ff383 100644
--- a/trunk/arch/x86/include/asm/pgtable_32.h
+++ b/trunk/arch/x86/include/asm/pgtable_32.h
@@ -26,7 +26,6 @@ struct mm_struct;
struct vm_area_struct;
extern pgd_t swapper_pg_dir[1024];
-extern pgd_t trampoline_pg_dir[1024];
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }
diff --git a/trunk/arch/x86/include/asm/syscalls.h b/trunk/arch/x86/include/asm/syscalls.h
index f1d8b441fc77..feb2ff9bfc2d 100644
--- a/trunk/arch/x86/include/asm/syscalls.h
+++ b/trunk/arch/x86/include/asm/syscalls.h
@@ -23,9 +23,8 @@ long sys_iopl(unsigned int, struct pt_regs *);
/* kernel/process.c */
int sys_fork(struct pt_regs *);
int sys_vfork(struct pt_regs *);
-long sys_execve(const char __user *,
- const char __user *const __user *,
- const char __user *const __user *, struct pt_regs *);
+long sys_execve(const char __user *, char __user * __user *,
+ char __user * __user *, struct pt_regs *);
long sys_clone(unsigned long, unsigned long, void __user *,
void __user *, struct pt_regs *);
diff --git a/trunk/arch/x86/include/asm/trampoline.h b/trunk/arch/x86/include/asm/trampoline.h
index 4dde797c0578..cb507bb05d79 100644
--- a/trunk/arch/x86/include/asm/trampoline.h
+++ b/trunk/arch/x86/include/asm/trampoline.h
@@ -13,17 +13,14 @@ extern unsigned char *trampoline_base;
extern unsigned long init_rsp;
extern unsigned long initial_code;
-extern unsigned long initial_page_table;
extern unsigned long initial_gs;
#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
extern unsigned long setup_trampoline(void);
-extern void __init setup_trampoline_page_table(void);
extern void __init reserve_trampoline_memory(void);
#else
-static inline void setup_trampoline_page_table(void) {}
-static inline void reserve_trampoline_memory(void) {}
+static inline void reserve_trampoline_memory(void) {};
#endif /* CONFIG_X86_TRAMPOLINE */
#endif /* __ASSEMBLY__ */
diff --git a/trunk/arch/x86/include/asm/tsc.h b/trunk/arch/x86/include/asm/tsc.h
index 1ca132fc0d03..c0427295e8f5 100644
--- a/trunk/arch/x86/include/asm/tsc.h
+++ b/trunk/arch/x86/include/asm/tsc.h
@@ -59,7 +59,5 @@ extern void check_tsc_sync_source(int cpu);
extern void check_tsc_sync_target(void);
extern int notsc_setup(char *);
-extern void save_sched_clock_state(void);
-extern void restore_sched_clock_state(void);
#endif /* _ASM_X86_TSC_H */
diff --git a/trunk/arch/x86/kernel/apic/io_apic.c b/trunk/arch/x86/kernel/apic/io_apic.c
index f1efebaf5510..4dc0084ec1b1 100644
--- a/trunk/arch/x86/kernel/apic/io_apic.c
+++ b/trunk/arch/x86/kernel/apic/io_apic.c
@@ -1728,8 +1728,6 @@ __apicdebuginit(void) print_IO_APIC(void)
struct irq_pin_list *entry;
cfg = desc->chip_data;
- if (!cfg)
- continue;
entry = cfg->irq_2_pin;
if (!entry)
continue;
diff --git a/trunk/arch/x86/kernel/cpu/amd.c b/trunk/arch/x86/kernel/cpu/amd.c
index ba5f62f45f01..60a57b13082d 100644
--- a/trunk/arch/x86/kernel/cpu/amd.c
+++ b/trunk/arch/x86/kernel/cpu/amd.c
@@ -669,7 +669,7 @@ bool cpu_has_amd_erratum(const int *erratum)
}
/* OSVW unavailable or ID unknown, match family-model-stepping range */
- ms = (cpu->x86_model << 4) | cpu->x86_mask;
+ ms = (cpu->x86_model << 8) | cpu->x86_mask;
while ((range = *erratum++))
if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
(ms >= AMD_MODEL_RANGE_START(range)) &&
diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c b/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 5e975298fa81..224392d8fe8c 100644
--- a/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -530,7 +530,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
err = -ENOMEM;
goto out;
}
- if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+ if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
kfree(b);
err = -ENOMEM;
goto out;
@@ -543,7 +543,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
#ifndef CONFIG_SMP
cpumask_setall(b->cpus);
#else
- cpumask_set_cpu(cpu, b->cpus);
+ cpumask_copy(b->cpus, c->llc_shared_map);
#endif
per_cpu(threshold_banks, cpu)[bank] = b;
diff --git a/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c b/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c
index d9368eeda309..c2a8b26d4fea 100644
--- a/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -202,11 +202,10 @@ static int therm_throt_process(bool new_event, int event, int level)
#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
-static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
- unsigned int cpu)
+static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
{
int err;
- struct cpuinfo_x86 *c = &cpu_data(cpu);
+ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group);
if (err)
@@ -252,7 +251,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
mutex_lock(&therm_cpu_lock);
- err = thermal_throttle_add_dev(sys_dev, cpu);
+ err = thermal_throttle_add_dev(sys_dev);
mutex_unlock(&therm_cpu_lock);
WARN_ON(err);
break;
@@ -288,7 +287,7 @@ static __init int thermal_throttle_init_device(void)
#endif
/* connect live CPUs to sysfs */
for_each_online_cpu(cpu) {
- err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu);
+ err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
WARN_ON(err);
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/trunk/arch/x86/kernel/cpu/perf_event.c b/trunk/arch/x86/kernel/cpu/perf_event.c
index 3efdf2870a35..f2da20fda02d 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event.c
@@ -1154,7 +1154,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
/*
* event overflow
*/
- handled++;
+ handled = 1;
data.period = event->hw.last_period;
if (!x86_perf_event_set_period(event))
@@ -1200,20 +1200,12 @@ void perf_events_lapic_init(void)
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
-struct pmu_nmi_state {
- unsigned int marked;
- int handled;
-};
-
-static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
-
static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
{
struct die_args *args = __args;
- unsigned int this_nmi;
- int handled;
+ struct pt_regs *regs;
if (!atomic_read(&active_events))
return NOTIFY_DONE;
@@ -1222,47 +1214,22 @@ perf_event_nmi_handler(struct notifier_block *self,
case DIE_NMI:
case DIE_NMI_IPI:
break;
- case DIE_NMIUNKNOWN:
- this_nmi = percpu_read(irq_stat.__nmi_count);
- if (this_nmi != __get_cpu_var(pmu_nmi).marked)
- /* let the kernel handle the unknown nmi */
- return NOTIFY_DONE;
- /*
- * This one is a PMU back-to-back nmi. Two events
- * trigger 'simultaneously' raising two back-to-back
- * NMIs. If the first NMI handles both, the latter
- * will be empty and daze the CPU. So, we drop it to
- * avoid false-positive 'unknown nmi' messages.
- */
- return NOTIFY_STOP;
+
default:
return NOTIFY_DONE;
}
- apic_write(APIC_LVTPC, APIC_DM_NMI);
-
- handled = x86_pmu.handle_irq(args->regs);
- if (!handled)
- return NOTIFY_DONE;
+ regs = args->regs;
- this_nmi = percpu_read(irq_stat.__nmi_count);
- if ((handled > 1) ||
- /* the next nmi could be a back-to-back nmi */
- ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
- (__get_cpu_var(pmu_nmi).handled > 1))) {
- /*
- * We could have two subsequent back-to-back nmis: The
- * first handles more than one counter, the 2nd
- * handles only one counter and the 3rd handles no
- * counter.
- *
- * This is the 2nd nmi because the previous was
- * handling more than one counter. We will mark the
- * next (3rd) and then drop it if unhandled.
- */
- __get_cpu_var(pmu_nmi).marked = this_nmi + 1;
- __get_cpu_var(pmu_nmi).handled = handled;
- }
+ apic_write(APIC_LVTPC, APIC_DM_NMI);
+ /*
+ * Can't rely on the handled return value to say it was our NMI, two
+ * events could trigger 'simultaneously' raising two back-to-back NMIs.
+ *
+ * If the first NMI handles both, the latter will be empty and daze
+ * the CPU.
+ */
+ x86_pmu.handle_irq(regs);
return NOTIFY_STOP;
}
diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel.c b/trunk/arch/x86/kernel/cpu/perf_event_intel.c
index ee05c90012d2..214ac860ebe0 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event_intel.c
@@ -491,78 +491,33 @@ static void intel_pmu_enable_all(int added)
* Intel Errata AAP53 (model 30)
* Intel Errata BD53 (model 44)
*
- * The official story:
- * These chips need to be 'reset' when adding counters by programming the
- * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
- * in sequence on the same PMC or on different PMCs.
- *
- * In practise it appears some of these events do in fact count, and
- * we need to programm all 4 events.
+ * These chips need to be 'reset' when adding counters by programming
+ * the magic three (non counting) events 0x4300D2, 0x4300B1 and 0x4300B5
+ * either in sequence on the same PMC or on different PMCs.
*/
-static void intel_pmu_nhm_workaround(void)
+static void intel_pmu_nhm_enable_all(int added)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- static const unsigned long nhm_magic[4] = {
- 0x4300B5,
- 0x4300D2,
- 0x4300B1,
- 0x4300B1
- };
- struct perf_event *event;
- int i;
-
- /*
- * The Errata requires below steps:
- * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
- * 2) Configure 4 PERFEVTSELx with the magic events and clear
- * the corresponding PMCx;
- * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
- * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
- * 5) Clear 4 pairs of ERFEVTSELx and PMCx;
- */
+ if (added) {
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int i;
- /*
- * The real steps we choose are a little different from above.
- * A) To reduce MSR operations, we don't run step 1) as they
- * are already cleared before this function is called;
- * B) Call x86_perf_event_update to save PMCx before configuring
- * PERFEVTSELx with magic number;
- * C) With step 5), we do clear only when the PERFEVTSELx is
- * not used currently.
- * D) Call x86_perf_event_set_period to restore PMCx;
- */
-
- /* We always operate 4 pairs of PERF Counters */
- for (i = 0; i < 4; i++) {
- event = cpuc->events[i];
- if (event)
- x86_perf_event_update(event);
- }
+ wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 0, 0x4300D2);
+ wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x4300B1);
+ wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x4300B5);
- for (i = 0; i < 4; i++) {
- wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
- wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
- }
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
+ for (i = 0; i < 3; i++) {
+ struct perf_event *event = cpuc->events[i];
- for (i = 0; i < 4; i++) {
- event = cpuc->events[i];
+ if (!event)
+ continue;
- if (event) {
- x86_perf_event_set_period(event);
__x86_pmu_enable_event(&event->hw,
- ARCH_PERFMON_EVENTSEL_ENABLE);
- } else
- wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
+ ARCH_PERFMON_EVENTSEL_ENABLE);
+ }
}
-}
-
-static void intel_pmu_nhm_enable_all(int added)
-{
- if (added)
- intel_pmu_nhm_workaround();
intel_pmu_enable_all(added);
}
@@ -712,8 +667,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
int bit, loops;
- u64 status;
- int handled = 0;
+ u64 ack, status;
perf_sample_data_init(&data, 0);
@@ -729,7 +683,6 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
loops = 0;
again:
- intel_pmu_ack_status(status);
if (++loops > 100) {
WARN_ONCE(1, "perfevents: irq loop stuck!\n");
perf_event_print_debug();
@@ -738,22 +691,19 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
}
inc_irq_stat(apic_perf_irqs);
+ ack = status;
intel_pmu_lbr_read();
/*
* PEBS overflow sets bit 62 in the global status register
*/
- if (__test_and_clear_bit(62, (unsigned long *)&status)) {
- handled++;
+ if (__test_and_clear_bit(62, (unsigned long *)&status))
x86_pmu.drain_pebs(regs);
- }
for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit];
- handled++;
-
if (!test_bit(bit, cpuc->active_mask))
continue;
@@ -766,6 +716,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
x86_pmu_stop(event);
}
+ intel_pmu_ack_status(ack);
+
/*
* Repeat if there is more work to be done:
*/
@@ -775,7 +727,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
done:
intel_pmu_enable_all(0);
- return handled;
+ return 1;
}
static struct event_constraint *
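
The '+' version of the Nehalem errata workaround reduces the fix to programming the three magic non-counting events and pulsing the global-control MSR. A stand-alone sketch of that MSR sequence, with a plain array standing in for the MSR file and a mocked wrmsrl() (the register indices here are made up, not real MSR numbers):

        #include <stdio.h>
        #include <stdint.h>

        /* Fake MSR space: index 0..3 = PERFEVTSEL0..3, index 8 = GLOBAL_CTRL. */
        #define EVTSEL(n)    (n)
        #define GLOBAL_CTRL  8
        static uint64_t msr[16];

        static void wrmsrl(int reg, uint64_t val)
        {
                msr[reg] = val;
                printf("wrmsr reg=%d val=0x%llx\n", reg, (unsigned long long)val);
        }

        /* The three magic (non-counting) events named in the errata comment. */
        static void nhm_workaround(void)
        {
                wrmsrl(EVTSEL(0), 0x4300D2);
                wrmsrl(EVTSEL(1), 0x4300B1);
                wrmsrl(EVTSEL(2), 0x4300B5);

                /* Pulse the global enable bits, then clear them again.      */
                /* 0x3 = counters 0 and 1; the '-' side uses 0xf for all 4.  */
                wrmsrl(GLOBAL_CTRL, 0x3);
                wrmsrl(GLOBAL_CTRL, 0x0);
        }

        int main(void)
        {
                nhm_workaround();
                return 0;
        }
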
diff --git a/trunk/arch/x86/kernel/cpu/perf_event_p4.c b/trunk/arch/x86/kernel/cpu/perf_event_p4.c
index b560db3305be..febb12cea795 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event_p4.c
@@ -497,8 +497,6 @@ static int p4_hw_config(struct perf_event *event)
event->hw.config |= event->attr.config &
(p4_config_pack_escr(P4_ESCR_MASK_HT) |
p4_config_pack_cccr(P4_CCCR_MASK_HT | P4_CCCR_RESERVED));
-
- event->hw.config &= ~P4_CCCR_FORCE_OVF;
}
rc = x86_setup_perfctr(event);
@@ -692,7 +690,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
inc_irq_stat(apic_perf_irqs);
}
- return handled;
+ return handled > 0;
}
/*
diff --git a/trunk/arch/x86/kernel/head_32.S b/trunk/arch/x86/kernel/head_32.S
index fa8c1b8e09fb..ff4c453e13f3 100644
--- a/trunk/arch/x86/kernel/head_32.S
+++ b/trunk/arch/x86/kernel/head_32.S
@@ -334,7 +334,7 @@ ENTRY(startup_32_smp)
/*
* Enable paging
*/
- movl pa(initial_page_table), %eax
+ movl $pa(swapper_pg_dir),%eax
movl %eax,%cr3 /* set the page table pointer.. */
movl %cr0,%eax
orl $X86_CR0_PG,%eax
@@ -614,8 +614,6 @@ ignore_int:
.align 4
ENTRY(initial_code)
.long i386_start_kernel
-ENTRY(initial_page_table)
- .long pa(swapper_pg_dir)
/*
* BSS section
@@ -631,10 +629,6 @@ ENTRY(swapper_pg_dir)
#endif
swapper_pg_fixmap:
.fill 1024,4,0
-#ifdef CONFIG_X86_TRAMPOLINE
-ENTRY(trampoline_pg_dir)
- .fill 1024,4,0
-#endif
ENTRY(empty_zero_page)
.fill 4096,1,0
diff --git a/trunk/arch/x86/kernel/i387.c b/trunk/arch/x86/kernel/i387.c
index a46cb3522c0c..1f11f5ce668f 100644
--- a/trunk/arch/x86/kernel/i387.c
+++ b/trunk/arch/x86/kernel/i387.c
@@ -40,7 +40,6 @@
static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
unsigned int xstate_size;
-EXPORT_SYMBOL_GPL(xstate_size);
unsigned int sig_xstate_ia32_size = sizeof(struct _fpstate_ia32);
static struct i387_fxsave_struct fx_scratch __cpuinitdata;
diff --git a/trunk/arch/x86/kernel/kgdb.c b/trunk/arch/x86/kernel/kgdb.c
index 852b81967a37..ef10940e1af0 100644
--- a/trunk/arch/x86/kernel/kgdb.c
+++ b/trunk/arch/x86/kernel/kgdb.c
@@ -194,7 +194,7 @@ static struct hw_breakpoint {
unsigned long addr;
int len;
int type;
- struct perf_event * __percpu *pev;
+ struct perf_event **pev;
} breakinfo[HBP_NUM];
static unsigned long early_dr7;
diff --git a/trunk/arch/x86/kernel/kprobes.c b/trunk/arch/x86/kernel/kprobes.c
index 770ebfb349e9..1bfb6cf4dd55 100644
--- a/trunk/arch/x86/kernel/kprobes.c
+++ b/trunk/arch/x86/kernel/kprobes.c
@@ -709,7 +709,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
struct hlist_node *node, *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
- kprobe_opcode_t *correct_ret_addr = NULL;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
@@ -741,34 +740,14 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
/* another task is sharing our hash bucket */
continue;
- orig_ret_address = (unsigned long)ri->ret_addr;
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
- correct_ret_addr = ri->ret_addr;
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- orig_ret_address = (unsigned long)ri->ret_addr;
if (ri->rp && ri->rp->handler) {
__get_cpu_var(current_kprobe) = &ri->rp->kp;
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
- ri->ret_addr = correct_ret_addr;
ri->rp->handler(ri, regs);
__get_cpu_var(current_kprobe) = NULL;
}
+ orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address)
@@ -780,6 +759,8 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
break;
}
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
kretprobe_hash_unlock(current, &flags);
hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
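
Both trampoline_handler() variants rely on the same bookkeeping: each entry into a kretprobed function records the real return address for the current task, and when the trampoline finally runs, the most recently recorded instance is the one to return to, while older instances belong to calls further out on the stack. A toy userspace model of that push/pop discipline (the types and names are invented for the sketch, not the kprobes API):

        #include <stdio.h>

        /* One saved instance per pending kretprobed call of this task. */
        struct instance {
                unsigned long ret_addr;
        };

        static struct instance stack[16];
        static int depth;

        static void on_function_entry(unsigned long real_ret_addr)
        {
                /* kretprobe swaps in the trampoline and remembers the real address */
                stack[depth++].ret_addr = real_ret_addr;
        }

        static unsigned long on_trampoline_hit(void)
        {
                /* the innermost pending call returns first: pop the newest instance */
                unsigned long ret = stack[--depth].ret_addr;

                /* anything still on the stack is for calls deeper on the call chain */
                return ret;
        }

        int main(void)
        {
                on_function_entry(0x1000);      /* outer call  */
                on_function_entry(0x2000);      /* nested call */

                printf("first trampoline hit returns to 0x%lx\n", on_trampoline_hit());
                printf("second trampoline hit returns to 0x%lx\n", on_trampoline_hit());
                return 0;
        }
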
diff --git a/trunk/arch/x86/kernel/process.c b/trunk/arch/x86/kernel/process.c
index 57d1868a86aa..64ecaf0af9af 100644
--- a/trunk/arch/x86/kernel/process.c
+++ b/trunk/arch/x86/kernel/process.c
@@ -301,9 +301,8 @@ EXPORT_SYMBOL(kernel_thread);
/*
* sys_execve() executes a new program.
*/
-long sys_execve(const char __user *name,
- const char __user *const __user *argv,
- const char __user *const __user *envp, struct pt_regs *regs)
+long sys_execve(const char __user *name, char __user * __user *argv,
+ char __user * __user *envp, struct pt_regs *regs)
{
long error;
char *filename;
diff --git a/trunk/arch/x86/kernel/setup.c b/trunk/arch/x86/kernel/setup.c
index c3a4fbb2b996..b008e7883207 100644
--- a/trunk/arch/x86/kernel/setup.c
+++ b/trunk/arch/x86/kernel/setup.c
@@ -1014,8 +1014,6 @@ void __init setup_arch(char **cmdline_p)
paging_init();
x86_init.paging.pagetable_setup_done(swapper_pg_dir);
- setup_trampoline_page_table();
-
tboot_probe();
#ifdef CONFIG_X86_64
diff --git a/trunk/arch/x86/kernel/smpboot.c b/trunk/arch/x86/kernel/smpboot.c
index 8b3bfc4dd708..a5e928b0cb5f 100644
--- a/trunk/arch/x86/kernel/smpboot.c
+++ b/trunk/arch/x86/kernel/smpboot.c
@@ -73,6 +73,7 @@
#ifdef CONFIG_X86_32
u8 apicid_2_node[MAX_APICID];
+static int low_mappings;
#endif
/* State of each CPU */
@@ -90,25 +91,6 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
-
-/*
- * We need this for trampoline_base protection from concurrent accesses when
- * off- and onlining cores wildly.
- */
-static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
-
-void cpu_hotplug_driver_lock()
-{
- mutex_lock(&x86_cpu_hotplug_driver_mutex);
-}
-
-void cpu_hotplug_driver_unlock()
-{
- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
-}
-
-ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
-ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
#else
static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
#define get_idle_for_cpu(x) (idle_thread_array[(x)])
@@ -299,18 +281,6 @@ notrace static void __cpuinit start_secondary(void *unused)
* fragile that we want to limit the things done here to the
* most necessary things.
*/
-
-#ifdef CONFIG_X86_32
- /*
- * Switch away from the trampoline page-table
- *
- * Do this before cpu_init() because it needs to access per-cpu
- * data which may not be mapped in the trampoline page-table.
- */
- load_cr3(swapper_pg_dir);
- __flush_tlb_all();
-#endif
-
vmi_bringup();
cpu_init();
preempt_disable();
@@ -329,6 +299,12 @@ notrace static void __cpuinit start_secondary(void *unused)
legacy_pic->chip->unmask(0);
}
+#ifdef CONFIG_X86_32
+ while (low_mappings)
+ cpu_relax();
+ __flush_tlb_all();
+#endif
+
/* This must be done before setting cpu_online_mask */
set_cpu_sibling_map(raw_smp_processor_id());
wmb();
@@ -774,7 +750,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
#ifdef CONFIG_X86_32
/* Stack for startup_32 can be just as for start_secondary onwards */
irq_ctx_init(cpu);
- initial_page_table = __pa(&trampoline_pg_dir);
#else
clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
initial_gs = per_cpu_offset(cpu);
@@ -922,8 +897,20 @@ int __cpuinit native_cpu_up(unsigned int cpu)
per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+#ifdef CONFIG_X86_32
+ /* init low mem mapping */
+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+ flush_tlb_all();
+ low_mappings = 1;
+
err = do_boot_cpu(apicid, cpu);
+ zap_low_mappings(false);
+ low_mappings = 0;
+#else
+ err = do_boot_cpu(apicid, cpu);
+#endif
if (err) {
pr_debug("do_boot_cpu failed %d\n", err);
return -EIO;
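
On the '+' side, native_cpu_up() temporarily clones the kernel mappings of swapper_pg_dir into the low (identity) pgd slots so the real-mode trampoline and early startup_32 code can turn on paging, and removes them again once the secondary CPU is up; the '-' side uses the dedicated trampoline page table instead. A rough userspace model of the clone-then-zap step (array indices stand in for pgd entries and the sizes are shrunk for the demo):

        #include <stdio.h>
        #include <string.h>

        #define PGD_ENTRIES          8  /* tiny stand-in for 1024 pgd slots  */
        #define KERNEL_PGD_BOUNDARY  4  /* first slot that maps kernel space */

        static unsigned long swapper_pg_dir[PGD_ENTRIES] = {
                0, 0, 0, 0,             /* user half: empty      */
                0xa1, 0xa2, 0xa3, 0xa4, /* kernel half: populated */
        };

        int main(void)
        {
                /* "clone_pgd_range": mirror the kernel mappings into the low slots */
                memcpy(swapper_pg_dir,
                       swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                       KERNEL_PGD_BOUNDARY * sizeof(unsigned long));

                printf("during boot, slot 0 maps %#lx\n", swapper_pg_dir[0]);

                /* "zap_low_mappings": drop them once the secondary CPU is online */
                memset(swapper_pg_dir, 0, KERNEL_PGD_BOUNDARY * sizeof(unsigned long));

                printf("after boot,  slot 0 maps %#lx\n", swapper_pg_dir[0]);
                return 0;
        }
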
diff --git a/trunk/arch/x86/kernel/sys_i386_32.c b/trunk/arch/x86/kernel/sys_i386_32.c
index d5e06624e34a..196552bb412c 100644
--- a/trunk/arch/x86/kernel/sys_i386_32.c
+++ b/trunk/arch/x86/kernel/sys_i386_32.c
@@ -28,9 +28,7 @@
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
*/
-int kernel_execve(const char *filename,
- const char *const argv[],
- const char *const envp[])
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
long __res;
asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
diff --git a/trunk/arch/x86/kernel/trampoline.c b/trunk/arch/x86/kernel/trampoline.c
index e2a595257390..c652ef62742d 100644
--- a/trunk/arch/x86/kernel/trampoline.c
+++ b/trunk/arch/x86/kernel/trampoline.c
@@ -1,7 +1,6 @@
#include <linux/io.h>
#include <asm/trampoline.h>
-#include <asm/pgtable.h>
#include <asm/e820.h>
#if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
@@ -38,19 +37,3 @@ unsigned long __trampinit setup_trampoline(void)
memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
return virt_to_phys(trampoline_base);
}
-
-void __init setup_trampoline_page_table(void)
-{
-#ifdef CONFIG_X86_32
- /* Copy kernel address range */
- clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
- swapper_pg_dir + KERNEL_PGD_BOUNDARY,
- KERNEL_PGD_PTRS);
-
- /* Initialize low mappings */
- clone_pgd_range(trampoline_pg_dir,
- swapper_pg_dir + KERNEL_PGD_BOUNDARY,
- min_t(unsigned long, KERNEL_PGD_PTRS,
- KERNEL_PGD_BOUNDARY));
-#endif
-}
diff --git a/trunk/arch/x86/kernel/tsc.c b/trunk/arch/x86/kernel/tsc.c
index 26a863a9c2a8..ce8e50239332 100644
--- a/trunk/arch/x86/kernel/tsc.c
+++ b/trunk/arch/x86/kernel/tsc.c
@@ -626,44 +626,6 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
local_irq_restore(flags);
}
-static unsigned long long cyc2ns_suspend;
-
-void save_sched_clock_state(void)
-{
- if (!sched_clock_stable)
- return;
-
- cyc2ns_suspend = sched_clock();
-}
-
-/*
- * Even on processors with invariant TSC, TSC gets reset in some the
- * ACPI system sleep states. And in some systems BIOS seem to reinit TSC to
- * arbitrary value (still sync'd across cpu's) during resume from such sleep
- * states. To cope up with this, recompute the cyc2ns_offset for each cpu so
- * that sched_clock() continues from the point where it was left off during
- * suspend.
- */
-void restore_sched_clock_state(void)
-{
- unsigned long long offset;
- unsigned long flags;
- int cpu;
-
- if (!sched_clock_stable)
- return;
-
- local_irq_save(flags);
-
- __get_cpu_var(cyc2ns_offset) = 0;
- offset = cyc2ns_suspend - sched_clock();
-
- for_each_possible_cpu(cpu)
- per_cpu(cyc2ns_offset, cpu) = offset;
-
- local_irq_restore(flags);
-}
-
#ifdef CONFIG_CPU_FREQ
/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
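
The removed save_sched_clock_state()/restore_sched_clock_state() pair keeps sched_clock() continuous across suspend: the clock value is recorded before sleep, and on resume every CPU's cyc2ns_offset is set to the difference between that saved value and what the possibly-reset TSC now produces. A small sketch of the arithmetic, reduced to one CPU and a made-up cycles-to-nanoseconds scale:

        #include <stdio.h>
        #include <stdint.h>

        static uint64_t tsc;            /* fake time stamp counter          */
        static uint64_t cyc2ns_offset;  /* added to the converted TSC value */

        static uint64_t sched_clock(void)
        {
                return tsc * 2 + cyc2ns_offset; /* "2 ns per cycle" for the demo */
        }

        int main(void)
        {
                uint64_t suspend_ns;

                tsc = 1000;
                suspend_ns = sched_clock();     /* save_sched_clock_state()      */

                tsc = 0;                        /* BIOS/ACPI sleep reset the TSC */

                /* restore_sched_clock_state(): choose the offset that hides the reset */
                cyc2ns_offset = suspend_ns - sched_clock();

                printf("at suspend: %llu ns, after resume: %llu ns\n",
                       (unsigned long long)suspend_ns,
                       (unsigned long long)sched_clock());
                return 0;
        }
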
diff --git a/trunk/arch/x86/kvm/emulate.c b/trunk/arch/x86/kvm/emulate.c
index 66ca98aafdd6..b38bd8b92aa6 100644
--- a/trunk/arch/x86/kvm/emulate.c
+++ b/trunk/arch/x86/kvm/emulate.c
@@ -1870,16 +1870,17 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- u64 old = c->dst.orig_val64;
+ u64 old = c->dst.orig_val;
if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
+
c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
ctxt->eflags &= ~EFLG_ZF;
} else {
- c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
- (u32) c->regs[VCPU_REGS_RBX];
+ c->dst.val = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
+ (u32) c->regs[VCPU_REGS_RBX];
ctxt->eflags |= EFLG_ZF;
}
@@ -2615,7 +2616,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
c->src.valptr, c->src.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
- c->src.orig_val64 = c->src.val64;
+ c->src.orig_val = c->src.val;
}
if (c->src2.type == OP_MEM) {
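
emulate_grp9() implements CMPXCHG8B semantics: compare the 64-bit operand against EDX:EAX, store ECX:EBX and set ZF on a match, otherwise load the operand into EDX:EAX and clear ZF; the two sides of the diff only differ in which union field carries the operand. A self-contained C version of those semantics:

        #include <stdio.h>
        #include <stdint.h>

        /* Returns the new ZF value (1 on a successful exchange). */
        static int emulate_cmpxchg8b(uint64_t *mem, uint32_t *eax, uint32_t *edx,
                                     uint32_t ebx, uint32_t ecx)
        {
                uint64_t old = *mem;

                if ((uint32_t)old == *eax && (uint32_t)(old >> 32) == *edx) {
                        *mem = ((uint64_t)ecx << 32) | ebx;     /* store ECX:EBX */
                        return 1;                               /* ZF = 1        */
                }
                *eax = (uint32_t)old;                           /* load EDX:EAX  */
                *edx = (uint32_t)(old >> 32);
                return 0;                                       /* ZF = 0        */
        }

        int main(void)
        {
                uint64_t mem = 0x1111111122222222ULL;
                uint32_t eax = 0x22222222, edx = 0x11111111;

                int zf = emulate_cmpxchg8b(&mem, &eax, &edx, 0xdeadbeef, 0xfeedface);
                printf("zf=%d mem=0x%llx\n", zf, (unsigned long long)mem);
                return 0;
        }
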
diff --git a/trunk/arch/x86/kvm/i8254.c b/trunk/arch/x86/kvm/i8254.c
index ddeb2314b522..0fd6378981f4 100644
--- a/trunk/arch/x86/kvm/i8254.c
+++ b/trunk/arch/x86/kvm/i8254.c
@@ -697,7 +697,6 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
pit->wq = create_singlethread_workqueue("kvm-pit-wq");
if (!pit->wq) {
mutex_unlock(&pit->pit_state.lock);
- kvm_free_irq_source_id(kvm, pit->irq_source_id);
kfree(pit);
return NULL;
}
@@ -743,7 +742,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
kvm_free_irq_source_id(kvm, pit->irq_source_id);
- destroy_workqueue(pit->wq);
+
kfree(pit);
return NULL;
}
diff --git a/trunk/arch/x86/kvm/i8259.c b/trunk/arch/x86/kvm/i8259.c
index 4b7b73ce2098..8d10c063d7f2 100644
--- a/trunk/arch/x86/kvm/i8259.c
+++ b/trunk/arch/x86/kvm/i8259.c
@@ -64,9 +64,6 @@ static void pic_unlock(struct kvm_pic *s)
if (!found)
found = s->kvm->bsp_vcpu;
- if (!found)
- return;
-
kvm_vcpu_kick(found);
}
}
diff --git a/trunk/arch/x86/kvm/irq.h b/trunk/arch/x86/kvm/irq.h
index 63c314502993..ffed06871c5c 100644
--- a/trunk/arch/x86/kvm/irq.h
+++ b/trunk/arch/x86/kvm/irq.h
@@ -43,6 +43,7 @@ struct kvm_kpic_state {
u8 irr; /* interrupt request register */
u8 imr; /* interrupt mask register */
u8 isr; /* interrupt service register */
+ u8 isr_ack; /* interrupt ack detection */
u8 priority_add; /* highest irq priority */
u8 irq_base;
u8 read_reg_select;
@@ -55,7 +56,6 @@ struct kvm_kpic_state {
u8 init4; /* true if 4 byte init */
u8 elcr; /* PIIX edge/trigger selection */
u8 elcr_mask;
- u8 isr_ack; /* interrupt ack detection */
struct kvm_pic *pics_state;
};
diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c
index 3a09c625d526..25f19078b321 100644
--- a/trunk/arch/x86/kvm/x86.c
+++ b/trunk/arch/x86/kvm/x86.c
@@ -2387,7 +2387,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
if (cpu_has_xsave)
memcpy(guest_xsave->region,
&vcpu->arch.guest_fpu.state->xsave,
- xstate_size);
+ sizeof(struct xsave_struct));
else {
memcpy(guest_xsave->region,
&vcpu->arch.guest_fpu.state->fxsave,
@@ -2405,7 +2405,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
if (cpu_has_xsave)
memcpy(&vcpu->arch.guest_fpu.state->xsave,
- guest_xsave->region, xstate_size);
+ guest_xsave->region, sizeof(struct xsave_struct));
else {
if (xstate_bv & ~XSTATE_FPSSE)
return -EINVAL;
diff --git a/trunk/arch/x86/mm/iomap_32.c b/trunk/arch/x86/mm/iomap_32.c
index 72fc70cf6184..84e236ce76ba 100644
--- a/trunk/arch/x86/mm/iomap_32.c
+++ b/trunk/arch/x86/mm/iomap_32.c
@@ -74,7 +74,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
/*
* Map 'pfn' using fixed map 'type' and protections 'prot'
*/
-void __iomem *
+void *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
{
/*
@@ -86,12 +86,12 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
prot = PAGE_KERNEL_UC_MINUS;
- return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
+ return kmap_atomic_prot_pfn(pfn, type, prot);
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type)
+iounmap_atomic(void *kvaddr, enum km_type type)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
diff --git a/trunk/arch/x86/oprofile/nmi_int.c b/trunk/arch/x86/oprofile/nmi_int.c
index cfe4faabb0f6..f6b48f6c5951 100644
--- a/trunk/arch/x86/oprofile/nmi_int.c
+++ b/trunk/arch/x86/oprofile/nmi_int.c
@@ -568,13 +568,8 @@ static int __init init_sysfs(void)
int error;
error = sysdev_class_register(&oprofile_sysclass);
- if (error)
- return error;
-
- error = sysdev_register(&device_oprofile);
- if (error)
- sysdev_class_unregister(&oprofile_sysclass);
-
+ if (!error)
+ error = sysdev_register(&device_oprofile);
return error;
}
@@ -585,10 +580,8 @@ static void exit_sysfs(void)
}
#else
-
-static inline int init_sysfs(void) { return 0; }
-static inline void exit_sysfs(void) { }
-
+#define init_sysfs() do { } while (0)
+#define exit_sysfs() do { } while (0)
#endif /* CONFIG_PM */
static int __init p4_init(char **cpu_type)
@@ -702,8 +695,6 @@ int __init op_nmi_init(struct oprofile_operations *ops)
char *cpu_type = NULL;
int ret = 0;
- using_nmi = 0;
-
if (!cpu_has_apic)
return -ENODEV;
@@ -783,10 +774,7 @@ int __init op_nmi_init(struct oprofile_operations *ops)
mux_init(ops);
- ret = init_sysfs();
- if (ret)
- return ret;
-
+ init_sysfs();
using_nmi = 1;
printk(KERN_INFO "oprofile: using NMI interrupt.\n");
return 0;
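
The two init_sysfs() variants show the usual trade-off in two-step registration: the longer form returns the error of each step and unregisters the class again if registering the device fails, while the shorter form only attempts the second step when the first succeeded and never unwinds. A compact sketch of the unwind pattern (register_a/register_b are placeholders, not the real sysdev calls):

        #include <stdio.h>
        #include <errno.h>

        static int register_a(void) { return 0; }       /* pretend this succeeds */
        static int register_b(void) { return -ENOMEM; } /* pretend this fails    */
        static void unregister_a(void) { puts("unregister_a"); }

        static int init_sysfs(void)
        {
                int error;

                error = register_a();
                if (error)
                        return error;

                error = register_b();
                if (error)
                        unregister_a(); /* undo step 1 so nothing is left half set up */

                return error;
        }

        int main(void)
        {
                printf("init_sysfs() = %d\n", init_sysfs());
                return 0;
        }
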
diff --git a/trunk/arch/x86/power/cpu.c b/trunk/arch/x86/power/cpu.c
index 87bb35e34ef1..e7e8c5f54956 100644
--- a/trunk/arch/x86/power/cpu.c
+++ b/trunk/arch/x86/power/cpu.c
@@ -113,7 +113,6 @@ static void __save_processor_state(struct saved_context *ctxt)
void save_processor_state(void)
{
__save_processor_state(&saved_context);
- save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
@@ -230,7 +229,6 @@ static void __restore_processor_state(struct saved_context *ctxt)
void restore_processor_state(void)
{
__restore_processor_state(&saved_context);
- restore_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
diff --git a/trunk/arch/x86/xen/platform-pci-unplug.c b/trunk/arch/x86/xen/platform-pci-unplug.c
index 0f456386cce5..554c002a1e1a 100644
--- a/trunk/arch/x86/xen/platform-pci-unplug.c
+++ b/trunk/arch/x86/xen/platform-pci-unplug.c
@@ -72,17 +72,13 @@ void __init xen_unplug_emulated_devices(void)
{
int r;
- /* user explicitly requested no unplug */
- if (xen_emul_unplug & XEN_UNPLUG_NEVER)
- return;
/* check the version of the xen platform PCI device */
r = check_platform_magic();
/* If the version matches enable the Xen platform PCI driver.
- * Also enable the Xen platform PCI driver if the host does
- * not support the unplug protocol (XEN_PLATFORM_ERR_MAGIC)
- * but the user told us that unplugging is unnecessary. */
+ * Also enable the Xen platform PCI driver if the version is really old
+ * and the user told us to ignore it. */
if (r && !(r == XEN_PLATFORM_ERR_MAGIC &&
- (xen_emul_unplug & XEN_UNPLUG_UNNECESSARY)))
+ (xen_emul_unplug & XEN_UNPLUG_IGNORE)))
return;
/* Set the default value of xen_emul_unplug depending on whether or
* not the Xen PV frontends and the Xen platform PCI driver have
@@ -103,7 +99,7 @@ void __init xen_unplug_emulated_devices(void)
}
}
/* Now unplug the emulated devices */
- if (!(xen_emul_unplug & XEN_UNPLUG_UNNECESSARY))
+ if (!(xen_emul_unplug & XEN_UNPLUG_IGNORE))
outw(xen_emul_unplug, XEN_IOPORT_UNPLUG);
xen_platform_pci_unplug = xen_emul_unplug;
}
@@ -129,10 +125,8 @@ static int __init parse_xen_emul_unplug(char *arg)
xen_emul_unplug |= XEN_UNPLUG_AUX_IDE_DISKS;
else if (!strncmp(p, "nics", l))
xen_emul_unplug |= XEN_UNPLUG_ALL_NICS;
- else if (!strncmp(p, "unnecessary", l))
- xen_emul_unplug |= XEN_UNPLUG_UNNECESSARY;
- else if (!strncmp(p, "never", l))
- xen_emul_unplug |= XEN_UNPLUG_NEVER;
+ else if (!strncmp(p, "ignore", l))
+ xen_emul_unplug |= XEN_UNPLUG_IGNORE;
else
printk(KERN_WARNING "unrecognised option '%s' "
"in parameter 'xen_emul_unplug'\n", p);
diff --git a/trunk/arch/xtensa/kernel/process.c b/trunk/arch/xtensa/kernel/process.c
index e3558b9a58ba..7c2f38f68ebb 100644
--- a/trunk/arch/xtensa/kernel/process.c
+++ b/trunk/arch/xtensa/kernel/process.c
@@ -318,9 +318,8 @@ long xtensa_clone(unsigned long clone_flags, unsigned long newsp,
*/
asmlinkage
-long xtensa_execve(const char __user *name,
- const char __user *const __user *argv,
- const char __user *const __user *envp,
+long xtensa_execve(const char __user *name, char __user * __user *argv,
+ char __user * __user *envp,
long a3, long a4, long a5,
struct pt_regs *regs)
{
diff --git a/trunk/block/blk-cgroup.c b/trunk/block/blk-cgroup.c
index 2fef1ef931a0..a6809645d212 100644
--- a/trunk/block/blk-cgroup.c
+++ b/trunk/block/blk-cgroup.c
@@ -966,7 +966,7 @@ blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
/* Currently we do not support hierarchy deeper than two level (0,1) */
if (parent != cgroup->top_cgroup)
- return ERR_PTR(-EPERM);
+ return ERR_PTR(-EINVAL);
blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
if (!blkcg)
diff --git a/trunk/block/blk-core.c b/trunk/block/blk-core.c
index 32a1c123dfb3..ee1a1e7e63cc 100644
--- a/trunk/block/blk-core.c
+++ b/trunk/block/blk-core.c
@@ -1198,9 +1198,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
int el_ret;
unsigned int bytes = bio->bi_size;
const unsigned short prio = bio_prio(bio);
- const bool sync = !!(bio->bi_rw & REQ_SYNC);
- const bool unplug = !!(bio->bi_rw & REQ_UNPLUG);
- const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK;
+ const bool sync = (bio->bi_rw & REQ_SYNC);
+ const bool unplug = (bio->bi_rw & REQ_UNPLUG);
+ const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
int rw_flags;
if ((bio->bi_rw & REQ_HARDBARRIER) &&
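
The '-' side normalizes the flag tests with !! and widens ff to unsigned long. With a genuine C99 bool the plain assignment is already safe, since any non-zero value converts to true, but the !! idiom is what protects the result when it is stored in an ordinary integer narrower than the flags word, where the interesting bit can simply be truncated away. A short demonstration of that failure mode (the flag layout is hypothetical):

        #include <stdio.h>

        #define REQ_HIGH (1ULL << 40)   /* a flag that lives above bit 31 */

        int main(void)
        {
                unsigned long long rw = REQ_HIGH;

                unsigned int truncated  = rw & REQ_HIGH;        /* bit 40 is lost */
                unsigned int normalized = !!(rw & REQ_HIGH);    /* forced to 0/1  */
                _Bool        as_bool    = rw & REQ_HIGH;        /* also safe      */

                printf("truncated=%u normalized=%u as_bool=%u\n",
                       truncated, normalized, (unsigned)as_bool);
                return 0;
        }
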
diff --git a/trunk/block/blk-sysfs.c b/trunk/block/blk-sysfs.c
index 0749b89c6885..001ab18078f5 100644
--- a/trunk/block/blk-sysfs.c
+++ b/trunk/block/blk-sysfs.c
@@ -511,7 +511,6 @@ int blk_register_queue(struct gendisk *disk)
kobject_uevent(&q->kobj, KOBJ_REMOVE);
kobject_del(&q->kobj);
blk_trace_remove_sysfs(disk_to_dev(disk));
- kobject_put(&dev->kobj);
return ret;
}
diff --git a/trunk/block/blk.h b/trunk/block/blk.h
index d6b911ac002c..6e7dc87141e4 100644
--- a/trunk/block/blk.h
+++ b/trunk/block/blk.h
@@ -142,18 +142,14 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
static inline int blk_cpu_to_group(int cpu)
{
- int group = NR_CPUS;
#ifdef CONFIG_SCHED_MC
const struct cpumask *mask = cpu_coregroup_mask(cpu);
- group = cpumask_first(mask);
+ return cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
- group = cpumask_first(topology_thread_cpumask(cpu));
+ return cpumask_first(topology_thread_cpumask(cpu));
#else
return cpu;
#endif
- if (likely(group < NR_CPUS))
- return group;
- return cpu;
}
/*
diff --git a/trunk/block/cfq-iosched.c b/trunk/block/cfq-iosched.c
index f65c6f01c475..eb4086f7dfef 100644
--- a/trunk/block/cfq-iosched.c
+++ b/trunk/block/cfq-iosched.c
@@ -30,7 +30,6 @@ static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
-static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;
@@ -148,8 +147,6 @@ struct cfq_queue {
struct cfq_queue *new_cfqq;
struct cfq_group *cfqg;
struct cfq_group *orig_cfqg;
- /* Number of sectors dispatched from queue in single dispatch round */
- unsigned long nr_sectors;
};
/*
@@ -201,8 +198,6 @@ struct cfq_group {
struct hlist_node cfqd_node;
atomic_t ref;
#endif
- /* number of requests that are on the dispatch list or inside driver */
- int dispatched;
};
/*
@@ -276,7 +271,6 @@ struct cfq_data {
unsigned int cfq_slice[2];
unsigned int cfq_slice_async_rq;
unsigned int cfq_slice_idle;
- unsigned int cfq_group_idle;
unsigned int cfq_latency;
unsigned int cfq_group_isolation;
@@ -384,21 +378,6 @@ CFQ_CFQQ_FNS(wait_busy);
&cfqg->service_trees[i][j]: NULL) \
-static inline bool iops_mode(struct cfq_data *cfqd)
-{
- /*
- * If we are not idling on queues and it is a NCQ drive, parallel
- * execution of requests is on and measuring time is not possible
- * in most of the cases until and unless we drive shallower queue
- * depths and that becomes a performance bottleneck. In such cases
- * switch to start providing fairness in terms of number of IOs.
- */
- if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
- return true;
- else
- return false;
-}
-
static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
{
if (cfq_class_idle(cfqq))
@@ -927,6 +906,7 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
slice_used = cfqq->allocated_slice;
}
+ cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used);
return slice_used;
}
@@ -934,21 +914,19 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
struct cfq_queue *cfqq)
{
struct cfq_rb_root *st = &cfqd->grp_service_tree;
- unsigned int used_sl, charge;
+ unsigned int used_sl, charge_sl;
int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
- cfqg->service_tree_idle.count;
BUG_ON(nr_sync < 0);
- used_sl = charge = cfq_cfqq_slice_usage(cfqq);
+ used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq);
- if (iops_mode(cfqd))
- charge = cfqq->slice_dispatch;
- else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
- charge = cfqq->allocated_slice;
+ if (!cfq_cfqq_sync(cfqq) && !nr_sync)
+ charge_sl = cfqq->allocated_slice;
/* Can't update vdisktime while group is on service tree */
cfq_rb_erase(&cfqg->rb_node, st);
- cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
+ cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg);
__cfq_group_service_tree_add(st, cfqg);
/* This group is being expired. Save the context */
@@ -962,9 +940,6 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
st->min_vdisktime);
- cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
- " sect=%u", used_sl, cfqq->slice_dispatch, charge,
- iops_mode(cfqd), cfqq->nr_sectors);
cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
}
@@ -1612,7 +1587,6 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
cfqq->allocated_slice = 0;
cfqq->slice_end = 0;
cfqq->slice_dispatch = 0;
- cfqq->nr_sectors = 0;
cfq_clear_cfqq_wait_request(cfqq);
cfq_clear_cfqq_must_dispatch(cfqq);
@@ -1865,9 +1839,6 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
BUG_ON(!service_tree);
BUG_ON(!service_tree->count);
- if (!cfqd->cfq_slice_idle)
- return false;
-
/* We never do for idle class queues. */
if (prio == IDLE_WORKLOAD)
return false;
@@ -1892,7 +1863,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
struct cfq_queue *cfqq = cfqd->active_queue;
struct cfq_io_context *cic;
- unsigned long sl, group_idle = 0;
+ unsigned long sl;
/*
* SSD device without seek penalty, disable idling. But only do so
@@ -1908,13 +1879,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
/*
* idle is disabled, either manually or by past process history
*/
- if (!cfq_should_idle(cfqd, cfqq)) {
- /* no queue idling. Check for group idling */
- if (cfqd->cfq_group_idle)
- group_idle = cfqd->cfq_group_idle;
- else
- return;
- }
+ if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq))
+ return;
/*
* still active requests from this queue, don't idle
@@ -1941,21 +1907,13 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
return;
}
- /* There are other queues in the group, don't do group idle */
- if (group_idle && cfqq->cfqg->nr_cfqq > 1)
- return;
-
cfq_mark_cfqq_wait_request(cfqq);
- if (group_idle)
- sl = cfqd->cfq_group_idle;
- else
- sl = cfqd->cfq_slice_idle;
+ sl = cfqd->cfq_slice_idle;
mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
- cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
- group_idle ? 1 : 0);
+ cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
}
/*
@@ -1971,11 +1929,9 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
cfq_remove_request(rq);
cfqq->dispatched++;
- (RQ_CFQG(rq))->dispatched++;
elv_dispatch_sort(q, rq);
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
- cfqq->nr_sectors += blk_rq_sectors(rq);
cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
rq_data_dir(rq), rq_is_sync(rq));
}
@@ -2242,7 +2198,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
cfqq = NULL;
goto keep_queue;
} else
- goto check_group_idle;
+ goto expire;
}
/*
@@ -2270,23 +2226,8 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
* flight or is idling for a new request, allow either of these
* conditions to happen (or time out) before selecting a new queue.
*/
- if (timer_pending(&cfqd->idle_slice_timer)) {
- cfqq = NULL;
- goto keep_queue;
- }
-
- if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
- cfqq = NULL;
- goto keep_queue;
- }
-
- /*
- * If group idle is enabled and there are requests dispatched from
- * this group, wait for requests to complete.
- */
-check_group_idle:
- if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
- && cfqq->cfqg->dispatched) {
+ if (timer_pending(&cfqd->idle_slice_timer) ||
+ (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) {
cfqq = NULL;
goto keep_queue;
}
@@ -3434,7 +3375,6 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
WARN_ON(!cfqq->dispatched);
cfqd->rq_in_driver--;
cfqq->dispatched--;
- (RQ_CFQG(rq))->dispatched--;
cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
rq_start_time_ns(rq), rq_io_start_time_ns(rq),
rq_data_dir(rq), rq_is_sync(rq));
@@ -3464,10 +3404,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
* the queue.
*/
if (cfq_should_wait_busy(cfqd, cfqq)) {
- unsigned long extend_sl = cfqd->cfq_slice_idle;
- if (!cfqd->cfq_slice_idle)
- extend_sl = cfqd->cfq_group_idle;
- cfqq->slice_end = jiffies + extend_sl;
+ cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
cfq_mark_cfqq_wait_busy(cfqq);
cfq_log_cfqq(cfqd, cfqq, "will busy wait");
}
@@ -3913,7 +3850,6 @@ static void *cfq_init_queue(struct request_queue *q)
cfqd->cfq_slice[1] = cfq_slice_sync;
cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle;
- cfqd->cfq_group_idle = cfq_group_idle;
cfqd->cfq_latency = 1;
cfqd->cfq_group_isolation = 0;
cfqd->hw_tag = -1;
@@ -3986,7 +3922,6 @@ SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
-SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
@@ -4019,7 +3954,6 @@ STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
-STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
@@ -4041,7 +3975,6 @@ static struct elv_fs_entry cfq_attrs[] = {
CFQ_ATTR(slice_async),
CFQ_ATTR(slice_async_rq),
CFQ_ATTR(slice_idle),
- CFQ_ATTR(group_idle),
CFQ_ATTR(low_latency),
CFQ_ATTR(group_isolation),
__ATTR_NULL
@@ -4095,12 +4028,6 @@ static int __init cfq_init(void)
if (!cfq_slice_idle)
cfq_slice_idle = 1;
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- if (!cfq_group_idle)
- cfq_group_idle = 1;
-#else
- cfq_group_idle = 0;
-#endif
if (cfq_slab_setup())
return -ENOMEM;
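
The removed cfq_group_served() logic chooses what to charge a group for the queue that just ran: the time actually used by default, the number of dispatched requests when iops_mode() is active (slice idling off plus an NCQ drive), or the whole allocated slice for an async queue with no sync siblings. A condensed sketch of that decision (the struct below is a stand-in for the relevant cfq_queue fields):

        #include <stdio.h>
        #include <stdbool.h>

        struct cfqq_stats {
                unsigned int slice_used;        /* time actually consumed         */
                unsigned int allocated_slice;   /* time the queue was granted     */
                unsigned int slice_dispatch;    /* requests dispatched this round */
                bool sync;
                int nr_sync_siblings;
        };

        static unsigned int charge_for(const struct cfqq_stats *q, bool iops_mode)
        {
                unsigned int charge = q->slice_used;

                if (iops_mode)
                        charge = q->slice_dispatch;     /* fairness in IOs         */
                else if (!q->sync && !q->nr_sync_siblings)
                        charge = q->allocated_slice;    /* async only: full slice  */

                return charge;
        }

        int main(void)
        {
                struct cfqq_stats q = { 8, 10, 32, true, 1 };

                printf("time mode charge: %u\n", charge_for(&q, false));
                printf("iops mode charge: %u\n", charge_for(&q, true));
                return 0;
        }
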
diff --git a/trunk/block/elevator.c b/trunk/block/elevator.c
index 205b09a5bd9e..ec585c9554d3 100644
--- a/trunk/block/elevator.c
+++ b/trunk/block/elevator.c
@@ -1009,19 +1009,18 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
struct elevator_queue *old_elevator, *e;
void *data;
- int err;
/*
* Allocate new elevator
*/
e = elevator_alloc(q, new_e);
if (!e)
- return -ENOMEM;
+ return 0;
data = elevator_init_queue(q, e);
if (!data) {
kobject_put(&e->kobj);
- return -ENOMEM;
+ return 0;
}
/*
@@ -1044,8 +1043,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
__elv_unregister_queue(old_elevator);
- err = elv_register_queue(q);
- if (err)
+ if (elv_register_queue(q))
goto fail_register;
/*
@@ -1058,7 +1056,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
- return 0;
+ return 1;
fail_register:
/*
@@ -1073,19 +1071,17 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
spin_unlock_irq(q->queue_lock);
- return err;
+ return 0;
}
-/*
- * Switch this queue to the given IO scheduler.
- */
-int elevator_change(struct request_queue *q, const char *name)
+ssize_t elv_iosched_store(struct request_queue *q, const char *name,
+ size_t count)
{
char elevator_name[ELV_NAME_MAX];
struct elevator_type *e;
if (!q->elevator)
- return -ENXIO;
+ return count;
strlcpy(elevator_name, name, sizeof(elevator_name));
e = elevator_get(strstrip(elevator_name));
@@ -1096,27 +1092,13 @@ int elevator_change(struct request_queue *q, const char *name)
if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
elevator_put(e);
- return 0;
- }
-
- return elevator_switch(q, e);
-}
-EXPORT_SYMBOL(elevator_change);
-
-ssize_t elv_iosched_store(struct request_queue *q, const char *name,
- size_t count)
-{
- int ret;
-
- if (!q->elevator)
- return count;
-
- ret = elevator_change(q, name);
- if (!ret)
return count;
+ }
- printk(KERN_ERR "elevator: switch to %s failed\n", name);
- return ret;
+ if (!elevator_switch(q, e))
+ printk(KERN_ERR "elevator: switch to %s failed\n",
+ elevator_name);
+ return count;
}
ssize_t elv_iosched_show(struct request_queue *q, char *name)
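
The main difference between the two elevator.c versions is the return convention: the '-' side has elevator_switch() report 0 or a negative errno and adds elevator_change() as a reusable entry point, while the '+' side returns 1 on success and 0 on failure and has elv_iosched_store() always report the write as consumed. A tiny illustration of how the 0/-errno convention lets a store handler hand the precise error back to user space (the names are placeholders):

        #include <stdio.h>
        #include <errno.h>

        /* 0 on success, negative errno on failure: the '-' side's convention. */
        static int elevator_change(const char *name)
        {
                if (name[0] == '\0')
                        return -EINVAL;
                return 0;
        }

        static long iosched_store(const char *name, long count)
        {
                int ret = elevator_change(name);

                if (!ret)
                        return count;   /* the write was consumed           */

                fprintf(stderr, "elevator: switch to %s failed\n", name);
                return ret;             /* caller sees the precise error    */
        }

        int main(void)
        {
                printf("store(\"deadline\") = %ld\n", iosched_store("deadline", 8));
                printf("store(\"\") = %ld\n", iosched_store("", 0));
                return 0;
        }
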
diff --git a/trunk/crypto/Kconfig b/trunk/crypto/Kconfig
index e573077f1672..1cd497d7a15a 100644
--- a/trunk/crypto/Kconfig
+++ b/trunk/crypto/Kconfig
@@ -101,13 +101,13 @@ config CRYPTO_MANAGER2
select CRYPTO_BLKCIPHER2
select CRYPTO_PCOMP2
-config CRYPTO_MANAGER_DISABLE_TESTS
- bool "Disable run-time self tests"
+config CRYPTO_MANAGER_TESTS
+ bool "Run algolithms' self-tests"
default y
depends on CRYPTO_MANAGER2
help
- Disable run-time self tests that normally take place at
- algorithm registration.
+ Run cryptomanager's tests for the new crypto algorithms being
+ registered.
config CRYPTO_GF128MUL
tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
diff --git a/trunk/crypto/ahash.c b/trunk/crypto/ahash.c
index f669822a7a44..b8c59b889c6e 100644
--- a/trunk/crypto/ahash.c
+++ b/trunk/crypto/ahash.c
@@ -47,11 +47,8 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
walk->data = crypto_kmap(walk->pg, 0);
walk->data += offset;
- if (offset & alignmask) {
- unsigned int unaligned = alignmask + 1 - (offset & alignmask);
- if (nbytes > unaligned)
- nbytes = unaligned;
- }
+ if (offset & alignmask)
+ nbytes = alignmask + 1 - (offset & alignmask);
walk->entrylen -= nbytes;
return nbytes;
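
The '-' lines clamp each walk step to the next alignment boundary only when that boundary is nearer than the data actually remaining, whereas the '+' line always jumps to the boundary and can overrun a short final block. The arithmetic in isolation (alignmask is 2^n - 1, as in the crypto walk code):

        #include <stdio.h>

        /* Bytes to process this step: at most up to the next (alignmask+1) boundary. */
        static unsigned int step(unsigned int offset, unsigned int nbytes,
                                 unsigned int alignmask)
        {
                if (offset & alignmask) {
                        unsigned int unaligned = alignmask + 1 - (offset & alignmask);

                        if (nbytes > unaligned)
                                nbytes = unaligned;
                }
                return nbytes;
        }

        int main(void)
        {
                /* 2 bytes remain at offset 5 of an 8-byte-aligned buffer: the  */
                /* boundary is 3 bytes away, but only 2 bytes may be touched.   */
                printf("clamped step: %u bytes\n", step(5, 2, 7));              /* 2 */
                printf("boundary distance alone: %u bytes\n", 7 + 1 - (5 & 7)); /* 3 */
                return 0;
        }
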
diff --git a/trunk/crypto/algboss.c b/trunk/crypto/algboss.c
index 791d194958fa..40bd391f34d9 100644
--- a/trunk/crypto/algboss.c
+++ b/trunk/crypto/algboss.c
@@ -206,16 +206,13 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
return NOTIFY_OK;
}
+#ifdef CONFIG_CRYPTO_MANAGER_TESTS
static int cryptomgr_test(void *data)
{
struct crypto_test_param *param = data;
u32 type = param->type;
int err = 0;
-#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
- goto skiptest;
-#endif
-
if (type & CRYPTO_ALG_TESTED)
goto skiptest;
@@ -270,6 +267,7 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
err:
return NOTIFY_OK;
}
+#endif /* CONFIG_CRYPTO_MANAGER_TESTS */
static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
void *data)
@@ -277,8 +275,10 @@ static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
switch (msg) {
case CRYPTO_MSG_ALG_REQUEST:
return cryptomgr_schedule_probe(data);
+#ifdef CONFIG_CRYPTO_MANAGER_TESTS
case CRYPTO_MSG_ALG_REGISTER:
return cryptomgr_schedule_test(data);
+#endif
}
return NOTIFY_DONE;
diff --git a/trunk/crypto/testmgr.c b/trunk/crypto/testmgr.c
index fa8c8f78c8d4..abd980c729eb 100644
--- a/trunk/crypto/testmgr.c
+++ b/trunk/crypto/testmgr.c
@@ -23,7 +23,7 @@
#include "internal.h"
-#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+#ifndef CONFIG_CRYPTO_MANAGER_TESTS
/* a perfect nop */
int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
@@ -2542,6 +2542,6 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
return -EINVAL;
}
-#endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */
+#endif /* CONFIG_CRYPTO_MANAGER_TESTS */
EXPORT_SYMBOL_GPL(alg_test);
diff --git a/trunk/drivers/acpi/pci_root.c b/trunk/drivers/acpi/pci_root.c
index 3ba8d1f44a73..1f67057af2a5 100644
--- a/trunk/drivers/acpi/pci_root.c
+++ b/trunk/drivers/acpi/pci_root.c
@@ -33,6 +33,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -225,31 +226,22 @@ static acpi_status acpi_pci_run_osc(acpi_handle handle,
return status;
}
-static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
- u32 support,
- u32 *control)
+static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, u32 flags)
{
acpi_status status;
- u32 result, capbuf[3];
-
- support &= OSC_PCI_SUPPORT_MASKS;
- support |= root->osc_support_set;
+ u32 support_set, result, capbuf[3];
+ /* do _OSC query for all possible controls */
+ support_set = root->osc_support_set | (flags & OSC_PCI_SUPPORT_MASKS);
capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
- capbuf[OSC_SUPPORT_TYPE] = support;
- if (control) {
- *control &= OSC_PCI_CONTROL_MASKS;
- capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
- } else {
- /* Run _OSC query for all possible controls. */
- capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
- }
+ capbuf[OSC_SUPPORT_TYPE] = support_set;
+ capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
if (ACPI_SUCCESS(status)) {
- root->osc_support_set = support;
- if (control)
- *control = result;
+ root->osc_support_set = support_set;
+ root->osc_control_qry = result;
+ root->osc_queried = 1;
}
return status;
}
@@ -263,7 +255,7 @@ static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
if (ACPI_FAILURE(status))
return status;
mutex_lock(&osc_lock);
- status = acpi_pci_query_osc(root, flags, NULL);
+ status = acpi_pci_query_osc(root, flags);
mutex_unlock(&osc_lock);
return status;
}
@@ -373,70 +365,55 @@ struct pci_dev *acpi_get_pci_dev(acpi_handle handle)
EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
/**
- * acpi_pci_osc_control_set - Request control of PCI root _OSC features.
- * @handle: ACPI handle of a PCI root bridge (or PCIe Root Complex).
- * @mask: Mask of _OSC bits to request control of, place to store control mask.
- * @req: Mask of _OSC bits the control of is essential to the caller.
- *
- * Run _OSC query for @mask and if that is successful, compare the returned
- * mask of control bits with @req. If all of the @req bits are set in the
- * returned mask, run _OSC request for it.
+ * acpi_pci_osc_control_set - commit requested control to Firmware
+ * @handle: acpi_handle for the target ACPI object
+ * @flags: driver's requested control bits
*
- * The variable at the @mask address may be modified regardless of whether or
- * not the function returns success. On success it will contain the mask of
- * _OSC bits the BIOS has granted control of, but its contents are meaningless
- * on failure.
+ * Attempt to take control from Firmware on requested control bits.
**/
-acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
+acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags)
{
- struct acpi_pci_root *root;
acpi_status status;
- u32 ctrl, capbuf[3];
+ u32 control_req, result, capbuf[3];
acpi_handle tmp;
+ struct acpi_pci_root *root;
- if (!mask)
- return AE_BAD_PARAMETER;
+ status = acpi_get_handle(handle, "_OSC", &tmp);
+ if (ACPI_FAILURE(status))
+ return status;
- ctrl = *mask & OSC_PCI_CONTROL_MASKS;
- if ((ctrl & req) != req)
+ control_req = (flags & OSC_PCI_CONTROL_MASKS);
+ if (!control_req)
return AE_TYPE;
root = acpi_pci_find_root(handle);
if (!root)
return AE_NOT_EXIST;
- status = acpi_get_handle(handle, "_OSC", &tmp);
- if (ACPI_FAILURE(status))
- return status;
-
mutex_lock(&osc_lock);
-
- *mask = ctrl | root->osc_control_set;
/* No need to evaluate _OSC if the control was already granted. */
- if ((root->osc_control_set & ctrl) == ctrl)
+ if ((root->osc_control_set & control_req) == control_req)
goto out;
- /* Need to check the available controls bits before requesting them. */
- while (*mask) {
- status = acpi_pci_query_osc(root, root->osc_support_set, mask);
+ /* Need to query controls first before requesting them */
+ if (!root->osc_queried) {
+ status = acpi_pci_query_osc(root, root->osc_support_set);
if (ACPI_FAILURE(status))
goto out;
- if (ctrl == *mask)
- break;
- ctrl = *mask;
}
-
- if ((ctrl & req) != req) {
+ if ((root->osc_control_qry & control_req) != control_req) {
+ printk(KERN_DEBUG
+ "Firmware did not grant requested _OSC control\n");
status = AE_SUPPORT;
goto out;
}
capbuf[OSC_QUERY_TYPE] = 0;
capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set;
- capbuf[OSC_CONTROL_TYPE] = ctrl;
- status = acpi_pci_run_osc(handle, capbuf, mask);
+ capbuf[OSC_CONTROL_TYPE] = root->osc_control_set | control_req;
+ status = acpi_pci_run_osc(handle, capbuf, &result);
if (ACPI_SUCCESS(status))
- root->osc_control_set = *mask;
+ root->osc_control_set = result;
out:
mutex_unlock(&osc_lock);
return status;
@@ -567,6 +544,14 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
if (flags != base_flags)
acpi_pci_osc_support(root, flags);
+ status = acpi_pci_osc_control_set(root->device->handle,
+ OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_INFO "Unable to assume PCIe control: Disabling ASPM\n");
+ pcie_no_aspm();
+ }
+
pci_acpi_add_bus_pm_notifier(device, root->bus);
if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true);
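
Both versions of acpi_pci_osc_control_set() follow the protocol sketched in the kernel-doc above: query _OSC to learn which control bits firmware will grant, bail out if the required bits are not in the answer, and only then commit the request. A distilled model of that negotiation, with the firmware side faked by a fixed grant mask (the flag names are illustrative):

        #include <stdio.h>
        #include <errno.h>

        #define OSC_PCIE_NATIVE_HP  (1 << 0)
        #define OSC_PCIE_AER        (1 << 1)
        #define OSC_PCIE_CAP_CTRL   (1 << 2)

        /* Pretend firmware: it is only willing to hand over these bits. */
        static unsigned int fw_query(unsigned int requested)
        {
                return requested & (OSC_PCIE_CAP_CTRL | OSC_PCIE_AER);
        }

        static unsigned int osc_control_set;    /* bits already granted */

        static int osc_control_request(unsigned int req)
        {
                unsigned int granted;

                if ((osc_control_set & req) == req)
                        return 0;               /* already ours         */

                granted = fw_query(req);        /* query before asking  */
                if ((granted & req) != req)
                        return -EOPNOTSUPP;     /* firmware said no     */

                osc_control_set |= req;         /* commit the request   */
                return 0;
        }

        int main(void)
        {
                printf("cap control: %d\n", osc_control_request(OSC_PCIE_CAP_CTRL));
                printf("native hotplug: %d\n", osc_control_request(OSC_PCIE_NATIVE_HP));
                return 0;
        }
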
diff --git a/trunk/drivers/ata/Kconfig b/trunk/drivers/ata/Kconfig
index 11ec911016c6..65e3e2708371 100644
--- a/trunk/drivers/ata/Kconfig
+++ b/trunk/drivers/ata/Kconfig
@@ -828,7 +828,6 @@ config PATA_SAMSUNG_CF
config PATA_WINBOND_VLB
tristate "Winbond W83759A VLB PATA support (Experimental)"
depends on ISA && EXPERIMENTAL
- select PATA_LEGACY
help
Support for the Winbond W83759A controller on Vesa Local Bus
systems.
diff --git a/trunk/drivers/ata/Makefile b/trunk/drivers/ata/Makefile
index d5df04a395ca..158eaa961b1e 100644
--- a/trunk/drivers/ata/Makefile
+++ b/trunk/drivers/ata/Makefile
@@ -89,6 +89,7 @@ obj-$(CONFIG_PATA_QDI) += pata_qdi.o
obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
obj-$(CONFIG_PATA_SAMSUNG_CF) += pata_samsung_cf.o
+obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o
obj-$(CONFIG_PATA_PXA) += pata_pxa.o
diff --git a/trunk/drivers/ata/ahci.c b/trunk/drivers/ata/ahci.c
index ff1c945fba98..fe75d8befc3a 100644
--- a/trunk/drivers/ata/ahci.c
+++ b/trunk/drivers/ata/ahci.c
@@ -60,7 +60,6 @@ enum board_ids {
board_ahci,
board_ahci_ign_iferr,
board_ahci_nosntf,
- board_ahci_yes_fbs,
/* board IDs for specific chipsets in alphabetical order */
board_ahci_mcp65,
@@ -133,14 +132,6 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_yes_fbs] =
- {
- AHCI_HFLAGS (AHCI_HFLAG_YES_FBS),
- .flags = AHCI_FLAG_COMMON,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
- },
/* by chipsets */
[board_ahci_mcp65] =
{
@@ -253,9 +244,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
- { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
- { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
- { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -374,8 +362,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
/* Marvell */
{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
- { PCI_DEVICE(0x1b4b, 0x9123),
- .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
diff --git a/trunk/drivers/ata/ahci.h b/trunk/drivers/ata/ahci.h
index 474427b6f99f..7113c5724471 100644
--- a/trunk/drivers/ata/ahci.h
+++ b/trunk/drivers/ata/ahci.h
@@ -209,7 +209,6 @@ enum {
link offline */
AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */
- AHCI_HFLAG_YES_FBS = (1 << 14), /* force FBS cap on */
/* ap->flags bits */
diff --git a/trunk/drivers/ata/ata_piix.c b/trunk/drivers/ata/ata_piix.c
index d712675d0a96..3971bc0a4838 100644
--- a/trunk/drivers/ata/ata_piix.c
+++ b/trunk/drivers/ata/ata_piix.c
@@ -302,10 +302,6 @@ static const struct pci_device_id piix_pci_tbl[] = {
{ 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (CPT) */
{ 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
- /* SATA Controller IDE (PBG) */
- { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
- /* SATA Controller IDE (PBG) */
- { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
{ } /* terminate list */
};
diff --git a/trunk/drivers/ata/libahci.c b/trunk/drivers/ata/libahci.c
index 68dc6785472f..81e772a94d59 100644
--- a/trunk/drivers/ata/libahci.c
+++ b/trunk/drivers/ata/libahci.c
@@ -430,12 +430,6 @@ void ahci_save_initial_config(struct device *dev,
cap &= ~HOST_CAP_SNTF;
}
- if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
- dev_printk(KERN_INFO, dev,
- "controller can do FBS, turning on CAP_FBS\n");
- cap |= HOST_CAP_FBS;
- }
-
if (force_port_map && port_map != force_port_map) {
dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
port_map, force_port_map);
@@ -1326,7 +1320,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
/* issue the first D2H Register FIS */
msecs = 0;
now = jiffies;
- if (time_after(deadline, now))
+ if (time_after(now, deadline))
msecs = jiffies_to_msecs(deadline - now);
tf.ctl |= ATA_SRST;
@@ -2042,15 +2036,9 @@ static int ahci_port_start(struct ata_port *ap)
u32 cmd = readl(port_mmio + PORT_CMD);
if (cmd & PORT_CMD_FBSCP)
pp->fbs_supported = true;
- else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
- dev_printk(KERN_INFO, dev,
- "port %d can do FBS, forcing FBSCP\n",
- ap->port_no);
- pp->fbs_supported = true;
- } else
+ else
dev_printk(KERN_WARNING, dev,
- "port %d is not capable of FBS\n",
- ap->port_no);
+ "The port is not capable of FBS\n");
}
if (pp->fbs_supported) {
diff --git a/trunk/drivers/ata/libata-core.c b/trunk/drivers/ata/libata-core.c
index 932eaee50245..7ef7c4f216fa 100644
--- a/trunk/drivers/ata/libata-core.c
+++ b/trunk/drivers/ata/libata-core.c
@@ -5111,18 +5111,15 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
qc->flags |= ATA_QCFLAG_ACTIVE;
ap->qc_active |= 1 << qc->tag;
- /*
- * We guarantee to LLDs that they will have at least one
+ /* We guarantee to LLDs that they will have at least one
* non-zero sg if the command is a data command.
*/
- if (WARN_ON_ONCE(ata_is_data(prot) &&
- (!qc->sg || !qc->n_elem || !qc->nbytes)))
- goto sys_err;
+ BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
if (ata_is_dma(prot) || (ata_is_pio(prot) &&
(ap->flags & ATA_FLAG_PIO_DMA)))
if (ata_sg_setup(qc))
- goto sys_err;
+ goto sg_err;
/* if device is sleeping, schedule reset and abort the link */
if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
@@ -5139,7 +5136,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
goto err;
return;
-sys_err:
+sg_err:
qc->err_mask |= AC_ERR_SYSTEM;
err:
ata_qc_complete(qc);
@@ -5418,7 +5415,6 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
*/
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
- unsigned int ehi_flags = ATA_EHI_QUIET;
int rc;
/*
@@ -5427,18 +5423,7 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
*/
ata_lpm_enable(host);
- /*
- * On some hardware, device fails to respond after spun down
- * for suspend. As the device won't be used before being
- * resumed, we don't need to touch the device. Ask EH to skip
- * the usual stuff and proceed directly to suspend.
- *
- * http://thread.gmane.org/gmane.linux.ide/46764
- */
- if (mesg.event == PM_EVENT_SUSPEND)
- ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
-
- rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
+ rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
if (rc == 0)
host->dev->power.power_state = mesg;
return rc;
diff --git a/trunk/drivers/ata/libata-eh.c b/trunk/drivers/ata/libata-eh.c
index e48302eae55f..c9ae299b8342 100644
--- a/trunk/drivers/ata/libata-eh.c
+++ b/trunk/drivers/ata/libata-eh.c
@@ -3235,10 +3235,6 @@ static int ata_eh_skip_recovery(struct ata_link *link)
if (link->flags & ATA_LFLAG_DISABLED)
return 1;
- /* skip if explicitly requested */
- if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
- return 1;
-
/* thaw frozen port and recover failed devices */
if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
return 0;
diff --git a/trunk/drivers/ata/libata-sff.c b/trunk/drivers/ata/libata-sff.c
index e30c537cce32..674c1436491f 100644
--- a/trunk/drivers/ata/libata-sff.c
+++ b/trunk/drivers/ata/libata-sff.c
@@ -418,7 +418,6 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
if (ioaddr->ctl_addr)
iowrite8(tf->ctl, ioaddr->ctl_addr);
ap->last_ctl = tf->ctl;
- ata_wait_idle(ap);
}
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
@@ -454,8 +453,6 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
iowrite8(tf->device, ioaddr->device_addr);
VPRINTK("device 0x%X\n", tf->device);
}
-
- ata_wait_idle(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_tf_load);
@@ -1045,8 +1042,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
u8 status, int in_wq)
{
- struct ata_link *link = qc->dev->link;
- struct ata_eh_info *ehi = &link->eh_info;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned long flags = 0;
int poll_next;
@@ -1302,14 +1298,8 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
}
EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
-void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
+void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay)
{
- struct ata_port *ap = link->ap;
-
- WARN_ON((ap->sff_pio_task_link != NULL) &&
- (ap->sff_pio_task_link != link));
- ap->sff_pio_task_link = link;
-
/* may fail if ata_sff_flush_pio_task() in progress */
queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
msecs_to_jiffies(delay));
@@ -1331,18 +1321,14 @@ static void ata_sff_pio_task(struct work_struct *work)
{
struct ata_port *ap =
container_of(work, struct ata_port, sff_pio_task.work);
- struct ata_link *link = ap->sff_pio_task_link;
struct ata_queued_cmd *qc;
u8 status;
int poll_next;
- BUG_ON(ap->sff_pio_task_link == NULL);
/* qc can be NULL if timeout occurred */
- qc = ata_qc_from_tag(ap, link->active_tag);
- if (!qc) {
- ap->sff_pio_task_link = NULL;
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (!qc)
return;
- }
fsm_start:
WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
@@ -1359,16 +1345,11 @@ static void ata_sff_pio_task(struct work_struct *work)
msleep(2);
status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
if (status & ATA_BUSY) {
- ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
+ ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE);
return;
}
}
- /*
- * hsm_move() may trigger another command to be processed.
- * clean the link beforehand.
- */
- ap->sff_pio_task_link = NULL;
/* move the HSM */
poll_next = ata_sff_hsm_move(ap, qc, status, 1);
@@ -1395,7 +1376,6 @@ static void ata_sff_pio_task(struct work_struct *work)
unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- struct ata_link *link = qc->dev->link;
/* Use polling pio if the LLD doesn't handle
* interrupt driven pio and atapi CDB interrupt.
@@ -1416,7 +1396,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
ap->hsm_task_state = HSM_ST_LAST;
if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_sff_queue_pio_task(link, 0);
+ ata_sff_queue_pio_task(ap, 0);
break;
@@ -1429,7 +1409,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
if (qc->tf.flags & ATA_TFLAG_WRITE) {
/* PIO data out protocol */
ap->hsm_task_state = HSM_ST_FIRST;
- ata_sff_queue_pio_task(link, 0);
+ ata_sff_queue_pio_task(ap, 0);
/* always send first data block using the
* ata_sff_pio_task() codepath.
@@ -1439,7 +1419,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
ap->hsm_task_state = HSM_ST;
if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_sff_queue_pio_task(link, 0);
+ ata_sff_queue_pio_task(ap, 0);
/* if polling, ata_sff_pio_task() handles the
* rest. otherwise, interrupt handler takes
@@ -1461,7 +1441,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
/* send cdb by polling if no cdb interrupt */
if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
(qc->tf.flags & ATA_TFLAG_POLLING))
- ata_sff_queue_pio_task(link, 0);
+ ata_sff_queue_pio_task(ap, 0);
break;
default:
@@ -2754,7 +2734,10 @@ EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- struct ata_link *link = qc->dev->link;
+
+ /* see ata_dma_blacklisted() */
+ BUG_ON((ap->flags & ATA_FLAG_PIO_POLLING) &&
+ qc->tf.protocol == ATAPI_PROT_DMA);
/* defer PIO handling to sff_qc_issue */
if (!ata_is_dma(qc->tf.protocol))
@@ -2783,7 +2766,7 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
/* send cdb by polling if no cdb interrupt */
if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
- ata_sff_queue_pio_task(link, 0);
+ ata_sff_queue_pio_task(ap, 0);
break;
default:
diff --git a/trunk/drivers/ata/pata_artop.c b/trunk/drivers/ata/pata_artop.c
index 2215632e4b31..ba43f0f8c880 100644
--- a/trunk/drivers/ata/pata_artop.c
+++ b/trunk/drivers/ata/pata_artop.c
@@ -74,8 +74,7 @@ static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline)
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
/* Odd numbered device ids are the units with enable bits (the -R cards) */
- if ((pdev->device & 1) &&
- !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
+ if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
diff --git a/trunk/drivers/ata/pata_cmd64x.c b/trunk/drivers/ata/pata_cmd64x.c
index 905ff76d3cbb..9f5da1c7454b 100644
--- a/trunk/drivers/ata/pata_cmd64x.c
+++ b/trunk/drivers/ata/pata_cmd64x.c
@@ -121,8 +121,14 @@ static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 m
if (pair) {
struct ata_timing tp;
+
ata_timing_compute(pair, pair->pio_mode, &tp, T, 0);
ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
+ if (pair->dma_mode) {
+ ata_timing_compute(pair, pair->dma_mode,
+ &tp, T, 0);
+ ata_timing_merge(&tp, &t, &t, ATA_TIMING_SETUP);
+ }
}
}
diff --git a/trunk/drivers/ata/pata_legacy.c b/trunk/drivers/ata/pata_legacy.c
index eaf194138f21..9df1ff7e1eaa 100644
--- a/trunk/drivers/ata/pata_legacy.c
+++ b/trunk/drivers/ata/pata_legacy.c
@@ -44,9 +44,6 @@
* Specific support is included for the ht6560a/ht6560b/opti82c611a/
* opti82c465mv/promise 20230c/20630/qdi65x0/winbond83759A
*
- * Support for the Winbond 83759A when operating in advanced mode.
- * Multichip mode is not currently supported.
- *
* Use the autospeed and pio_mask options with:
* Appian ADI/2 aka CLPD7220 or AIC25VL01.
* Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with
@@ -138,18 +135,12 @@ static int ht6560b; /* HT 6560A on primary 1, second 2, both 3 */
static int opti82c611a; /* Opti82c611A on primary 1, sec 2, both 3 */
static int opti82c46x; /* Opti 82c465MV present(pri/sec autodetect) */
static int qdi; /* Set to probe QDI controllers */
+static int winbond; /* Set to probe Winbond controllers,
+ give I/O port if non standard */
static int autospeed; /* Chip present which snoops speed changes */
static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */
static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */
-#ifdef PATA_WINBOND_VLB_MODULE
-static int winbond = 1; /* Set to probe Winbond controllers,
- give I/O port if non standard */
-#else
-static int winbond; /* Set to probe Winbond controllers,
- give I/O port if non standard */
-#endif
-
/**
* legacy_probe_add - Add interface to probe list
* @port: Controller port
@@ -1306,7 +1297,6 @@ MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for legacy ATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-MODULE_ALIAS("pata_winbond");
module_param(probe_all, int, 0);
module_param(autospeed, int, 0);
@@ -1315,7 +1305,6 @@ module_param(ht6560b, int, 0);
module_param(opti82c611a, int, 0);
module_param(opti82c46x, int, 0);
module_param(qdi, int, 0);
-module_param(winbond, int, 0);
module_param(pio_mask, int, 0);
module_param(iordy_mask, int, 0);
diff --git a/trunk/drivers/ata/pata_via.c b/trunk/drivers/ata/pata_via.c
index ac8d7d97e408..5e659885de16 100644
--- a/trunk/drivers/ata/pata_via.c
+++ b/trunk/drivers/ata/pata_via.c
@@ -417,8 +417,6 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
tf->lbam,
tf->lbah);
}
-
- ata_wait_idle(ap);
}
static int via_port_start(struct ata_port *ap)
diff --git a/trunk/drivers/ata/pata_winbond.c b/trunk/drivers/ata/pata_winbond.c
new file mode 100644
index 000000000000..6d8619b6f670
--- /dev/null
+++ b/trunk/drivers/ata/pata_winbond.c
@@ -0,0 +1,282 @@
+/*
+ * pata_winbond.c - Winbond VLB ATA controllers
+ * (C) 2006 Red Hat
+ *
+ * Support for the Winbond 83759A when operating in advanced mode.
+ * Multichip mode is not currently supported.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+
+#define DRV_NAME "pata_winbond"
+#define DRV_VERSION "0.0.3"
+
+#define NR_HOST 4 /* Two winbond controllers, two channels each */
+
+struct winbond_data {
+ unsigned long config;
+ struct platform_device *platform_dev;
+};
+
+static struct ata_host *winbond_host[NR_HOST];
+static struct winbond_data winbond_data[NR_HOST];
+static int nr_winbond_host;
+
+#ifdef MODULE
+static int probe_winbond = 1;
+#else
+static int probe_winbond;
+#endif
+
+static DEFINE_SPINLOCK(winbond_lock);
+
+static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&winbond_lock, flags);
+ outb(reg, port + 0x01);
+ outb(val, port + 0x02);
+ spin_unlock_irqrestore(&winbond_lock, flags);
+}
+
+static u8 winbond_readcfg(unsigned long port, u8 reg)
+{
+ u8 val;
+
+ unsigned long flags;
+ spin_lock_irqsave(&winbond_lock, flags);
+ outb(reg, port + 0x01);
+ val = inb(port + 0x02);
+ spin_unlock_irqrestore(&winbond_lock, flags);
+
+ return val;
+}
+
+static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct ata_timing t;
+ struct winbond_data *winbond = ap->host->private_data;
+ int active, recovery;
+ u8 reg;
+ int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
+
+ reg = winbond_readcfg(winbond->config, 0x81);
+
+ /* Get the timing data in cycles */
+ if (reg & 0x40) /* Fast VLB bus, assume 50MHz */
+ ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
+ else
+ ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
+
+ active = (clamp_val(t.active, 3, 17) - 1) & 0x0F;
+ recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F;
+ timing = (active << 4) | recovery;
+ winbond_writecfg(winbond->config, timing, reg);
+
+ /* Load the setup timing */
+
+ reg = 0x35;
+ if (adev->class != ATA_DEV_ATA)
+ reg |= 0x08; /* FIFO off */
+ if (!ata_pio_need_iordy(adev))
+ reg |= 0x02; /* IORDY off */
+ reg |= (clamp_val(t.setup, 0, 3) << 6);
+ winbond_writecfg(winbond->config, timing + 1, reg);
+}
+
+
+static unsigned int winbond_data_xfer(struct ata_device *dev,
+ unsigned char *buf, unsigned int buflen, int rw)
+{
+ struct ata_port *ap = dev->link->ap;
+ int slop = buflen & 3;
+
+ if (ata_id_has_dword_io(dev->id)) {
+ if (rw == READ)
+ ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+ else
+ iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+
+ if (unlikely(slop)) {
+ __le32 pad;
+ if (rw == READ) {
+ pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
+ memcpy(buf + buflen - slop, &pad, slop);
+ } else {
+ memcpy(&pad, buf + buflen - slop, slop);
+ iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
+ }
+ buflen += 4 - slop;
+ }
+ } else
+ buflen = ata_sff_data_xfer(dev, buf, buflen, rw);
+
+ return buflen;
+}
+
+static struct scsi_host_template winbond_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations winbond_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = winbond_data_xfer,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = winbond_set_piomode,
+};
+
+/**
+ * winbond_init_one - attach a winbond interface
+ * @type: Type to display
+ * @io: I/O port start
+ * @irq: interrupt line
+ * @fast: True if on a > 33Mhz VLB
+ *
+ * Register a VLB bus IDE interface. Such interfaces are PIO and we
+ * assume do not support IRQ sharing.
+ */
+
+static __init int winbond_init_one(unsigned long port)
+{
+ struct platform_device *pdev;
+ u8 reg;
+ int i, rc;
+
+ reg = winbond_readcfg(port, 0x81);
+ reg |= 0x80; /* jumpered mode off */
+ winbond_writecfg(port, 0x81, reg);
+ reg = winbond_readcfg(port, 0x83);
+ reg |= 0xF0; /* local control */
+ winbond_writecfg(port, 0x83, reg);
+ reg = winbond_readcfg(port, 0x85);
+ reg |= 0xF0; /* programmable timing */
+ winbond_writecfg(port, 0x85, reg);
+
+ reg = winbond_readcfg(port, 0x81);
+
+ if (!(reg & 0x03)) /* Disabled */
+ return -ENODEV;
+
+ for (i = 0; i < 2 ; i ++) {
+ unsigned long cmd_port = 0x1F0 - (0x80 * i);
+ unsigned long ctl_port = cmd_port + 0x206;
+ struct ata_host *host;
+ struct ata_port *ap;
+ void __iomem *cmd_addr, *ctl_addr;
+
+ if (!(reg & (1 << i)))
+ continue;
+
+ pdev = platform_device_register_simple(DRV_NAME, nr_winbond_host, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ rc = -ENOMEM;
+ host = ata_host_alloc(&pdev->dev, 1);
+ if (!host)
+ goto err_unregister;
+ ap = host->ports[0];
+
+ rc = -ENOMEM;
+ cmd_addr = devm_ioport_map(&pdev->dev, cmd_port, 8);
+ ctl_addr = devm_ioport_map(&pdev->dev, ctl_port, 1);
+ if (!cmd_addr || !ctl_addr)
+ goto err_unregister;
+
+ ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", cmd_port, ctl_port);
+
+ ap->ops = &winbond_port_ops;
+ ap->pio_mask = ATA_PIO4;
+ ap->flags |= ATA_FLAG_SLAVE_POSS;
+ ap->ioaddr.cmd_addr = cmd_addr;
+ ap->ioaddr.altstatus_addr = ctl_addr;
+ ap->ioaddr.ctl_addr = ctl_addr;
+ ata_sff_std_ports(&ap->ioaddr);
+
+ /* hook in a private data structure per channel */
+ host->private_data = &winbond_data[nr_winbond_host];
+ winbond_data[nr_winbond_host].config = port;
+ winbond_data[nr_winbond_host].platform_dev = pdev;
+
+ /* activate */
+ rc = ata_host_activate(host, 14 + i, ata_sff_interrupt, 0,
+ &winbond_sht);
+ if (rc)
+ goto err_unregister;
+
+ winbond_host[nr_winbond_host++] = dev_get_drvdata(&pdev->dev);
+ }
+
+ return 0;
+
+ err_unregister:
+ platform_device_unregister(pdev);
+ return rc;
+}
+
+/**
+ * winbond_init - attach winbond interfaces
+ *
+ * Attach winbond IDE interfaces by scanning the ports it may occupy.
+ */
+
+static __init int winbond_init(void)
+{
+ static const unsigned long config[2] = { 0x130, 0x1B0 };
+
+ int ct = 0;
+ int i;
+
+ if (probe_winbond == 0)
+ return -ENODEV;
+
+ /*
+ * Check both base addresses
+ */
+
+ for (i = 0; i < 2; i++) {
+ if (probe_winbond & (1<<i)) {
 struct scatterlist *sg = qc->sg;
struct ata_port *ap = qc->ap;
- int dma_chan;
+ u32 dma_chan;
struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
int err;
@@ -1588,7 +1588,7 @@ static const struct ata_port_info sata_dwc_port_info[] = {
},
};
-static int sata_dwc_probe(struct platform_device *ofdev,
+static int sata_dwc_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
struct sata_dwc_device *hsdev;
@@ -1702,7 +1702,7 @@ static int sata_dwc_probe(struct platform_device *ofdev,
return err;
}
-static int sata_dwc_remove(struct platform_device *ofdev)
+static int sata_dwc_remove(struct of_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct ata_host *host = dev_get_drvdata(dev);
diff --git a/trunk/drivers/ata/sata_mv.c b/trunk/drivers/ata/sata_mv.c
index a9fd9709c262..9463c71dd38e 100644
--- a/trunk/drivers/ata/sata_mv.c
+++ b/trunk/drivers/ata/sata_mv.c
@@ -1898,25 +1898,19 @@ static void mv_bmdma_start(struct ata_queued_cmd *qc)
* LOCKING:
* Inherited from caller.
*/
-static void mv_bmdma_stop_ap(struct ata_port *ap)
+static void mv_bmdma_stop(struct ata_queued_cmd *qc)
{
+ struct ata_port *ap = qc->ap;
void __iomem *port_mmio = mv_ap_base(ap);
u32 cmd;
/* clear start/stop bit */
cmd = readl(port_mmio + BMDMA_CMD);
- if (cmd & ATA_DMA_START) {
- cmd &= ~ATA_DMA_START;
- writelfl(cmd, port_mmio + BMDMA_CMD);
-
- /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
- ata_sff_dma_pause(ap);
- }
-}
+ cmd &= ~ATA_DMA_START;
+ writelfl(cmd, port_mmio + BMDMA_CMD);
-static void mv_bmdma_stop(struct ata_queued_cmd *qc)
-{
- mv_bmdma_stop_ap(qc->ap);
+ /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+ ata_sff_dma_pause(ap);
}
/**
@@ -1940,21 +1934,8 @@ static u8 mv_bmdma_status(struct ata_port *ap)
reg = readl(port_mmio + BMDMA_STATUS);
if (reg & ATA_DMA_ACTIVE)
status = ATA_DMA_ACTIVE;
- else if (reg & ATA_DMA_ERR)
+ else
status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
- else {
- /*
- * Just because DMA_ACTIVE is 0 (DMA completed),
- * this does _not_ mean the device is "done".
- * So we should not yet be signalling ATA_DMA_INTR
- * in some cases. Eg. DSM/TRIM, and perhaps others.
- */
- mv_bmdma_stop_ap(ap);
- if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
- status = 0;
- else
- status = ATA_DMA_INTR;
- }
return status;
}
@@ -2014,9 +1995,6 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
switch (tf->protocol) {
case ATA_PROT_DMA:
- if (tf->command == ATA_CMD_DSM)
- return;
- /* fall-thru */
case ATA_PROT_NCQ:
break; /* continue below */
case ATA_PROT_PIO:
@@ -2116,8 +2094,6 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
if ((tf->protocol != ATA_PROT_DMA) &&
(tf->protocol != ATA_PROT_NCQ))
return;
- if (tf->command == ATA_CMD_DSM)
- return; /* use bmdma for this */
/* Fill in Gen IIE command request block */
if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2284,7 +2260,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
}
if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_sff_queue_pio_task(link, 0);
+ ata_sff_queue_pio_task(ap, 0);
return 0;
}
@@ -2313,12 +2289,6 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
- if (qc->tf.command == ATA_CMD_DSM) {
- if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */
- return AC_ERR_OTHER;
- break; /* use bmdma for this */
- }
- /* fall thru */
case ATA_PROT_NCQ:
mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
diff --git a/trunk/drivers/base/firmware_class.c b/trunk/drivers/base/firmware_class.c
index 40af43ebd92d..c8a44f5e0584 100644
--- a/trunk/drivers/base/firmware_class.c
+++ b/trunk/drivers/base/firmware_class.c
@@ -568,7 +568,7 @@ static int _request_firmware(const struct firmware **firmware_p,
out:
if (retval) {
release_firmware(firmware);
- *firmware_p = NULL;
+ firmware_p = NULL;
}
return retval;
diff --git a/trunk/drivers/block/cciss.c b/trunk/drivers/block/cciss.c
index 6124c2fd2d33..31064df1370a 100644
--- a/trunk/drivers/block/cciss.c
+++ b/trunk/drivers/block/cciss.c
@@ -297,8 +297,6 @@ static void enqueue_cmd_and_start_io(ctlr_info_t *h,
spin_lock_irqsave(&h->lock, flags);
addQ(&h->reqQ, c);
h->Qdepth++;
- if (h->Qdepth > h->maxQsinceinit)
- h->maxQsinceinit = h->Qdepth;
start_io(h);
spin_unlock_irqrestore(&h->lock, flags);
}
@@ -4521,12 +4519,6 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
misc_fw_support = readl(&cfgtable->misc_fw_support);
use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
- /* The doorbell reset seems to cause lockups on some Smart
- * Arrays (e.g. P410, P410i, maybe others). Until this is
- * fixed or at least isolated, avoid the doorbell reset.
- */
- use_doorbell = 0;
-
rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
if (rc)
goto unmap_cfgtable;
@@ -4720,9 +4712,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
h->scatter_list = kmalloc(h->max_commands *
sizeof(struct scatterlist *),
GFP_KERNEL);
- if (!h->scatter_list)
- goto clean4;
-
for (k = 0; k < h->nr_cmds; k++) {
h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) *
h->maxsgentries,
diff --git a/trunk/drivers/block/loop.c b/trunk/drivers/block/loop.c
index 91797bbbe702..f3c636d23718 100644
--- a/trunk/drivers/block/loop.c
+++ b/trunk/drivers/block/loop.c
@@ -477,7 +477,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
if (bio_rw(bio) == WRITE) {
- bool barrier = !!(bio->bi_rw & REQ_HARDBARRIER);
+ bool barrier = (bio->bi_rw & REQ_HARDBARRIER);
struct file *file = lo->lo_backing_file;
if (barrier) {
diff --git a/trunk/drivers/block/mg_disk.c b/trunk/drivers/block/mg_disk.c
index 76fa3deaee84..b82c5ce5e9df 100644
--- a/trunk/drivers/block/mg_disk.c
+++ b/trunk/drivers/block/mg_disk.c
@@ -974,7 +974,8 @@ static int mg_probe(struct platform_device *plat_dev)
host->breq->queuedata = host;
/* mflash is random device, thanx for the noop */
- err = elevator_change(host->breq, "noop");
+ elevator_exit(host->breq->elevator);
+ err = elevator_init(host->breq, "noop");
if (err) {
printk(KERN_ERR "%s:%d (elevator_init) fail\n",
__func__, __LINE__);
diff --git a/trunk/drivers/block/xen-blkfront.c b/trunk/drivers/block/xen-blkfront.c
index ab735a605cf3..ac1b682edecb 100644
--- a/trunk/drivers/block/xen-blkfront.c
+++ b/trunk/drivers/block/xen-blkfront.c
@@ -834,7 +834,7 @@ static int blkfront_probe(struct xenbus_device *dev,
char *type;
int len;
/* no unplug has been done: do not hook devices != xen vbds */
- if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) {
+ if (xen_platform_pci_unplug & XEN_UNPLUG_IGNORE) {
int major;
if (!VDEV_IS_EXTENDED(vdevice))
diff --git a/trunk/drivers/block/xsysace.c b/trunk/drivers/block/xsysace.c
index 057413bb16e2..2982b3ee9465 100644
--- a/trunk/drivers/block/xsysace.c
+++ b/trunk/drivers/block/xsysace.c
@@ -94,7 +94,6 @@
#include
#include
#if defined(CONFIG_OF)
-#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#endif
diff --git a/trunk/drivers/char/agp/intel-agp.c b/trunk/drivers/char/agp/intel-agp.c
index eab58db5f91c..ddf5def1b0da 100644
--- a/trunk/drivers/char/agp/intel-agp.c
+++ b/trunk/drivers/char/agp/intel-agp.c
@@ -12,7 +12,6 @@
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
-#include <drm/intel-gtt.h>
#include "intel-gtt.c"
@@ -816,19 +815,9 @@ static const struct intel_driver_description {
"HD Graphics", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
"HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG,
"Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
- "Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
- "Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
- "Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
- "Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
- "Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
"Sandybridge", NULL, &intel_gen6_driver },
{ 0, 0, NULL, NULL, NULL }
};
@@ -836,8 +825,7 @@ static const struct intel_driver_description {
static int __devinit intel_gmch_probe(struct pci_dev *pdev,
struct agp_bridge_data *bridge)
{
- int i, mask;
-
+ int i;
bridge->driver = NULL;
for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
@@ -857,19 +845,14 @@ static int __devinit intel_gmch_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
- if (bridge->driver->mask_memory == intel_gen6_mask_memory)
- mask = 40;
- else if (bridge->driver->mask_memory == intel_i965_mask_memory)
- mask = 36;
- else
- mask = 32;
-
- if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
- dev_err(&intel_private.pcidev->dev,
- "set gfx device dma mask %d-bit failed!\n", mask);
- else
- pci_set_consistent_dma_mask(intel_private.pcidev,
- DMA_BIT_MASK(mask));
+ if (bridge->driver->mask_memory == intel_i965_mask_memory) {
+ if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
+ dev_err(&intel_private.pcidev->dev,
+ "set gfx device dma mask 36bit failed!\n");
+ else
+ pci_set_consistent_dma_mask(intel_private.pcidev,
+ DMA_BIT_MASK(36));
+ }
return 1;
}
@@ -1053,7 +1036,6 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
- ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB),
{ }
};
diff --git a/trunk/drivers/char/agp/intel-agp.h b/trunk/drivers/char/agp/intel-agp.h
index ee189c74d345..c05e3e518268 100644
--- a/trunk/drivers/char/agp/intel-agp.h
+++ b/trunk/drivers/char/agp/intel-agp.h
@@ -1,8 +1,6 @@
/*
* Common Intel AGPGART and GTT definitions.
*/
-#ifndef _INTEL_AGP_H
-#define _INTEL_AGP_H
/* Intel registers */
#define INTEL_APSIZE 0xb4
@@ -202,16 +200,10 @@
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106
/* cover 915 and 945 variants */
#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -238,8 +230,7 @@
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
@@ -252,5 +243,3 @@
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
IS_SNB)
-
-#endif
diff --git a/trunk/drivers/char/agp/intel-gtt.c b/trunk/drivers/char/agp/intel-gtt.c
index 75e0a3497888..d22ffb811bf2 100644
--- a/trunk/drivers/char/agp/intel-gtt.c
+++ b/trunk/drivers/char/agp/intel-gtt.c
@@ -49,26 +49,6 @@ static struct gatt_mask intel_i810_masks[] =
.type = INTEL_AGP_CACHED_MEMORY}
};
-#define INTEL_AGP_UNCACHED_MEMORY 0
-#define INTEL_AGP_CACHED_MEMORY_LLC 1
-#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT 2
-#define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3
-#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4
-
-static struct gatt_mask intel_gen6_masks[] =
-{
- {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
- .type = INTEL_AGP_UNCACHED_MEMORY },
- {.mask = I810_PTE_VALID | GEN6_PTE_LLC,
- .type = INTEL_AGP_CACHED_MEMORY_LLC },
- {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
- .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
- {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
- .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
- {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
- .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
-};
-
static struct _intel_private {
struct pci_dev *pcidev; /* device one */
u8 __iomem *registers;
@@ -198,6 +178,13 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem,
off_t pg_start, int mask_type)
{
int i, j;
+ u32 cache_bits = 0;
+
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
+ {
+ cache_bits = GEN6_PTE_LLC_MLC;
+ }
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
@@ -330,23 +317,6 @@ static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
return 0;
}
-static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
- int type)
-{
- unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
- unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
-
- if (type_mask == AGP_USER_UNCACHED_MEMORY)
- return INTEL_AGP_UNCACHED_MEMORY;
- else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
- return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
- INTEL_AGP_CACHED_MEMORY_LLC_MLC;
- else /* set 'normal'/'cached' to LLC by default */
- return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
- INTEL_AGP_CACHED_MEMORY_LLC;
-}
-
-
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
int type)
{
@@ -618,7 +588,8 @@ static void intel_i830_init_gtt_entries(void)
gtt_entries = 0;
break;
}
- } else if (IS_SNB) {
+ } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
/*
* SandyBridge has new memory control reg at 0x50.w
*/
@@ -1097,11 +1068,11 @@ static void intel_i9xx_setup_flush(void)
intel_i915_setup_chipset_flush();
}
- if (intel_private.ifp_resource.start)
+ if (intel_private.ifp_resource.start) {
intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
- if (!intel_private.i9xx_flush_page)
- dev_err(&intel_private.pcidev->dev,
- "can't ioremap flush page - no chipset flushing\n");
+ if (!intel_private.i9xx_flush_page)
+ dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
+ }
}
static int intel_i9xx_configure(void)
@@ -1192,7 +1163,7 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
- if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+ if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
mask_type != INTEL_AGP_CACHED_MEMORY)
goto out_err;
@@ -1362,8 +1333,8 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
dma_addr_t addr, int type)
{
- /* gen6 has bit11-4 for physical addr bit39-32 */
- addr |= (addr >> 28) & 0xff0;
+ /* Shift high bits down */
+ addr |= (addr >> 28) & 0xff;
/* Type checking must be done elsewhere */
return addr | bridge->driver->masks[type].mask;
@@ -1388,7 +1359,6 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
break;
case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
- case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB:
*gtt_offset = MB(2);
pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
@@ -1593,7 +1563,7 @@ static const struct agp_bridge_driver intel_gen6_driver = {
.fetch_size = intel_i9xx_fetch_size,
.cleanup = intel_i915_cleanup,
.mask_memory = intel_gen6_mask_memory,
- .masks = intel_gen6_masks,
+ .masks = intel_i810_masks,
.agp_enable = intel_i810_agp_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = intel_i965_create_gatt_table,
@@ -1606,7 +1576,7 @@ static const struct agp_bridge_driver intel_gen6_driver = {
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_gen6_type_to_mask_type,
+ .agp_type_to_mask_type = intel_i830_type_to_mask_type,
.chipset_flush = intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
.agp_map_page = intel_agp_map_page,
diff --git a/trunk/drivers/char/hangcheck-timer.c b/trunk/drivers/char/hangcheck-timer.c
index f953c96efc86..e0249722d25f 100644
--- a/trunk/drivers/char/hangcheck-timer.c
+++ b/trunk/drivers/char/hangcheck-timer.c
@@ -159,7 +159,7 @@ static void hangcheck_fire(unsigned long data)
if (hangcheck_dump_tasks) {
printk(KERN_CRIT "Hangcheck: Task state:\n");
#ifdef CONFIG_MAGIC_SYSRQ
- handle_sysrq('t');
+ handle_sysrq('t', NULL);
#endif /* CONFIG_MAGIC_SYSRQ */
}
if (hangcheck_reboot) {
diff --git a/trunk/drivers/char/hvc_console.c b/trunk/drivers/char/hvc_console.c
index 3afd62e856eb..fa27d1676ee5 100644
--- a/trunk/drivers/char/hvc_console.c
+++ b/trunk/drivers/char/hvc_console.c
@@ -651,7 +651,7 @@ int hvc_poll(struct hvc_struct *hp)
if (sysrq_pressed)
continue;
} else if (sysrq_pressed) {
- handle_sysrq(buf[i]);
+ handle_sysrq(buf[i], tty);
sysrq_pressed = 0;
continue;
}
diff --git a/trunk/drivers/char/hvsi.c b/trunk/drivers/char/hvsi.c
index a2bc885ce60a..1f4b6de65a2d 100644
--- a/trunk/drivers/char/hvsi.c
+++ b/trunk/drivers/char/hvsi.c
@@ -403,7 +403,7 @@ static void hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len)
hp->sysrq = 1;
continue;
} else if (hp->sysrq) {
- handle_sysrq(c);
+ handle_sysrq(c, hp->tty);
hp->sysrq = 0;
continue;
}
diff --git a/trunk/drivers/char/hw_random/n2-drv.c b/trunk/drivers/char/hw_random/n2-drv.c
index a3f5e381e746..1acdb2509511 100644
--- a/trunk/drivers/char/hw_random/n2-drv.c
+++ b/trunk/drivers/char/hw_random/n2-drv.c
@@ -387,7 +387,7 @@ static int n2rng_init_control(struct n2rng *np)
static int n2rng_data_read(struct hwrng *rng, u32 *data)
{
- struct n2rng *np = (struct n2rng *) rng->priv;
+ struct n2rng *np = rng->priv;
unsigned long ra = __pa(&np->test_data);
int len;
diff --git a/trunk/drivers/char/ip2/ip2main.c b/trunk/drivers/char/ip2/ip2main.c
index d4b71e8d0d23..07f3ea38b582 100644
--- a/trunk/drivers/char/ip2/ip2main.c
+++ b/trunk/drivers/char/ip2/ip2main.c
@@ -1650,7 +1650,7 @@ ip2_close( PTTY tty, struct file *pFile )
/* disable DSS reporting */
i2QueueCommands(PTYPE_INLINE, pCh, 100, 4,
CMD_DCD_NREP, CMD_CTS_NREP, CMD_DSR_NREP, CMD_RI_NREP);
- if (tty->termios->c_cflag & HUPCL) {
+ if ( !tty || (tty->termios->c_cflag & HUPCL) ) {
i2QueueCommands(PTYPE_INLINE, pCh, 100, 2, CMD_RTSDN, CMD_DTRDN);
pCh->dataSetOut &= ~(I2_DTR | I2_RTS);
i2QueueCommands( PTYPE_INLINE, pCh, 100, 1, CMD_PAUSE(25));
@@ -2930,8 +2930,6 @@ ip2_ipl_ioctl (struct file *pFile, UINT cmd, ULONG arg )
if ( pCh )
{
rc = copy_to_user(argp, pCh, sizeof(i2ChanStr));
- if (rc)
- rc = -EFAULT;
} else {
rc = -ENODEV;
}
diff --git a/trunk/drivers/char/pty.c b/trunk/drivers/char/pty.c
index c350d01716bd..ad46eae1f9bb 100644
--- a/trunk/drivers/char/pty.c
+++ b/trunk/drivers/char/pty.c
@@ -675,8 +675,8 @@ static int ptmx_open(struct inode *inode, struct file *filp)
}
set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
-
- tty_add_file(tty, filp);
+ filp->private_data = tty;
+ file_move(filp, &tty->tty_files);
retval = devpts_pty_new(inode, tty->link);
if (retval)
diff --git a/trunk/drivers/char/rocket.c b/trunk/drivers/char/rocket.c
index 7c79d243acc9..79c3bc69165a 100644
--- a/trunk/drivers/char/rocket.c
+++ b/trunk/drivers/char/rocket.c
@@ -1244,7 +1244,6 @@ static int set_config(struct tty_struct *tty, struct r_port *info,
}
info->flags = ((info->flags & ~ROCKET_USR_MASK) | (new_serial.flags & ROCKET_USR_MASK));
configure_r_port(tty, info, NULL);
- mutex_unlock(&info->port.mutex);
return 0;
}
diff --git a/trunk/drivers/char/synclink_gt.c b/trunk/drivers/char/synclink_gt.c
index e63b830c86cc..fef80cfcab5c 100644
--- a/trunk/drivers/char/synclink_gt.c
+++ b/trunk/drivers/char/synclink_gt.c
@@ -691,10 +691,8 @@ static int open(struct tty_struct *tty, struct file *filp)
if (info->port.count == 1) {
/* 1st open on this device, init hardware */
retval = startup(info);
- if (retval < 0) {
- mutex_unlock(&info->port.mutex);
+ if (retval < 0)
goto cleanup;
- }
}
mutex_unlock(&info->port.mutex);
retval = block_til_ready(tty, filp, info);
diff --git a/trunk/drivers/char/sysrq.c b/trunk/drivers/char/sysrq.c
index ef31bb81e843..878ac0c2cc68 100644
--- a/trunk/drivers/char/sysrq.c
+++ b/trunk/drivers/char/sysrq.c
@@ -18,6 +18,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -75,7 +76,7 @@ static int __init sysrq_always_enabled_setup(char *str)
__setup("sysrq_always_enabled", sysrq_always_enabled_setup);
-static void sysrq_handle_loglevel(int key)
+static void sysrq_handle_loglevel(int key, struct tty_struct *tty)
{
int i;
@@ -92,7 +93,7 @@ static struct sysrq_key_op sysrq_loglevel_op = {
};
#ifdef CONFIG_VT
-static void sysrq_handle_SAK(int key)
+static void sysrq_handle_SAK(int key, struct tty_struct *tty)
{
struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work;
schedule_work(SAK_work);
@@ -108,7 +109,7 @@ static struct sysrq_key_op sysrq_SAK_op = {
#endif
#ifdef CONFIG_VT
-static void sysrq_handle_unraw(int key)
+static void sysrq_handle_unraw(int key, struct tty_struct *tty)
{
struct kbd_struct *kbd = &kbd_table[fg_console];
@@ -125,7 +126,7 @@ static struct sysrq_key_op sysrq_unraw_op = {
#define sysrq_unraw_op (*(struct sysrq_key_op *)NULL)
#endif /* CONFIG_VT */
-static void sysrq_handle_crash(int key)
+static void sysrq_handle_crash(int key, struct tty_struct *tty)
{
char *killer = NULL;
@@ -140,7 +141,7 @@ static struct sysrq_key_op sysrq_crash_op = {
.enable_mask = SYSRQ_ENABLE_DUMP,
};
-static void sysrq_handle_reboot(int key)
+static void sysrq_handle_reboot(int key, struct tty_struct *tty)
{
lockdep_off();
local_irq_enable();
@@ -153,7 +154,7 @@ static struct sysrq_key_op sysrq_reboot_op = {
.enable_mask = SYSRQ_ENABLE_BOOT,
};
-static void sysrq_handle_sync(int key)
+static void sysrq_handle_sync(int key, struct tty_struct *tty)
{
emergency_sync();
}
@@ -164,7 +165,7 @@ static struct sysrq_key_op sysrq_sync_op = {
.enable_mask = SYSRQ_ENABLE_SYNC,
};
-static void sysrq_handle_show_timers(int key)
+static void sysrq_handle_show_timers(int key, struct tty_struct *tty)
{
sysrq_timer_list_show();
}
@@ -175,7 +176,7 @@ static struct sysrq_key_op sysrq_show_timers_op = {
.action_msg = "Show clockevent devices & pending hrtimers (no others)",
};
-static void sysrq_handle_mountro(int key)
+static void sysrq_handle_mountro(int key, struct tty_struct *tty)
{
emergency_remount();
}
@@ -187,7 +188,7 @@ static struct sysrq_key_op sysrq_mountro_op = {
};
#ifdef CONFIG_LOCKDEP
-static void sysrq_handle_showlocks(int key)
+static void sysrq_handle_showlocks(int key, struct tty_struct *tty)
{
debug_show_all_locks();
}
@@ -225,7 +226,7 @@ static void sysrq_showregs_othercpus(struct work_struct *dummy)
static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
-static void sysrq_handle_showallcpus(int key)
+static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
{
/*
* Fall back to the workqueue based printing if the
@@ -251,7 +252,7 @@ static struct sysrq_key_op sysrq_showallcpus_op = {
};
#endif
-static void sysrq_handle_showregs(int key)
+static void sysrq_handle_showregs(int key, struct tty_struct *tty)
{
struct pt_regs *regs = get_irq_regs();
if (regs)
@@ -265,7 +266,7 @@ static struct sysrq_key_op sysrq_showregs_op = {
.enable_mask = SYSRQ_ENABLE_DUMP,
};
-static void sysrq_handle_showstate(int key)
+static void sysrq_handle_showstate(int key, struct tty_struct *tty)
{
show_state();
}
@@ -276,7 +277,7 @@ static struct sysrq_key_op sysrq_showstate_op = {
.enable_mask = SYSRQ_ENABLE_DUMP,
};
-static void sysrq_handle_showstate_blocked(int key)
+static void sysrq_handle_showstate_blocked(int key, struct tty_struct *tty)
{
show_state_filter(TASK_UNINTERRUPTIBLE);
}
@@ -290,7 +291,7 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = {
#ifdef CONFIG_TRACING
#include <linux/ftrace.h>
-static void sysrq_ftrace_dump(int key)
+static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
{
ftrace_dump(DUMP_ALL);
}
@@ -304,7 +305,7 @@ static struct sysrq_key_op sysrq_ftrace_dump_op = {
#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)NULL)
#endif
-static void sysrq_handle_showmem(int key)
+static void sysrq_handle_showmem(int key, struct tty_struct *tty)
{
show_mem();
}
@@ -329,7 +330,7 @@ static void send_sig_all(int sig)
}
}
-static void sysrq_handle_term(int key)
+static void sysrq_handle_term(int key, struct tty_struct *tty)
{
send_sig_all(SIGTERM);
console_loglevel = 8;
@@ -348,7 +349,7 @@ static void moom_callback(struct work_struct *ignored)
static DECLARE_WORK(moom_work, moom_callback);
-static void sysrq_handle_moom(int key)
+static void sysrq_handle_moom(int key, struct tty_struct *tty)
{
schedule_work(&moom_work);
}
@@ -360,7 +361,7 @@ static struct sysrq_key_op sysrq_moom_op = {
};
#ifdef CONFIG_BLOCK
-static void sysrq_handle_thaw(int key)
+static void sysrq_handle_thaw(int key, struct tty_struct *tty)
{
emergency_thaw_all();
}
@@ -372,7 +373,7 @@ static struct sysrq_key_op sysrq_thaw_op = {
};
#endif
-static void sysrq_handle_kill(int key)
+static void sysrq_handle_kill(int key, struct tty_struct *tty)
{
send_sig_all(SIGKILL);
console_loglevel = 8;
@@ -384,7 +385,7 @@ static struct sysrq_key_op sysrq_kill_op = {
.enable_mask = SYSRQ_ENABLE_SIGNAL,
};
-static void sysrq_handle_unrt(int key)
+static void sysrq_handle_unrt(int key, struct tty_struct *tty)
{
normalize_rt_tasks();
}
@@ -492,7 +493,7 @@ static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p)
sysrq_key_table[i] = op_p;
}
-void __handle_sysrq(int key, bool check_mask)
+void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
{
struct sysrq_key_op *op_p;
int orig_log_level;
@@ -519,7 +520,7 @@ void __handle_sysrq(int key, bool check_mask)
if (!check_mask || sysrq_on_mask(op_p->enable_mask)) {
printk("%s\n", op_p->action_msg);
console_loglevel = orig_log_level;
- op_p->handler(key);
+ op_p->handler(key, tty);
} else {
printk("This sysrq operation is disabled.\n");
}
@@ -544,10 +545,10 @@ void __handle_sysrq(int key, bool check_mask)
spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
}
-void handle_sysrq(int key)
+void handle_sysrq(int key, struct tty_struct *tty)
{
if (sysrq_on())
- __handle_sysrq(key, true);
+ __handle_sysrq(key, tty, 1);
}
EXPORT_SYMBOL(handle_sysrq);
@@ -596,7 +597,7 @@ static bool sysrq_filter(struct input_handle *handle, unsigned int type,
default:
if (sysrq_down && value && value != 2)
- __handle_sysrq(sysrq_xlate[code], true);
+ __handle_sysrq(sysrq_xlate[code], NULL, 1);
break;
}
@@ -764,7 +765,7 @@ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
if (get_user(c, buf))
return -EFAULT;
- __handle_sysrq(c, false);
+ __handle_sysrq(c, NULL, 0);
}
return count;
diff --git a/trunk/drivers/char/tty_io.c b/trunk/drivers/char/tty_io.c
index 613c852ee0fe..0350c42375a2 100644
--- a/trunk/drivers/char/tty_io.c
+++ b/trunk/drivers/char/tty_io.c
@@ -136,9 +136,6 @@ LIST_HEAD(tty_drivers); /* linked list of tty drivers */
DEFINE_MUTEX(tty_mutex);
EXPORT_SYMBOL(tty_mutex);
-/* Spinlock to protect the tty->tty_files list */
-DEFINE_SPINLOCK(tty_files_lock);
-
static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
ssize_t redirected_tty_write(struct file *, const char __user *,
@@ -188,41 +185,6 @@ void free_tty_struct(struct tty_struct *tty)
kfree(tty);
}
-static inline struct tty_struct *file_tty(struct file *file)
-{
- return ((struct tty_file_private *)file->private_data)->tty;
-}
-
-/* Associate a new file with the tty structure */
-void tty_add_file(struct tty_struct *tty, struct file *file)
-{
- struct tty_file_private *priv;
-
- /* XXX: must implement proper error handling in callers */
- priv = kmalloc(sizeof(*priv), GFP_KERNEL|__GFP_NOFAIL);
-
- priv->tty = tty;
- priv->file = file;
- file->private_data = priv;
-
- spin_lock(&tty_files_lock);
- list_add(&priv->list, &tty->tty_files);
- spin_unlock(&tty_files_lock);
-}
-
-/* Delete file from its tty */
-void tty_del_file(struct file *file)
-{
- struct tty_file_private *priv = file->private_data;
-
- spin_lock(&tty_files_lock);
- list_del(&priv->list);
- spin_unlock(&tty_files_lock);
- file->private_data = NULL;
- kfree(priv);
-}
-
-
#define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base)
/**
@@ -273,11 +235,11 @@ static int check_tty_count(struct tty_struct *tty, const char *routine)
struct list_head *p;
int count = 0;
- spin_lock(&tty_files_lock);
+ file_list_lock();
list_for_each(p, &tty->tty_files) {
count++;
}
- spin_unlock(&tty_files_lock);
+ file_list_unlock();
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_SLAVE &&
tty->link && tty->link->count)
@@ -355,7 +317,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
if (*stp == '\0')
stp = NULL;
- if (tty_line >= 0 && tty_line < p->num && p->ops &&
+ if (tty_line >= 0 && tty_line <= p->num && p->ops &&
p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) {
res = tty_driver_kref_get(p);
*line = tty_line;
@@ -535,7 +497,6 @@ void __tty_hangup(struct tty_struct *tty)
struct file *cons_filp = NULL;
struct file *filp, *f = NULL;
struct task_struct *p;
- struct tty_file_private *priv;
int closecount = 0, n;
unsigned long flags;
int refs = 0;
@@ -545,7 +506,7 @@ void __tty_hangup(struct tty_struct *tty)
spin_lock(&redirect_lock);
- if (redirect && file_tty(redirect) == tty) {
+ if (redirect && redirect->private_data == tty) {
f = redirect;
redirect = NULL;
}
@@ -558,10 +519,9 @@ void __tty_hangup(struct tty_struct *tty)
workqueue with the lock held */
check_tty_count(tty, "tty_hangup");
- spin_lock(&tty_files_lock);
+ file_list_lock();
/* This breaks for file handles being sent over AF_UNIX sockets ? */
- list_for_each_entry(priv, &tty->tty_files, list) {
- filp = priv->file;
+ list_for_each_entry(filp, &tty->tty_files, f_u.fu_list) {
if (filp->f_op->write == redirected_tty_write)
cons_filp = filp;
if (filp->f_op->write != tty_write)
@@ -570,7 +530,7 @@ void __tty_hangup(struct tty_struct *tty)
__tty_fasync(-1, filp, 0); /* can't block */
filp->f_op = &hung_up_tty_fops;
}
- spin_unlock(&tty_files_lock);
+ file_list_unlock();
tty_ldisc_hangup(tty);
@@ -929,10 +889,12 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
int i;
- struct inode *inode = file->f_path.dentry->d_inode;
- struct tty_struct *tty = file_tty(file);
+ struct tty_struct *tty;
+ struct inode *inode;
struct tty_ldisc *ld;
+ tty = file->private_data;
+ inode = file->f_path.dentry->d_inode;
if (tty_paranoia_check(tty, inode, "tty_read"))
return -EIO;
if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))
@@ -1103,11 +1065,12 @@ void tty_write_message(struct tty_struct *tty, char *msg)
static ssize_t tty_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
+ struct tty_struct *tty;
struct inode *inode = file->f_path.dentry->d_inode;
- struct tty_struct *tty = file_tty(file);
- struct tty_ldisc *ld;
ssize_t ret;
+ struct tty_ldisc *ld;
+ tty = file->private_data;
if (tty_paranoia_check(tty, inode, "tty_write"))
return -EIO;
if (!tty || !tty->ops->write ||
@@ -1461,9 +1424,9 @@ static void release_one_tty(struct work_struct *work)
tty_driver_kref_put(driver);
module_put(driver->owner);
- spin_lock(&tty_files_lock);
+ file_list_lock();
list_del_init(&tty->tty_files);
- spin_unlock(&tty_files_lock);
+ file_list_unlock();
put_pid(tty->pgrp);
put_pid(tty->session);
@@ -1544,13 +1507,13 @@ static void release_tty(struct tty_struct *tty, int idx)
int tty_release(struct inode *inode, struct file *filp)
{
- struct tty_struct *tty = file_tty(filp);
- struct tty_struct *o_tty;
+ struct tty_struct *tty, *o_tty;
int pty_master, tty_closing, o_tty_closing, do_sleep;
int devpts;
int idx;
char buf[64];
+ tty = filp->private_data;
if (tty_paranoia_check(tty, inode, "tty_release_dev"))
return 0;
@@ -1708,7 +1671,8 @@ int tty_release(struct inode *inode, struct file *filp)
* - do_tty_hangup no longer sees this file descriptor as
* something that needs to be handled for hangups.
*/
- tty_del_file(filp);
+ file_kill(filp);
+ filp->private_data = NULL;
/*
* Perform some housekeeping before deciding whether to return.
@@ -1875,8 +1839,8 @@ static int tty_open(struct inode *inode, struct file *filp)
return PTR_ERR(tty);
}
- tty_add_file(tty, filp);
-
+ filp->private_data = tty;
+ file_move(filp, &tty->tty_files);
check_tty_count(tty, "tty_open");
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER)
@@ -1952,10 +1916,11 @@ static int tty_open(struct inode *inode, struct file *filp)
static unsigned int tty_poll(struct file *filp, poll_table *wait)
{
- struct tty_struct *tty = file_tty(filp);
+ struct tty_struct *tty;
struct tty_ldisc *ld;
int ret = 0;
+ tty = filp->private_data;
if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_poll"))
return 0;
@@ -1968,10 +1933,11 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait)
static int __tty_fasync(int fd, struct file *filp, int on)
{
- struct tty_struct *tty = file_tty(filp);
+ struct tty_struct *tty;
unsigned long flags;
int retval = 0;
+ tty = filp->private_data;
if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync"))
goto out;
@@ -2525,13 +2491,13 @@ EXPORT_SYMBOL(tty_pair_get_pty);
*/
long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct tty_struct *tty = file_tty(file);
- struct tty_struct *real_tty;
+ struct tty_struct *tty, *real_tty;
void __user *p = (void __user *)arg;
int retval;
struct tty_ldisc *ld;
struct inode *inode = file->f_dentry->d_inode;
+ tty = file->private_data;
if (tty_paranoia_check(tty, inode, "tty_ioctl"))
return -EINVAL;
@@ -2653,7 +2619,7 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct inode *inode = file->f_dentry->d_inode;
- struct tty_struct *tty = file_tty(file);
+ struct tty_struct *tty = file->private_data;
struct tty_ldisc *ld;
int retval = -ENOIOCTLCMD;
@@ -2745,7 +2711,7 @@ void __do_SAK(struct tty_struct *tty)
if (!filp)
continue;
if (filp->f_op->read == tty_read &&
- file_tty(filp) == tty) {
+ filp->private_data == tty) {
printk(KERN_NOTICE "SAK: killed process %d"
" (%s): fd#%d opened to the tty\n",
task_pid_nr(p), p->comm, i);
diff --git a/trunk/drivers/char/vt.c b/trunk/drivers/char/vt.c
index 281aada7b4a1..c734f9b1263a 100644
--- a/trunk/drivers/char/vt.c
+++ b/trunk/drivers/char/vt.c
@@ -194,11 +194,10 @@ static DECLARE_WORK(console_work, console_callback);
int fg_console;
int last_console;
int want_console = -1;
-static int saved_fg_console;
-static int saved_last_console;
-static int saved_want_console;
-static int saved_vc_mode;
-static int saved_console_blanked;
+int saved_fg_console;
+int saved_last_console;
+int saved_want_console;
+int saved_vc_mode;
/*
* For each existing display, we have a pointer to console currently visible
@@ -906,16 +905,22 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
* bottom of buffer
*/
old_origin += (old_rows - new_rows) * old_row_size;
+ end = vc->vc_scr_end;
} else {
/*
* Cursor is in no man's land, copy 1/2 screenful
* from the top and bottom of cursor position
*/
old_origin += (vc->vc_y - new_rows/2) * old_row_size;
+ end = old_origin + (old_row_size * new_rows);
}
- }
-
- end = old_origin + old_row_size * min(old_rows, new_rows);
+ } else
+ /*
+ * Cursor near the top, copy contents from the top of buffer
+ */
+ end = (old_rows > new_rows) ? old_origin +
+ (old_row_size * new_rows) :
+ vc->vc_scr_end;
update_attr(vc);
@@ -3069,7 +3074,8 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
old_was_color = vc->vc_can_do_color;
vc->vc_sw->con_deinit(vc);
- vc->vc_origin = (unsigned long)vc->vc_screenbuf;
+ if (!vc->vc_origin)
+ vc->vc_origin = (unsigned long)vc->vc_screenbuf;
visual_init(vc, i, 0);
set_origin(vc);
update_attr(vc);
@@ -3443,7 +3449,6 @@ int con_debug_enter(struct vc_data *vc)
saved_last_console = last_console;
saved_want_console = want_console;
saved_vc_mode = vc->vc_mode;
- saved_console_blanked = console_blanked;
vc->vc_mode = KD_TEXT;
console_blanked = 0;
if (vc->vc_sw->con_debug_enter)
@@ -3487,7 +3492,6 @@ int con_debug_leave(void)
fg_console = saved_fg_console;
last_console = saved_last_console;
want_console = saved_want_console;
- console_blanked = saved_console_blanked;
vc_cons[fg_console].d->vc_mode = saved_vc_mode;
vc = vc_cons[fg_console].d;
diff --git a/trunk/drivers/char/vt_ioctl.c b/trunk/drivers/char/vt_ioctl.c
index 38df8c19e74c..2bbeaaea46e9 100644
--- a/trunk/drivers/char/vt_ioctl.c
+++ b/trunk/drivers/char/vt_ioctl.c
@@ -533,14 +533,11 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
case KIOCSOUND:
if (!perm)
goto eperm;
- /*
- * The use of PIT_TICK_RATE is historic, it used to be
- * the platform-dependent CLOCK_TICK_RATE between 2.6.12
- * and 2.6.36, which was a minor but unfortunate ABI
- * change.
- */
+ /* FIXME: This is an old broken API but we need to keep it
+ supported and somehow separate the historic advertised
+ tick rate from any real one */
if (arg)
- arg = PIT_TICK_RATE / arg;
+ arg = CLOCK_TICK_RATE / arg;
kd_mksound(arg, 0);
break;
@@ -556,8 +553,11 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
*/
ticks = HZ * ((arg >> 16) & 0xffff) / 1000;
count = ticks ? (arg & 0xffff) : 0;
+ /* FIXME: This is an old broken API but we need to keep it
+ supported and somehow separate the historic advertised
+ tick rate from any real one */
if (count)
- count = PIT_TICK_RATE / count;
+ count = CLOCK_TICK_RATE / count;
kd_mksound(count, ticks);
break;
}
diff --git a/trunk/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/trunk/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index b663d573aad9..0ed763cd2e77 100644
--- a/trunk/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/trunk/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -94,7 +94,6 @@
#ifdef CONFIG_OF
/* For open firmware. */
-#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#endif
diff --git a/trunk/drivers/edac/amd64_edac.c b/trunk/drivers/edac/amd64_edac.c
index e7d5d6b5dcf6..670239ab7511 100644
--- a/trunk/drivers/edac/amd64_edac.c
+++ b/trunk/drivers/edac/amd64_edac.c
@@ -2071,6 +2071,16 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
amd64_handle_ce(mci, info);
else if (ecc_type == 1)
amd64_handle_ue(mci, info);
+
+ /*
+ * If main error is CE then overflow must be CE. If main error is UE
+ * then overflow is unknown. We'll call the overflow a CE - if
+ * panic_on_ue is set then we're already panic'ed and won't arrive
+ * here. Else, then apparently someone doesn't think that UE's are
+ * catastrophic.
+ */
+ if (info->nbsh & K8_NBSH_OVERFLOW)
+ edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR " Error Overflow");
}
void amd64_decode_bus_error(int node_id, struct err_regs *regs)
diff --git a/trunk/drivers/edac/edac_mce_amd.c b/trunk/drivers/edac/edac_mce_amd.c
index 9014df6f605d..bae9351e9473 100644
--- a/trunk/drivers/edac/edac_mce_amd.c
+++ b/trunk/drivers/edac/edac_mce_amd.c
@@ -365,10 +365,11 @@ static int amd_decode_mce(struct notifier_block *nb, unsigned long val,
pr_emerg("MC%d_STATUS: ", m->bank);
- pr_cont("%sorrected error, other errors lost: %s, "
+ pr_cont("%sorrected error, report: %s, MiscV: %svalid, "
"CPU context corrupt: %s",
((m->status & MCI_STATUS_UC) ? "Unc" : "C"),
- ((m->status & MCI_STATUS_OVER) ? "yes" : "no"),
+ ((m->status & MCI_STATUS_EN) ? "yes" : "no"),
+ ((m->status & MCI_STATUS_MISCV) ? "" : "in"),
((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
/* do the two bits[14:13] together */
@@ -425,15 +426,11 @@ static struct notifier_block amd_mce_dec_nb = {
static int __init mce_amd_init(void)
{
/*
- * We can decode MCEs for K8, F10h and F11h CPUs:
+ * We can decode MCEs for Opteron and later CPUs:
*/
- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
- return 0;
-
- if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
- return 0;
-
- atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
+ (boot_cpu_data.x86 >= 0xf))
+ atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
return 0;
}
diff --git a/trunk/drivers/firewire/core-transaction.c b/trunk/drivers/firewire/core-transaction.c
index b42a0bde8494..ca7ca56661e0 100644
--- a/trunk/drivers/firewire/core-transaction.c
+++ b/trunk/drivers/firewire/core-transaction.c
@@ -81,10 +81,6 @@ static int close_transaction(struct fw_transaction *transaction,
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t == transaction) {
- if (!del_timer(&t->split_timeout_timer)) {
- spin_unlock_irqrestore(&card->lock, flags);
- goto timed_out;
- }
list_del_init(&t->link);
card->tlabel_mask &= ~(1ULL << t->tlabel);
break;
@@ -93,11 +89,11 @@ static int close_transaction(struct fw_transaction *transaction,
spin_unlock_irqrestore(&card->lock, flags);
if (&t->link != &card->transaction_list) {
+ del_timer_sync(&t->split_timeout_timer);
t->callback(card, rcode, NULL, 0, t->callback_data);
return 0;
}
- timed_out:
return -ENOENT;
}
@@ -925,10 +921,6 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t->node_id == source && t->tlabel == tlabel) {
- if (!del_timer(&t->split_timeout_timer)) {
- spin_unlock_irqrestore(&card->lock, flags);
- goto timed_out;
- }
list_del_init(&t->link);
card->tlabel_mask &= ~(1ULL << t->tlabel);
break;
@@ -937,7 +929,6 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
spin_unlock_irqrestore(&card->lock, flags);
if (&t->link == &card->transaction_list) {
- timed_out:
fw_notify("Unsolicited response (source %x, tlabel %x)\n",
source, tlabel);
return;
@@ -972,6 +963,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
break;
}
+ del_timer_sync(&t->split_timeout_timer);
+
/*
* The response handler may be executed while the request handler
* is still pending. Cancel the request handler.
diff --git a/trunk/drivers/firewire/net.c b/trunk/drivers/firewire/net.c
index 33f8421c71cc..da17d409a244 100644
--- a/trunk/drivers/firewire/net.c
+++ b/trunk/drivers/firewire/net.c
@@ -579,7 +579,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
if (!peer) {
fw_notify("No peer for ARP packet from %016llx\n",
(unsigned long long)peer_guid);
- goto no_peer;
+ goto failed_proto;
}
/*
@@ -656,7 +656,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
return 0;
- no_peer:
+ failed_proto:
net->stats.rx_errors++;
net->stats.rx_dropped++;
@@ -664,7 +664,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
if (netif_queue_stopped(net))
netif_wake_queue(net);
- return -ENOENT;
+ return 0;
}
static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
@@ -701,7 +701,7 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
fw_error("out of memory\n");
net->stats.rx_dropped++;
- return -ENOMEM;
+ return -1;
}
skb_reserve(skb, (net->hard_header_len + 15) & ~15);
memcpy(skb_put(skb, len), buf, len);
@@ -726,10 +726,8 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
spin_lock_irqsave(&dev->lock, flags);
peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation);
- if (!peer) {
- retval = -ENOENT;
- goto fail;
- }
+ if (!peer)
+ goto bad_proto;
pd = fwnet_pd_find(peer, datagram_label);
if (pd == NULL) {
@@ -743,7 +741,7 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
dg_size, buf, fg_off, len);
if (pd == NULL) {
retval = -ENOMEM;
- goto fail;
+ goto bad_proto;
}
peer->pdg_size++;
} else {
@@ -757,9 +755,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
pd = fwnet_pd_new(net, peer, datagram_label,
dg_size, buf, fg_off, len);
if (pd == NULL) {
- peer->pdg_size--;
retval = -ENOMEM;
- goto fail;
+ peer->pdg_size--;
+ goto bad_proto;
}
} else {
if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) {
@@ -770,8 +768,7 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
*/
fwnet_pd_delete(pd);
peer->pdg_size--;
- retval = -ENOMEM;
- goto fail;
+ goto bad_proto;
}
}
} /* new datagram or add to existing one */
@@ -797,13 +794,14 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
- fail:
+
+ bad_proto:
spin_unlock_irqrestore(&dev->lock, flags);
if (netif_queue_stopped(net))
netif_wake_queue(net);
- return retval;
+ return 0;
}
static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
diff --git a/trunk/drivers/firewire/ohci.c b/trunk/drivers/firewire/ohci.c
index be29b0bb2471..7f03540cabe8 100644
--- a/trunk/drivers/firewire/ohci.c
+++ b/trunk/drivers/firewire/ohci.c
@@ -694,15 +694,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
log_ar_at_event('R', p.speed, p.header, evt);
/*
- * Several controllers, notably from NEC and VIA, forget to
- * write ack_complete status at PHY packet reception.
- */
- if (evt == OHCI1394_evt_no_status &&
- (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
- p.ack = ACK_COMPLETE;
-
- /*
- * The OHCI bus reset handler synthesizes a PHY packet with
+ * The OHCI bus reset handler synthesizes a phy packet with
* the new generation number when a bus reset happens (see
* section 8.4.2.3). This helps us determine when a request
* was received and make sure we send the response in the same
diff --git a/trunk/drivers/firewire/sbp2.c b/trunk/drivers/firewire/sbp2.c
index bfae4b309791..9f76171717e5 100644
--- a/trunk/drivers/firewire/sbp2.c
+++ b/trunk/drivers/firewire/sbp2.c
@@ -450,7 +450,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
if (&orb->link != &lu->orb_list) {
orb->callback(orb, &status);
- kref_put(&orb->kref, free_orb); /* orb callback reference */
+ kref_put(&orb->kref, free_orb);
} else {
fw_error("status write for unknown orb\n");
}
@@ -472,28 +472,20 @@ static void complete_transaction(struct fw_card *card, int rcode,
* So this callback only sets the rcode if it hasn't already
* been set and only does the cleanup if the transaction
* failed and we didn't already get a status write.
- *
- * Here we treat RCODE_CANCELLED like RCODE_COMPLETE because some
- * OXUF936QSE firmwares occasionally respond after Split_Timeout and
- * complete the ORB just fine. Note, we also get RCODE_CANCELLED
- * from sbp2_cancel_orbs() if fw_cancel_transaction() == 0.
*/
spin_lock_irqsave(&card->lock, flags);
if (orb->rcode == -1)
orb->rcode = rcode;
-
- if (orb->rcode != RCODE_COMPLETE && orb->rcode != RCODE_CANCELLED) {
+ if (orb->rcode != RCODE_COMPLETE) {
list_del(&orb->link);
spin_unlock_irqrestore(&card->lock, flags);
-
orb->callback(orb, NULL);
- kref_put(&orb->kref, free_orb); /* orb callback reference */
} else {
spin_unlock_irqrestore(&card->lock, flags);
}
- kref_put(&orb->kref, free_orb); /* transaction callback reference */
+ kref_put(&orb->kref, free_orb);
}
static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
@@ -509,8 +501,9 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
list_add_tail(&orb->link, &lu->orb_list);
spin_unlock_irqrestore(&device->card->lock, flags);
- kref_get(&orb->kref); /* transaction callback reference */
- kref_get(&orb->kref); /* orb callback reference */
+ /* Take a ref for the orb list and for the transaction callback. */
+ kref_get(&orb->kref);
+ kref_get(&orb->kref);
fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
node_id, generation, device->max_speed, offset,
@@ -532,11 +525,11 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
list_for_each_entry_safe(orb, next, &list, link) {
retval = 0;
- fw_cancel_transaction(device->card, &orb->t);
+ if (fw_cancel_transaction(device->card, &orb->t) == 0)
+ continue;
orb->rcode = RCODE_CANCELLED;
orb->callback(orb, NULL);
- kref_put(&orb->kref, free_orb); /* orb callback reference */
}
return retval;
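
The sbp2 hunks above move between two reference-counting schemes for a pending ORB: one kref reference is taken for the transaction callback and one for the ORB/status-write callback, and each completion path drops exactly the reference it owns. Below is a minimal userspace analogue of that "one get per outstanding callback, one put per completion" pattern; the names (orb_ref, orb_get, orb_put) are invented for illustration and this is not the kernel's kref API.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for a kref-counted ORB; all names are invented. */
struct orb_ref {
	atomic_int refcount;
};

static void orb_get(struct orb_ref *orb)
{
	atomic_fetch_add(&orb->refcount, 1);
}

static void orb_put(struct orb_ref *orb)
{
	/* Free only when the last outstanding reference goes away. */
	if (atomic_fetch_sub(&orb->refcount, 1) == 1) {
		printf("last reference dropped, freeing orb\n");
		free(orb);
	}
}

int main(void)
{
	struct orb_ref *orb = malloc(sizeof(*orb));

	if (!orb)
		return 1;
	atomic_init(&orb->refcount, 1);	/* submitter's own reference */
	orb_get(orb);			/* for the transaction callback */
	orb_get(orb);			/* for the status-write callback */

	orb_put(orb);	/* transaction callback ran */
	orb_put(orb);	/* status write arrived */
	orb_put(orb);	/* submitter is done with the ORB */
	return 0;
}
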
diff --git a/trunk/drivers/gpio/sx150x.c b/trunk/drivers/gpio/sx150x.c
index 823559ab0e24..b42f42ca70c3 100644
--- a/trunk/drivers/gpio/sx150x.c
+++ b/trunk/drivers/gpio/sx150x.c
@@ -459,32 +459,16 @@ static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg)
return err;
}
-static int sx150x_reset(struct sx150x_chip *chip)
-{
- int err;
-
- err = i2c_smbus_write_byte_data(chip->client,
- chip->dev_cfg->reg_reset,
- 0x12);
- if (err < 0)
- return err;
-
- err = i2c_smbus_write_byte_data(chip->client,
- chip->dev_cfg->reg_reset,
- 0x34);
- return err;
-}
-
static int sx150x_init_hw(struct sx150x_chip *chip,
struct sx150x_platform_data *pdata)
{
int err = 0;
- if (pdata->reset_during_probe) {
- err = sx150x_reset(chip);
- if (err < 0)
- return err;
- }
+ err = i2c_smbus_write_word_data(chip->client,
+ chip->dev_cfg->reg_reset,
+ 0x3412);
+ if (err < 0)
+ return err;
err = sx150x_i2c_write(chip->client,
chip->dev_cfg->reg_misc,
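
The sx150x change above folds two byte writes of 0x12 and then 0x34 into a single i2c_smbus_write_word_data() of 0x3412. That substitution relies on SMBus word transfers putting the low byte on the wire first, so the device still sees 0x12 followed by 0x34. A tiny stand-alone sketch of that byte ordering (plain C, no I2C involved):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t word = 0x3412;
	uint8_t first  = word & 0xff;	/* low byte leaves first: 0x12 */
	uint8_t second = word >> 8;	/* then the high byte:    0x34 */

	printf("SMBus word 0x%04x goes out as 0x%02x, 0x%02x\n",
	       word, first, second);
	return 0;
}
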
diff --git a/trunk/drivers/gpu/drm/drm_crtc_helper.c b/trunk/drivers/gpu/drm/drm_crtc_helper.c
index d2ab01e90a96..7e31d4348340 100644
--- a/trunk/drivers/gpu/drm/drm_crtc_helper.c
+++ b/trunk/drivers/gpu/drm/drm_crtc_helper.c
@@ -34,9 +34,6 @@
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
-static bool drm_kms_helper_poll = true;
-module_param_named(poll, drm_kms_helper_poll, bool, 0600);
-
static void drm_mode_validate_flag(struct drm_connector *connector,
int flags)
{
@@ -102,10 +99,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
connector->status = connector_status_disconnected;
if (connector->funcs->force)
connector->funcs->force(connector);
- } else {
+ } else
connector->status = connector->funcs->detect(connector);
- drm_helper_hpd_irq_event(dev);
- }
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
@@ -115,10 +110,11 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
}
count = (*connector_funcs->get_modes)(connector);
- if (count == 0 && connector->status == connector_status_connected)
+ if (!count) {
count = drm_add_modes_noedid(connector, 1024, 768);
- if (count == 0)
- goto prune;
+ if (!count)
+ return 0;
+ }
drm_mode_connector_list_update(connector);
@@ -844,9 +840,6 @@ static void output_poll_execute(struct work_struct *work)
enum drm_connector_status old_status, status;
bool repoll = false, changed = false;
- if (!drm_kms_helper_poll)
- return;
-
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -897,9 +890,6 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
bool poll = false;
struct drm_connector *connector;
- if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
- return;
-
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->polled)
poll = true;
@@ -929,10 +919,8 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
{
if (!dev->mode_config.poll_enabled)
return;
-
/* kill timer and schedule immediate execution, this doesn't block */
cancel_delayed_work(&dev->mode_config.output_poll_work);
- if (drm_kms_helper_poll)
- queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
+ queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
diff --git a/trunk/drivers/gpu/drm/drm_drv.c b/trunk/drivers/gpu/drm/drm_drv.c
index 84da748555bc..90288ec7c284 100644
--- a/trunk/drivers/gpu/drm/drm_drv.c
+++ b/trunk/drivers/gpu/drm/drm_drv.c
@@ -55,9 +55,6 @@
static int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
- [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}
-
/** Ioctl table */
static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
@@ -424,7 +421,6 @@ long drm_ioctl(struct file *filp,
int retcode = -EINVAL;
char stack_kdata[128];
char *kdata = NULL;
- unsigned int usize, asize;
dev = file_priv->minor->dev;
atomic_inc(&dev->ioctl_count);
@@ -440,18 +436,11 @@ long drm_ioctl(struct file *filp,
((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
goto err_i1;
if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
- (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
- u32 drv_size;
+ (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
- drv_size = _IOC_SIZE(ioctl->cmd_drv);
- usize = asize = _IOC_SIZE(cmd);
- if (drv_size > asize)
- asize = drv_size;
- }
else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
ioctl = &drm_ioctls[nr];
cmd = ioctl->cmd;
- usize = asize = _IOC_SIZE(cmd);
} else
goto err_i1;
@@ -471,10 +460,10 @@ long drm_ioctl(struct file *filp,
retcode = -EACCES;
} else {
if (cmd & (IOC_IN | IOC_OUT)) {
- if (asize <= sizeof(stack_kdata)) {
+ if (_IOC_SIZE(cmd) <= sizeof(stack_kdata)) {
kdata = stack_kdata;
} else {
- kdata = kmalloc(asize, GFP_KERNEL);
+ kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
if (!kdata) {
retcode = -ENOMEM;
goto err_i1;
@@ -484,13 +473,11 @@ long drm_ioctl(struct file *filp,
if (cmd & IOC_IN) {
if (copy_from_user(kdata, (void __user *)arg,
- usize) != 0) {
+ _IOC_SIZE(cmd)) != 0) {
retcode = -EFAULT;
goto err_i1;
}
- } else
- memset(kdata, 0, usize);
-
+ }
if (ioctl->flags & DRM_UNLOCKED)
retcode = func(dev, kdata, file_priv);
else {
@@ -501,7 +488,7 @@ long drm_ioctl(struct file *filp,
if (cmd & IOC_OUT) {
if (copy_to_user((void __user *)arg, kdata,
- usize) != 0)
+ _IOC_SIZE(cmd)) != 0)
retcode = -EFAULT;
}
}
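
The drm_ioctl() hunks above remove the usize/asize bookkeeping: the newer code allocates the kernel-side argument buffer as the larger of the size encoded in the user's cmd and the size the driver's table declares, so a driver that has grown its argument struct never works on a buffer that is too small, and it zeroes whatever userspace did not supply. The sketch below shows only that sizing decision in simplified form; it is not the exact drm_ioctl() logic, and malloc/memcpy stand in for the kernel allocation and user copy.

#include <stdlib.h>
#include <string.h>

/*
 * Size the argument buffer as max(usize, drv_size) and zero the tail the
 * caller did not provide.  Purely illustrative; names are invented.
 */
static void *alloc_ioctl_args(const void *user_arg, size_t usize, size_t drv_size)
{
	size_t asize = usize > drv_size ? usize : drv_size;
	char *kdata = malloc(asize);

	if (!kdata)
		return NULL;

	memcpy(kdata, user_arg, usize);		 /* what the caller passed */
	memset(kdata + usize, 0, asize - usize); /* the newer, unknown tail */
	return kdata;
}

int main(void)
{
	struct { int old_field; } v1_args = { 42 };
	void *kdata = alloc_ioctl_args(&v1_args, sizeof(v1_args), 16);

	free(kdata);
	return 0;
}
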
diff --git a/trunk/drivers/gpu/drm/drm_fb_helper.c b/trunk/drivers/gpu/drm/drm_fb_helper.c
index 6a5e403f9aa1..de82e201d682 100644
--- a/trunk/drivers/gpu/drm/drm_fb_helper.c
+++ b/trunk/drivers/gpu/drm/drm_fb_helper.c
@@ -94,11 +94,10 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_conn
int i;
enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
struct drm_fb_helper_cmdline_mode *cmdline_mode;
- struct drm_connector *connector;
+ struct drm_connector *connector = fb_helper_conn->connector;
if (!fb_helper_conn)
return false;
- connector = fb_helper_conn->connector;
cmdline_mode = &fb_helper_conn->cmdline_mode;
if (!mode_option)
@@ -370,7 +369,7 @@ static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
}
static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
-static void drm_fb_helper_sysrq(int dummy1)
+static void drm_fb_helper_sysrq(int dummy1, struct tty_struct *dummy3)
{
schedule_work(&drm_fb_helper_restore_work);
}
diff --git a/trunk/drivers/gpu/drm/drm_fops.c b/trunk/drivers/gpu/drm/drm_fops.c
index b744dad5c237..3a652a65546f 100644
--- a/trunk/drivers/gpu/drm/drm_fops.c
+++ b/trunk/drivers/gpu/drm/drm_fops.c
@@ -41,7 +41,6 @@
/* from BKL pushdown: note that nothing else serializes idr_find() */
DEFINE_MUTEX(drm_global_mutex);
-EXPORT_SYMBOL(drm_global_mutex);
static int drm_open_helper(struct inode *inode, struct file *filp,
struct drm_device * dev);
diff --git a/trunk/drivers/gpu/drm/drm_lock.c b/trunk/drivers/gpu/drm/drm_lock.c
index 9bf93bc9a32c..e2f70a516c34 100644
--- a/trunk/drivers/gpu/drm/drm_lock.c
+++ b/trunk/drivers/gpu/drm/drm_lock.c
@@ -92,9 +92,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
}
/* Contention */
- mutex_unlock(&drm_global_mutex);
schedule();
- mutex_lock(&drm_global_mutex);
if (signal_pending(current)) {
ret = -EINTR;
break;
diff --git a/trunk/drivers/gpu/drm/drm_mm.c b/trunk/drivers/gpu/drm/drm_mm.c
index a6bfc302ed90..da99edc50888 100644
--- a/trunk/drivers/gpu/drm/drm_mm.c
+++ b/trunk/drivers/gpu/drm/drm_mm.c
@@ -285,21 +285,21 @@ void drm_mm_put_block(struct drm_mm_node *cur)
EXPORT_SYMBOL(drm_mm_put_block);
-static int check_free_hole(unsigned long start, unsigned long end,
- unsigned long size, unsigned alignment)
+static int check_free_mm_node(struct drm_mm_node *entry, unsigned long size,
+ unsigned alignment)
{
unsigned wasted = 0;
- if (end - start < size)
+ if (entry->size < size)
return 0;
if (alignment) {
- unsigned tmp = start % alignment;
+ register unsigned tmp = entry->start % alignment;
if (tmp)
wasted = alignment - tmp;
}
- if (end >= start + size + wasted) {
+ if (entry->size >= size + wasted) {
return 1;
}
@@ -320,8 +320,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
best_size = ~0UL;
list_for_each_entry(entry, &mm->free_stack, free_stack) {
- if (!check_free_hole(entry->start, entry->start + entry->size,
- size, alignment))
+ if (!check_free_mm_node(entry, size, alignment))
continue;
if (!best_match)
@@ -354,12 +353,10 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
best_size = ~0UL;
list_for_each_entry(entry, &mm->free_stack, free_stack) {
- unsigned long adj_start = entry->start < start ?
- start : entry->start;
- unsigned long adj_end = entry->start + entry->size > end ?
- end : entry->start + entry->size;
+ if (entry->start > end || (entry->start+entry->size) < start)
+ continue;
- if (!check_free_hole(adj_start, adj_end, size, alignment))
+ if (!check_free_mm_node(entry, size, alignment))
continue;
if (!best_match)
@@ -452,8 +449,7 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
node->free_stack.prev = prev_free;
node->free_stack.next = next_free;
- if (check_free_hole(node->start, node->start + node->size,
- mm->scan_size, mm->scan_alignment)) {
+ if (check_free_mm_node(node, mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = node->start;
mm->scan_hit_size = node->size;
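
Both versions of the helper above answer the same question: does a free hole fit an allocation of size bytes at the requested alignment once the bytes wasted in front of the aligned offset are counted? A minimal, runnable restatement of that arithmetic, written in the interval form used by check_free_hole (the function name here is invented):

#include <stdio.h>

/* Does the hole [start, end) fit size bytes aligned to alignment? */
static int hole_fits(unsigned long start, unsigned long end,
		     unsigned long size, unsigned alignment)
{
	unsigned long wasted = 0;

	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned long rem = start % alignment;
		if (rem)
			wasted = alignment - rem; /* skip up to the aligned offset */
	}

	return end >= start + size + wasted;
}

int main(void)
{
	/* A 4 KiB hole starting at 0x1004 cannot hold a page-aligned page... */
	printf("%d\n", hole_fits(0x1004, 0x2004, 0x1000, 0x1000));
	/* ...but an 8 KiB hole starting at the same offset can. */
	printf("%d\n", hole_fits(0x1004, 0x3004, 0x1000, 0x1000));
	return 0;
}
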
diff --git a/trunk/drivers/gpu/drm/drm_modes.c b/trunk/drivers/gpu/drm/drm_modes.c
index 949326d2a8e5..f1f473ea97d3 100644
--- a/trunk/drivers/gpu/drm/drm_modes.c
+++ b/trunk/drivers/gpu/drm/drm_modes.c
@@ -251,10 +251,7 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
/* Fill in HSync values */
drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
- drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
- /* Fill in VSync values */
- drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH;
- drm_mode->vsync_end = drm_mode->vsync_start + vsync;
+ drm_mode->hsync_start = drm_mode->hsync_end = CVT_RB_H_SYNC;
}
/* 15/13. Find pixel clock frequency (kHz for xf86) */
drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
diff --git a/trunk/drivers/gpu/drm/drm_vm.c b/trunk/drivers/gpu/drm/drm_vm.c
index fda67468e603..3778360eceea 100644
--- a/trunk/drivers/gpu/drm/drm_vm.c
+++ b/trunk/drivers/gpu/drm/drm_vm.c
@@ -138,7 +138,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
break;
}
- if (&agpmem->head == &dev->agp->memory)
+ if (!agpmem)
goto vm_fault_error;
/*
diff --git a/trunk/drivers/gpu/drm/i810/i810_dma.c b/trunk/drivers/gpu/drm/i810/i810_dma.c
index 61b4caf220fa..0e6c131313d9 100644
--- a/trunk/drivers/gpu/drm/i810/i810_dma.c
+++ b/trunk/drivers/gpu/drm/i810/i810_dma.c
@@ -1255,21 +1255,21 @@ long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
struct drm_ioctl_desc i810_ioctls[] = {
- DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
};
int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
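
The i810 table above switches between two spellings of the same idea: DRM_IOCTL_DEF, whose removed definition appears in the drm_drv.c hunk earlier, keys each array slot by the ioctl number through a C99 designated initializer, so an entry's position in the source file does not matter. A small stand-alone illustration of that technique; the command numbers and handlers are invented.

#include <stdio.h>

#define CMD_INIT  0
#define CMD_FLUSH 3	/* deliberately sparse */

struct ioctl_desc {
	int cmd;
	int (*func)(void);
};

static int do_init(void)  { return 0; }
static int do_flush(void) { return 0; }

/* Designated initializer: the array slot comes from the command number. */
#define IOCTL_DEF(nr, fn) [nr] = { .cmd = (nr), .func = (fn) }

static const struct ioctl_desc ioctls[] = {
	IOCTL_DEF(CMD_INIT, do_init),
	IOCTL_DEF(CMD_FLUSH, do_flush),
};

int main(void)
{
	/* Unlisted slots (1 and 2) are zero-filled, so their func is NULL. */
	for (unsigned i = 0; i < sizeof(ioctls) / sizeof(ioctls[0]); i++)
		printf("slot %u: %s\n", i, ioctls[i].func ? "handler" : "empty");
	return 0;
}
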
diff --git a/trunk/drivers/gpu/drm/i830/i830_dma.c b/trunk/drivers/gpu/drm/i830/i830_dma.c
index 671aa18415ac..5168862c9227 100644
--- a/trunk/drivers/gpu/drm/i830/i830_dma.c
+++ b/trunk/drivers/gpu/drm/i830/i830_dma.c
@@ -1524,20 +1524,20 @@ long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
struct drm_ioctl_desc i830_ioctls[] = {
- DRM_IOCTL_DEF_DRV(I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
};
int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
diff --git a/trunk/drivers/gpu/drm/i915/Makefile b/trunk/drivers/gpu/drm/i915/Makefile
index 5c8e53458edb..da78f2c0d909 100644
--- a/trunk/drivers/gpu/drm/i915/Makefile
+++ b/trunk/drivers/gpu/drm/i915/Makefile
@@ -8,7 +8,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
i915_suspend.o \
i915_gem.o \
i915_gem_debug.o \
- i915_gem_evict.o \
i915_gem_tiling.o \
i915_trace_points.o \
intel_display.o \
@@ -19,7 +18,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_hdmi.o \
intel_sdvo.o \
intel_modes.o \
- intel_panel.o \
intel_i2c.o \
intel_fb.o \
intel_tv.o \
diff --git a/trunk/drivers/gpu/drm/i915/dvo.h b/trunk/drivers/gpu/drm/i915/dvo.h
index 8c2ad014c47f..0d6ff640e1c6 100644
--- a/trunk/drivers/gpu/drm/i915/dvo.h
+++ b/trunk/drivers/gpu/drm/i915/dvo.h
@@ -30,17 +30,20 @@
#include "intel_drv.h"
struct intel_dvo_device {
- const char *name;
+ char *name;
int type;
/* DVOA/B/C output register */
u32 dvo_reg;
/* GPIO register used for i2c bus to control this device */
u32 gpio;
int slave_addr;
+ struct i2c_adapter *i2c_bus;
const struct intel_dvo_dev_ops *dev_ops;
void *dev_priv;
- struct i2c_adapter *i2c_bus;
+
+ struct drm_display_mode *panel_fixed_mode;
+ bool panel_wants_dither;
};
struct intel_dvo_dev_ops {
diff --git a/trunk/drivers/gpu/drm/i915/i915_debugfs.c b/trunk/drivers/gpu/drm/i915/i915_debugfs.c
index 5e43d7076789..9214119c0154 100644
--- a/trunk/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/trunk/drivers/gpu/drm/i915/i915_debugfs.c
@@ -31,7 +31,6 @@
#include
#include "drmP.h"
#include "drm.h"
-#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
@@ -122,54 +121,6 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
return 0;
}
-static int i915_gem_pageflip_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- unsigned long flags;
- struct intel_crtc *crtc;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
- const char *pipe = crtc->pipe ? "B" : "A";
- const char *plane = crtc->plane ? "B" : "A";
- struct intel_unpin_work *work;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- work = crtc->unpin_work;
- if (work == NULL) {
- seq_printf(m, "No flip due on pipe %s (plane %s)\n",
- pipe, plane);
- } else {
- if (!work->pending) {
- seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
- pipe, plane);
- } else {
- seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
- pipe, plane);
- }
- if (work->enable_stall_check)
- seq_printf(m, "Stall check enabled, ");
- else
- seq_printf(m, "Stall check waiting for page flip ioctl, ");
- seq_printf(m, "%d prepares\n", work->pending);
-
- if (work->old_fb_obj) {
- struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
- if(obj_priv)
- seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
- }
- if (work->pending_flip_obj) {
- struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
- if(obj_priv)
- seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
- }
- }
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
-
- return 0;
-}
-
static int i915_gem_request_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -516,9 +467,6 @@ static int i915_error_state(struct seq_file *m, void *unused)
}
}
- if (error->overlay)
- intel_overlay_print_error_state(m, error->overlay);
-
out:
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
@@ -826,7 +774,6 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
- {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
diff --git a/trunk/drivers/gpu/drm/i915/i915_dma.c b/trunk/drivers/gpu/drm/i915/i915_dma.c
index 9d67b4853030..f19ffe87af3c 100644
--- a/trunk/drivers/gpu/drm/i915/i915_dma.c
+++ b/trunk/drivers/gpu/drm/i915/i915_dma.c
@@ -499,13 +499,6 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
}
}
-
- if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
- BEGIN_LP_RING(2);
- OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
- }
i915_emit_breadcrumb(dev);
return 0;
@@ -620,10 +613,8 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
ret = copy_from_user(cliprects, batch->cliprects,
batch->num_cliprects *
sizeof(struct drm_clip_rect));
- if (ret != 0) {
- ret = -EFAULT;
+ if (ret != 0)
goto fail_free;
- }
}
mutex_lock(&dev->struct_mutex);
@@ -664,10 +655,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
return -ENOMEM;
ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
- if (ret != 0) {
- ret = -EFAULT;
+ if (ret != 0)
goto fail_batch_free;
- }
if (cmdbuf->num_cliprects) {
cliprects = kcalloc(cmdbuf->num_cliprects,
@@ -680,10 +669,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
ret = copy_from_user(cliprects, cmdbuf->cliprects,
cmdbuf->num_cliprects *
sizeof(struct drm_clip_rect));
- if (ret != 0) {
- ret = -EFAULT;
+ if (ret != 0)
goto fail_clip_free;
- }
}
mutex_lock(&dev->struct_mutex);
@@ -891,7 +878,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
- int ret;
+ int ret = 0;
if (IS_I965G(dev))
pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
@@ -901,23 +888,22 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
if (mchbar_addr &&
- pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
- return 0;
+ pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
+ ret = 0;
+ goto out;
+ }
#endif
/* Get some space for it */
- dev_priv->mch_res.name = "i915 MCHBAR";
- dev_priv->mch_res.flags = IORESOURCE_MEM;
- ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
- &dev_priv->mch_res,
+ ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
MCHBAR_SIZE, MCHBAR_SIZE,
PCIBIOS_MIN_MEM,
- 0, pcibios_align_resource,
+ 0, pcibios_align_resource,
dev_priv->bridge_dev);
if (ret) {
DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
dev_priv->mch_res.start = 0;
- return ret;
+ goto out;
}
if (IS_I965G(dev))
@@ -926,7 +912,8 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
pci_write_config_dword(dev_priv->bridge_dev, reg,
lower_32_bits(dev_priv->mch_res.start));
- return 0;
+out:
+ return ret;
}
/* Setup MCHBAR if possible, return true if we should disable it again */
@@ -2088,10 +2075,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
- /* overlay on gen2 is broken and can't address above 1G */
- if (IS_GEN2(dev))
- dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
-
dev_priv->regs = ioremap(base, size);
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
@@ -2377,46 +2360,46 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
}
struct drm_ioctl_desc i915_ioctls[] = {
- DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
+ DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
+ DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
+ DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
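
Several i915_dma.c hunks above drop lines that turn a non-zero copy_from_user() result into -EFAULT. Those lines exist in the newer code because copy_from_user() returns the number of bytes it could not copy rather than an errno, so the caller has to do the translation itself. A hedged userspace analogue of that pattern; memcpy stands in for the user copy and the function names are invented.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for copy_from_user(): returns bytes NOT copied, 0 on success. */
static size_t fake_copy_from_user(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

static int fetch_cliprects(void *kbuf, const void *ubuf, size_t n)
{
	if (fake_copy_from_user(kbuf, ubuf, n) != 0)
		return -EFAULT;	/* translate "bytes left over" into an errno */
	return 0;
}

int main(void)
{
	int user_data[4] = { 1, 2, 3, 4 }, kernel_copy[4];

	printf("fetch: %d\n",
	       fetch_cliprects(kernel_copy, user_data, sizeof(user_data)));
	return 0;
}
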
diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.c b/trunk/drivers/gpu/drm/i915/i915_drv.c
index 216deb579785..5044f653e8ea 100644
--- a/trunk/drivers/gpu/drm/i915/i915_drv.c
+++ b/trunk/drivers/gpu/drm/i915/i915_drv.c
@@ -61,86 +61,91 @@ extern int intel_agp_enabled;
.driver_data = (unsigned long) info }
static const struct intel_device_info intel_i830_info = {
- .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+ .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
};
static const struct intel_device_info intel_845g_info = {
- .gen = 2, .is_i8xx = 1,
+ .is_i8xx = 1,
};
static const struct intel_device_info intel_i85x_info = {
- .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+ .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
.cursor_needs_physical = 1,
};
static const struct intel_device_info intel_i865g_info = {
- .gen = 2, .is_i8xx = 1,
+ .is_i8xx = 1,
};
static const struct intel_device_info intel_i915g_info = {
- .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+ .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
- .gen = 3, .is_i9xx = 1, .is_mobile = 1,
+ .is_i9xx = 1, .is_mobile = 1,
.cursor_needs_physical = 1,
};
static const struct intel_device_info intel_i945g_info = {
- .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+ .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
- .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
+ .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
.has_hotplug = 1, .cursor_needs_physical = 1,
};
static const struct intel_device_info intel_i965g_info = {
- .gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
- .has_hotplug = 1,
+ .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_i965gm_info = {
- .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
- .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+ .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
+ .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
+ .has_hotplug = 1,
};
static const struct intel_device_info intel_g33_info = {
- .gen = 3, .is_g33 = 1, .is_i9xx = 1,
- .need_gfx_hws = 1, .has_hotplug = 1,
+ .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_hotplug = 1,
};
static const struct intel_device_info intel_g45_info = {
- .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
- .has_pipe_cxsr = 1, .has_hotplug = 1,
+ .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_pipe_cxsr = 1,
+ .has_hotplug = 1,
};
static const struct intel_device_info intel_gm45_info = {
- .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
+ .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
- .has_pipe_cxsr = 1, .has_hotplug = 1,
+ .has_pipe_cxsr = 1,
+ .has_hotplug = 1,
};
static const struct intel_device_info intel_pineview_info = {
- .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
- .need_gfx_hws = 1, .has_hotplug = 1,
+ .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1,
+ .has_hotplug = 1,
};
static const struct intel_device_info intel_ironlake_d_info = {
- .gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
- .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
+ .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_pipe_cxsr = 1,
+ .has_hotplug = 1,
};
static const struct intel_device_info intel_ironlake_m_info = {
- .gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
- .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+ .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
+ .has_hotplug = 1,
};
static const struct intel_device_info intel_sandybridge_d_info = {
- .gen = 6, .is_i965g = 1, .is_i9xx = 1,
- .need_gfx_hws = 1, .has_hotplug = 1,
+ .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_hotplug = 1, .is_gen6 = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
- .gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
- .need_gfx_hws = 1, .has_hotplug = 1,
+ .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_hotplug = 1, .is_gen6 = 1,
};
static const struct pci_device_id pciidlist[] = { /* aka */
@@ -175,12 +180,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
- INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
- INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
- INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
- INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
- INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
{0, 0, 0}
};
diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.h b/trunk/drivers/gpu/drm/i915/i915_drv.h
index af4a263cf257..906663b9929e 100644
--- a/trunk/drivers/gpu/drm/i915/i915_drv.h
+++ b/trunk/drivers/gpu/drm/i915/i915_drv.h
@@ -113,9 +113,6 @@ struct intel_opregion {
int enabled;
};
-struct intel_overlay;
-struct intel_overlay_error_state;
-
struct drm_i915_master_private {
drm_local_map_t *sarea;
struct _drm_i915_sarea *sarea_priv;
@@ -169,7 +166,6 @@ struct drm_i915_error_state {
u32 purgeable:1;
} *active_bo;
u32 active_bo_count;
- struct intel_overlay_error_state *overlay;
};
struct drm_i915_display_funcs {
@@ -190,8 +186,9 @@ struct drm_i915_display_funcs {
/* clock gating init */
};
+struct intel_overlay;
+
struct intel_device_info {
- u8 gen;
u8 is_mobile : 1;
u8 is_i8xx : 1;
u8 is_i85x : 1;
@@ -207,6 +204,7 @@ struct intel_device_info {
u8 is_broadwater : 1;
u8 is_crestline : 1;
u8 is_ironlake : 1;
+ u8 is_gen6 : 1;
u8 has_fbc : 1;
u8 has_rc6 : 1;
u8 has_pipe_cxsr : 1;
@@ -244,7 +242,6 @@ typedef struct drm_i915_private {
struct pci_dev *bridge_dev;
struct intel_ring_buffer render_ring;
struct intel_ring_buffer bsd_ring;
- uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
void *seqno_page;
@@ -254,7 +251,6 @@ typedef struct drm_i915_private {
drm_local_map_t hws_map;
struct drm_gem_object *seqno_obj;
struct drm_gem_object *pwrctx;
- struct drm_gem_object *renderctx;
struct resource mch_res;
@@ -289,9 +285,6 @@ typedef struct drm_i915_private {
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
int num_pipe;
- u32 flush_rings;
-#define FLUSH_RENDER_RING 0x1
-#define FLUSH_BSD_RING 0x2
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
@@ -575,6 +568,8 @@ typedef struct drm_i915_private {
*/
struct delayed_work retire_work;
+ uint32_t next_gem_seqno;
+
/**
* Waiting sequence number, if any
*/
@@ -615,8 +610,6 @@ typedef struct drm_i915_private {
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
unsigned int lvds_border_bits;
- /* Panel fitter placement and size for Ironlake+ */
- u32 pch_pf_pos, pch_pf_size;
struct drm_crtc *plane_to_crtc_mapping[2];
struct drm_crtc *pipe_to_crtc_mapping[2];
@@ -676,8 +669,6 @@ struct drm_i915_gem_object {
struct list_head list;
/** This object's place on GPU write list */
struct list_head gpu_write_list;
- /** This object's place on eviction list */
- struct list_head evict_list;
/**
* This is set if the object is on the active or flushing lists
@@ -987,7 +978,6 @@ int i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end);
-int i915_gpu_idle(struct drm_device *dev);
int i915_gem_idle(struct drm_device *dev);
uint32_t i915_add_request(struct drm_device *dev,
struct drm_file *file_priv,
@@ -1001,9 +991,7 @@ int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
int write);
int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj,
- int id,
- int align);
+ struct drm_gem_object *obj, int id);
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
@@ -1015,11 +1003,6 @@ int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
void i915_gem_shrinker_init(void);
void i915_gem_shrinker_exit(void);
-/* i915_gem_evict.c */
-int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment);
-int i915_gem_evict_everything(struct drm_device *dev);
-int i915_gem_evict_inactive(struct drm_device *dev);
-
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
@@ -1083,10 +1066,6 @@ extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_detect_pch (struct drm_device *dev);
extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
-/* overlay */
-extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
-extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
-
/**
* Lock test for when it's just for synchronization of ring access.
*
@@ -1113,26 +1092,26 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
#define I915_VERBOSE 0
#define BEGIN_LP_RING(n) do { \
- drm_i915_private_t *dev_priv__ = dev->dev_private; \
+ drm_i915_private_t *dev_priv = dev->dev_private; \
if (I915_VERBOSE) \
DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \
- intel_ring_begin(dev, &dev_priv__->render_ring, (n)); \
+ intel_ring_begin(dev, &dev_priv->render_ring, (n)); \
} while (0)
#define OUT_RING(x) do { \
- drm_i915_private_t *dev_priv__ = dev->dev_private; \
+ drm_i915_private_t *dev_priv = dev->dev_private; \
if (I915_VERBOSE) \
DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \
- intel_ring_emit(dev, &dev_priv__->render_ring, x); \
+ intel_ring_emit(dev, &dev_priv->render_ring, x); \
} while (0)
#define ADVANCE_LP_RING() do { \
- drm_i915_private_t *dev_priv__ = dev->dev_private; \
+ drm_i915_private_t *dev_priv = dev->dev_private; \
if (I915_VERBOSE) \
DRM_DEBUG("ADVANCE_LP_RING %x\n", \
- dev_priv__->render_ring.tail); \
- intel_ring_advance(dev, &dev_priv__->render_ring); \
+ dev_priv->render_ring.tail); \
+ intel_ring_advance(dev, &dev_priv->render_ring); \
} while(0)
/**
@@ -1162,6 +1141,7 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
@@ -1180,13 +1160,27 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
+#define IS_GEN6(dev) (INTEL_INFO(dev)->is_gen6)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
-#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
-#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
-#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
-#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
-#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
+#define IS_GEN3(dev) (IS_I915G(dev) || \
+ IS_I915GM(dev) || \
+ IS_I945G(dev) || \
+ IS_I945GM(dev) || \
+ IS_G33(dev) || \
+ IS_PINEVIEW(dev))
+#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \
+ (dev)->pci_device == 0x2982 || \
+ (dev)->pci_device == 0x2992 || \
+ (dev)->pci_device == 0x29A2 || \
+ (dev)->pci_device == 0x2A02 || \
+ (dev)->pci_device == 0x2A12 || \
+ (dev)->pci_device == 0x2E02 || \
+ (dev)->pci_device == 0x2E12 || \
+ (dev)->pci_device == 0x2E22 || \
+ (dev)->pci_device == 0x2E32 || \
+ (dev)->pci_device == 0x2A42 || \
+ (dev)->pci_device == 0x2E42)
#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev))
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
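
The i915_drv.h hunk above trades a single gen field in intel_device_info, where IS_GEN2 through IS_GEN6 each reduce to one comparison, for per-chip flag bits and a long PCI-ID list for IS_GEN4. A minimal sketch of the gen-field style of check; the struct, macro and device table below are invented for illustration and are not the driver's definitions.

#include <stdio.h>

/* Invented, cut-down analogue of a device-info struct keyed by generation. */
struct chip_info {
	unsigned char gen;
	unsigned char is_mobile:1;
};

#define IS_GEN(info, n)	((info)->gen == (n))

int main(void)
{
	static const struct chip_info i915g = { .gen = 3 };
	static const struct chip_info gm45  = { .gen = 4, .is_mobile = 1 };
	static const struct chip_info snb_m = { .gen = 6, .is_mobile = 1 };
	const struct chip_info *devs[] = { &i915g, &gm45, &snb_m };

	for (unsigned i = 0; i < 3; i++)
		printf("gen%d%s\n", (int)devs[i]->gen,
		       IS_GEN(devs[i], 6) ? " (takes the gen6 path)" : "");
	return 0;
}
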
diff --git a/trunk/drivers/gpu/drm/i915/i915_gem.c b/trunk/drivers/gpu/drm/i915/i915_gem.c
index 16fca1d1799a..0758c7802e6b 100644
--- a/trunk/drivers/gpu/drm/i915/i915_gem.c
+++ b/trunk/drivers/gpu/drm/i915/i915_gem.c
@@ -34,9 +34,7 @@
#include
#include
#include
-#include
-static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -50,6 +48,8 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
+static int i915_gem_evict_something(struct drm_device *dev, int min_size);
+static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv);
@@ -58,14 +58,6 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj);
static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);
-static inline bool
-i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
-{
- return obj_priv->gtt_space &&
- !obj_priv->active &&
- obj_priv->pin_count == 0;
-}
-
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end)
{
@@ -136,15 +128,12 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
ret = drm_gem_handle_create(file_priv, obj, &handle);
- if (ret) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_unreference_unlocked(obj);
+ if (ret)
return ret;
- }
-
- /* Sink the floating reference from kref_init(handlecount) */
- drm_gem_object_handle_unreference_unlocked(obj);
args->handle = handle;
+
return 0;
}
@@ -324,8 +313,7 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
if (ret == -ENOMEM) {
struct drm_device *dev = obj->dev;
- ret = i915_gem_evict_something(dev, obj->size,
- i915_gem_get_gtt_alignment(obj));
+ ret = i915_gem_evict_something(dev, obj->size);
if (ret)
return ret;
@@ -1048,11 +1036,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
-
- /* Maintain LRU order of "inactive" objects */
- if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
- list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
-
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1154,7 +1137,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
pgoff_t page_offset;
unsigned long pfn;
@@ -1172,6 +1155,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
goto unlock;
+ list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
ret = i915_gem_object_set_to_gtt_domain(obj, write);
if (ret)
goto unlock;
@@ -1184,9 +1169,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unlock;
}
- if (i915_gem_object_is_inactive(obj_priv))
- list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
-
pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
page_offset;
@@ -1381,6 +1363,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_mmap_gtt *args = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret;
@@ -1426,6 +1409,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex);
return ret;
}
+ list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
}
drm_gem_object_unreference(obj);
@@ -1509,16 +1493,9 @@ i915_gem_object_truncate(struct drm_gem_object *obj)
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct inode *inode;
- /* Our goal here is to return as much of the memory as
- * is possible back to the system as we are called from OOM.
- * To do this we must instruct the shmfs to drop all of its
- * backing pages, *now*. Here we mirror the actions taken
- * when by shmem_delete_inode() to release the backing store.
- */
inode = obj->filp->f_path.dentry->d_inode;
- truncate_inode_pages(inode->i_mapping, 0);
- if (inode->i_op->truncate_range)
- inode->i_op->truncate_range(inode, 0, (loff_t)-1);
+ if (inode->i_op->truncate)
+ inode->i_op->truncate (inode);
obj_priv->madv = __I915_MADV_PURGED;
}
@@ -1910,6 +1887,19 @@ i915_gem_flush(struct drm_device *dev,
flush_domains);
}
+static void
+i915_gem_flush_ring(struct drm_device *dev,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains,
+ struct intel_ring_buffer *ring)
+{
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
+ drm_agp_chipset_flush(dev);
+ ring->flush(dev, ring,
+ invalidate_domains,
+ flush_domains);
+}
+
/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
@@ -1983,6 +1973,8 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
* cause memory corruption through use-after-free.
*/
+ BUG_ON(obj_priv->active);
+
/* release the fence reg _after_ flushing */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
i915_gem_clear_fence_reg(obj);
@@ -2018,7 +2010,34 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return ret;
}
-int
+static struct drm_gem_object *
+i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *best = NULL;
+ struct drm_gem_object *first = NULL;
+
+ /* Try to find the smallest clean object */
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+ struct drm_gem_object *obj = &obj_priv->base;
+ if (obj->size >= min_size) {
+ if ((!obj_priv->dirty ||
+ i915_gem_object_is_purgeable(obj_priv)) &&
+ (!best || obj->size < best->size)) {
+ best = obj;
+ if (best->size == min_size)
+ return best;
+ }
+ if (!first)
+ first = obj;
+ }
+ }
+
+ return best ? best : first;
+}
+
+static int
i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2059,6 +2078,155 @@ i915_gpu_idle(struct drm_device *dev)
return ret;
}
+static int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+ bool lists_empty;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->render_ring.active_list) &&
+ (!HAS_BSD(dev)
+ || list_empty(&dev_priv->bsd_ring.active_list)));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+
+ if (lists_empty)
+ return -ENOSPC;
+
+ /* Flush everything (on to the inactive lists) and evict */
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ return ret;
+
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
+ ret = i915_gem_evict_from_inactive_list(dev);
+ if (ret)
+ return ret;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->render_ring.active_list) &&
+ (!HAS_BSD(dev)
+ || list_empty(&dev_priv->bsd_ring.active_list)));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+ BUG_ON(!lists_empty);
+
+ return 0;
+}
+
+static int
+i915_gem_evict_something(struct drm_device *dev, int min_size)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ int ret;
+
+ struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+ struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
+ for (;;) {
+ i915_gem_retire_requests(dev);
+
+ /* If there's an inactive buffer available now, grab it
+ * and be done.
+ */
+ obj = i915_gem_find_inactive_object(dev, min_size);
+ if (obj) {
+ struct drm_i915_gem_object *obj_priv;
+
+#if WATCH_LRU
+ DRM_INFO("%s: evicting %p\n", __func__, obj);
+#endif
+ obj_priv = to_intel_bo(obj);
+ BUG_ON(obj_priv->pin_count != 0);
+ BUG_ON(obj_priv->active);
+
+ /* Wait on the rendering and unbind the buffer. */
+ return i915_gem_object_unbind(obj);
+ }
+
+ /* If we didn't get anything, but the ring is still processing
+ * things, wait for the next to finish and hopefully leave us
+ * a buffer to evict.
+ */
+ if (!list_empty(&render_ring->request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&render_ring->request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ ret = i915_wait_request(dev,
+ request->seqno, request->ring);
+ if (ret)
+ return ret;
+
+ continue;
+ }
+
+ if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&bsd_ring->request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ ret = i915_wait_request(dev,
+ request->seqno, request->ring);
+ if (ret)
+ return ret;
+
+ continue;
+ }
+
+ /* If we didn't have anything on the request list but there
+ * are buffers awaiting a flush, emit one and try again.
+ * When we wait on it, those buffers waiting for that flush
+ * will get moved to inactive.
+ */
+ if (!list_empty(&dev_priv->mm.flushing_list)) {
+ struct drm_i915_gem_object *obj_priv;
+
+ /* Find an object that we can immediately reuse */
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+ obj = &obj_priv->base;
+ if (obj->size >= min_size)
+ break;
+
+ obj = NULL;
+ }
+
+ if (obj != NULL) {
+ uint32_t seqno;
+
+ i915_gem_flush_ring(dev,
+ obj->write_domain,
+ obj->write_domain,
+ obj_priv->ring);
+ seqno = i915_add_request(dev, NULL,
+ obj->write_domain,
+ obj_priv->ring);
+ if (seqno == 0)
+ return -ENOMEM;
+ continue;
+ }
+ }
+
+ /* If we didn't do any of the above, there's no single buffer
+ * large enough to swap out for the new one, so just evict
+ * everything and start again. (This should be rare.)
+ */
+ if (!list_empty (&dev_priv->mm.inactive_list))
+ return i915_gem_evict_from_inactive_list(dev);
+ else
+ return i915_gem_evict_everything(dev);
+ }
+}
+
int
i915_gem_object_get_pages(struct drm_gem_object *obj,
gfp_t gfpmask)
@@ -2498,7 +2666,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
#if WATCH_LRU
DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
- ret = i915_gem_evict_something(dev, obj->size, alignment);
+ ret = i915_gem_evict_something(dev, obj->size);
if (ret)
return ret;
@@ -2516,8 +2684,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
if (ret == -ENOMEM) {
/* first try to clear up some space from the GTT */
- ret = i915_gem_evict_something(dev, obj->size,
- alignment);
+ ret = i915_gem_evict_something(dev, obj->size);
if (ret) {
/* now try to shrink everyone else */
if (gfpmask) {
@@ -2547,7 +2714,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
- ret = i915_gem_evict_something(dev, obj->size, alignment);
+ ret = i915_gem_evict_something(dev, obj->size);
if (ret)
return ret;
@@ -2556,9 +2723,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
atomic_inc(&dev->gtt_count);
atomic_add(obj->size, &dev->gtt_memory);
- /* keep track of bounds object by adding it to the inactive list */
- list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
-
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
@@ -2953,7 +3117,6 @@ static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
@@ -3016,13 +3179,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
obj->pending_write_domain = obj->write_domain;
obj->read_domains = obj->pending_read_domains;
- if (flush_domains & I915_GEM_GPU_DOMAINS) {
- if (obj_priv->ring == &dev_priv->render_ring)
- dev_priv->flush_rings |= FLUSH_RENDER_RING;
- else if (obj_priv->ring == &dev_priv->bsd_ring)
- dev_priv->flush_rings |= FLUSH_BSD_RING;
- }
-
dev->invalidate_domains |= invalidate_domains;
dev->flush_domains |= flush_domains;
#if WATCH_BUF
@@ -3562,6 +3718,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ring = &dev_priv->render_ring;
}
+
if (args->buffer_count < 1) {
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
@@ -3589,7 +3746,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret != 0) {
DRM_ERROR("copy %d cliprects failed: %d\n",
args->num_cliprects, ret);
- ret = -EFAULT;
goto pre_mutex_err;
}
}
@@ -3736,7 +3892,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
*/
dev->invalidate_domains = 0;
dev->flush_domains = 0;
- dev_priv->flush_rings = 0;
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
@@ -3757,14 +3912,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
i915_gem_flush(dev,
dev->invalidate_domains,
dev->flush_domains);
- if (dev_priv->flush_rings & FLUSH_RENDER_RING)
- (void)i915_add_request(dev, file_priv,
- dev->flush_domains,
- &dev_priv->render_ring);
- if (dev_priv->flush_rings & FLUSH_BSD_RING)
+ if (dev->flush_domains & I915_GEM_GPU_DOMAINS) {
(void)i915_add_request(dev, file_priv,
- dev->flush_domains,
- &dev_priv->bsd_ring);
+ dev->flush_domains,
+ &dev_priv->render_ring);
+
+ if (HAS_BSD(dev))
+ (void)i915_add_request(dev, file_priv,
+ dev->flush_domains,
+ &dev_priv->bsd_ring);
+ }
}
for (i = 0; i < args->buffer_count; i++) {
@@ -4035,10 +4192,6 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
if (alignment == 0)
alignment = i915_gem_get_gtt_alignment(obj);
if (obj_priv->gtt_offset & (alignment - 1)) {
- WARN(obj_priv->pin_count,
- "bo is already pinned with incorrect alignment:"
- " offset=%x, req.alignment=%x\n",
- obj_priv->gtt_offset, alignment);
ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
@@ -4060,7 +4213,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
atomic_inc(&dev->pin_count);
atomic_add(obj->size, &dev->pin_memory);
if (!obj_priv->active &&
- (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+ (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
+ !list_empty(&obj_priv->list))
list_del_init(&obj_priv->list);
}
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -4205,34 +4359,22 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
}
mutex_lock(&dev->struct_mutex);
-
- /* Count all active objects as busy, even if they are currently not used
- * by the gpu. Users of this interface expect objects to eventually
- * become non-busy without any further actions, therefore emit any
- * necessary flushes here.
+ /* Update the active list for the hardware's current position.
+ * Otherwise this only updates on a delayed timer or when irqs are
+ * actually unmasked, and our working set ends up being larger than
+ * required.
*/
- obj_priv = to_intel_bo(obj);
- args->busy = obj_priv->active;
- if (args->busy) {
- /* Unconditionally flush objects, even when the gpu still uses this
- * object. Userspace calling this function indicates that it wants to
- * use this buffer rather sooner than later, so issuing the required
- * flush earlier is beneficial.
- */
- if (obj->write_domain) {
- i915_gem_flush(dev, 0, obj->write_domain);
- (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
- }
-
- /* Update the active list for the hardware's current position.
- * Otherwise this only updates on a delayed timer or when irqs
- * are actually unmasked, and our working set ends up being
- * larger than required.
- */
- i915_gem_retire_requests_ring(dev, obj_priv->ring);
+ i915_gem_retire_requests(dev);
- args->busy = obj_priv->active;
- }
+ obj_priv = to_intel_bo(obj);
+ /* Don't count being on the flushing list against the object being
+ * done. Otherwise, a buffer left on the flushing list but not getting
+ * flushed (because nobody's flushing that domain) won't ever return
+ * unbusy and get reused by libdrm's bo cache. The other expected
+ * consumer of this interface, OpenGL's occlusion queries, also specs
+ * that the objects get unbusy "eventually" without any interference.
+ */
+ args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@@ -4372,6 +4514,30 @@ void i915_gem_free_object(struct drm_gem_object *obj)
i915_gem_free_object_tail(obj);
}
+/** Unbinds all inactive objects. */
+static int
+i915_gem_evict_from_inactive_list(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ while (!list_empty(&dev_priv->mm.inactive_list)) {
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = &list_first_entry(&dev_priv->mm.inactive_list,
+ struct drm_i915_gem_object,
+ list)->base;
+
+ ret = i915_gem_object_unbind(obj);
+ if (ret != 0) {
+ DRM_ERROR("Error unbinding object: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
int
i915_gem_idle(struct drm_device *dev)
{
@@ -4396,7 +4562,7 @@ i915_gem_idle(struct drm_device *dev)
/* Under UMS, be paranoid and evict. */
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_gem_evict_inactive(dev);
+ ret = i915_gem_evict_from_inactive_list(dev);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -4514,8 +4680,6 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
goto cleanup_render_ring;
}
- dev_priv->next_seqno = 1;
-
return 0;
cleanup_render_ring:
@@ -4677,7 +4841,7 @@ i915_gem_load(struct drm_device *dev)
* e.g. for cursor + overlay regs
*/
int i915_gem_init_phys_object(struct drm_device *dev,
- int id, int size, int align)
+ int id, int size)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_phys_object *phys_obj;
@@ -4692,7 +4856,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
phys_obj->id = id;
- phys_obj->handle = drm_pci_alloc(dev, size, align);
+ phys_obj->handle = drm_pci_alloc(dev, size, 0);
if (!phys_obj->handle) {
ret = -ENOMEM;
goto kfree_obj;
@@ -4774,9 +4938,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
int
i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj,
- int id,
- int align)
+ struct drm_gem_object *obj, int id)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
@@ -4795,10 +4957,11 @@ i915_gem_attach_phys_object(struct drm_device *dev,
i915_gem_detach_phys_object(dev, obj);
}
+
/* create a new object */
if (!dev_priv->mm.phys_objs[id - 1]) {
ret = i915_gem_init_phys_object(dev, id,
- obj->size, align);
+ obj->size);
if (ret) {
DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
goto out;
diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_evict.c b/trunk/drivers/gpu/drm/i915/i915_gem_evict.c
deleted file mode 100644
index 72cae3cccad8..000000000000
--- a/trunk/drivers/gpu/drm/i915/i915_gem_evict.c
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * Copyright © 2008-2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt
- * Chris Wilson
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drv.h"
-#include "i915_drm.h"
-
-static struct drm_i915_gem_object *
-i915_gem_next_active_object(struct drm_device *dev,
- struct list_head **render_iter,
- struct list_head **bsd_iter)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;
-
- if (*render_iter != &dev_priv->render_ring.active_list)
- render_obj = list_entry(*render_iter,
- struct drm_i915_gem_object,
- list);
-
- if (HAS_BSD(dev)) {
- if (*bsd_iter != &dev_priv->bsd_ring.active_list)
- bsd_obj = list_entry(*bsd_iter,
- struct drm_i915_gem_object,
- list);
-
- if (render_obj == NULL) {
- *bsd_iter = (*bsd_iter)->next;
- return bsd_obj;
- }
-
- if (bsd_obj == NULL) {
- *render_iter = (*render_iter)->next;
- return render_obj;
- }
-
- /* XXX can we handle seqno wrapping? */
- if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
- *render_iter = (*render_iter)->next;
- return render_obj;
- } else {
- *bsd_iter = (*bsd_iter)->next;
- return bsd_obj;
- }
- } else {
- *render_iter = (*render_iter)->next;
- return render_obj;
- }
-}
-
-static bool
-mark_free(struct drm_i915_gem_object *obj_priv,
- struct list_head *unwind)
-{
- list_add(&obj_priv->evict_list, unwind);
- return drm_mm_scan_add_block(obj_priv->gtt_space);
-}
-
-#define i915_for_each_active_object(OBJ, R, B) \
- *(R) = dev_priv->render_ring.active_list.next; \
- *(B) = dev_priv->bsd_ring.active_list.next; \
- while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
-
-int
-i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct list_head eviction_list, unwind_list;
- struct drm_i915_gem_object *obj_priv, *tmp_obj_priv;
- struct list_head *render_iter, *bsd_iter;
- int ret = 0;
-
- i915_gem_retire_requests(dev);
-
- /* Re-check for free space after retiring requests */
- if (drm_mm_search_free(&dev_priv->mm.gtt_space,
- min_size, alignment, 0))
- return 0;
-
- /*
- * The goal is to evict objects and amalgamate space in LRU order.
- * The oldest idle objects reside on the inactive list, which is in
- * retirement order. The next objects to retire are those on the (per
- * ring) active list that do not have an outstanding flush. Once the
- * hardware reports completion (the seqno is updated after the
- * batchbuffer has been finished) the clean buffer objects would
- * be retired to the inactive list. Any dirty objects would be added
- * to the tail of the flushing list. So after processing the clean
- * active objects we need to emit a MI_FLUSH to retire the flushing
- * list, hence the retirement order of the flushing list is in
- * advance of the dirty objects on the active lists.
- *
- * The retirement sequence is thus:
- * 1. Inactive objects (already retired)
- * 2. Clean active objects
- * 3. Flushing list
- * 4. Dirty active objects.
- *
- * On each list, the oldest objects lie at the HEAD with the freshest
- * object on the TAIL.
- */
-
- INIT_LIST_HEAD(&unwind_list);
- drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
-
- /* First see if there is a large enough contiguous idle region... */
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- if (mark_free(obj_priv, &unwind_list))
- goto found;
- }
-
- /* Now merge in the soon-to-be-expired objects... */
- i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
- /* Does the object require an outstanding flush? */
- if (obj_priv->base.write_domain || obj_priv->pin_count)
- continue;
-
- if (mark_free(obj_priv, &unwind_list))
- goto found;
- }
-
- /* Finally add anything with a pending flush (in order of retirement) */
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
- if (obj_priv->pin_count)
- continue;
-
- if (mark_free(obj_priv, &unwind_list))
- goto found;
- }
- i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
- if (! obj_priv->base.write_domain || obj_priv->pin_count)
- continue;
-
- if (mark_free(obj_priv, &unwind_list))
- goto found;
- }
-
- /* Nothing found, clean up and bail out! */
- list_for_each_entry(obj_priv, &unwind_list, evict_list) {
- ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
- BUG_ON(ret);
- }
-
- /* We expect the caller to unpin, evict all and try again, or give up.
- * So calling i915_gem_evict_everything() is unnecessary.
- */
- return -ENOSPC;
-
-found:
- INIT_LIST_HEAD(&eviction_list);
- list_for_each_entry_safe(obj_priv, tmp_obj_priv,
- &unwind_list, evict_list) {
- if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
- /* drm_mm doesn't allow any other operations while
- * scanning, therefore store to be evicted objects on a
- * temporary list. */
- list_move(&obj_priv->evict_list, &eviction_list);
- }
- }
-
- /* Unbinding will emit any required flushes */
- list_for_each_entry_safe(obj_priv, tmp_obj_priv,
- &eviction_list, evict_list) {
-#if WATCH_LRU
- DRM_INFO("%s: evicting %p\n", __func__, obj);
-#endif
- ret = i915_gem_object_unbind(&obj_priv->base);
- if (ret)
- return ret;
- }
-
- /* The just created free hole should be on the top of the free stack
- * maintained by drm_mm, so this BUG_ON actually executes in O(1).
- * Furthermore all accessed data has just recently been used, so it
- * should be really fast, too. */
- BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
- alignment, 0));
-
- return 0;
-}
-
-int
-i915_gem_evict_everything(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
- bool lists_empty;
-
- spin_lock(&dev_priv->mm.active_list_lock);
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list) &&
- (!HAS_BSD(dev)
- || list_empty(&dev_priv->bsd_ring.active_list)));
- spin_unlock(&dev_priv->mm.active_list_lock);
-
- if (lists_empty)
- return -ENOSPC;
-
- /* Flush everything (on to the inactive lists) and evict */
- ret = i915_gpu_idle(dev);
- if (ret)
- return ret;
-
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
- ret = i915_gem_evict_inactive(dev);
- if (ret)
- return ret;
-
- spin_lock(&dev_priv->mm.active_list_lock);
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list) &&
- (!HAS_BSD(dev)
- || list_empty(&dev_priv->bsd_ring.active_list)));
- spin_unlock(&dev_priv->mm.active_list_lock);
- BUG_ON(!lists_empty);
-
- return 0;
-}
-
-/** Unbinds all inactive objects. */
-int
-i915_gem_evict_inactive(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- while (!list_empty(&dev_priv->mm.inactive_list)) {
- struct drm_gem_object *obj;
- int ret;
-
- obj = &list_first_entry(&dev_priv->mm.inactive_list,
- struct drm_i915_gem_object,
- list)->base;
-
- ret = i915_gem_object_unbind(obj);
- if (ret != 0) {
- DRM_ERROR("Error unbinding object: %d\n", ret);
- return ret;
- }
- }
-
- return 0;
-}
diff --git a/trunk/drivers/gpu/drm/i915/i915_irq.c b/trunk/drivers/gpu/drm/i915/i915_irq.c
index 744225ebb4b2..85785a8844ed 100644
--- a/trunk/drivers/gpu/drm/i915/i915_irq.c
+++ b/trunk/drivers/gpu/drm/i915/i915_irq.c
@@ -425,11 +425,9 @@ static struct drm_i915_error_object *
i915_error_object_create(struct drm_device *dev,
struct drm_gem_object *src)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_object *dst;
struct drm_i915_gem_object *src_priv;
int page, page_count;
- u32 reloc_offset;
if (src == NULL)
return NULL;
@@ -444,27 +442,18 @@ i915_error_object_create(struct drm_device *dev,
if (dst == NULL)
return NULL;
- reloc_offset = src_priv->gtt_offset;
for (page = 0; page < page_count; page++) {
+ void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
unsigned long flags;
- void __iomem *s;
- void *d;
- d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
if (d == NULL)
goto unwind;
-
local_irq_save(flags);
- s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- reloc_offset,
- KM_IRQ0);
- memcpy_fromio(d, s, PAGE_SIZE);
- io_mapping_unmap_atomic(s, KM_IRQ0);
+ s = kmap_atomic(src_priv->pages[page], KM_IRQ0);
+ memcpy(d, s, PAGE_SIZE);
+ kunmap_atomic(s, KM_IRQ0);
local_irq_restore(flags);
-
dst->pages[page] = d;
-
- reloc_offset += PAGE_SIZE;
}
dst->page_count = page_count;
dst->gtt_offset = src_priv->gtt_offset;
@@ -500,7 +489,6 @@ i915_error_state_free(struct drm_device *dev,
i915_error_object_free(error->batchbuffer[1]);
i915_error_object_free(error->ringbuffer);
kfree(error->active_bo);
- kfree(error->overlay);
kfree(error);
}
@@ -624,57 +612,18 @@ static void i915_capture_error_state(struct drm_device *dev)
if (batchbuffer[1] == NULL &&
error->acthd >= obj_priv->gtt_offset &&
- error->acthd < obj_priv->gtt_offset + obj->size)
+ error->acthd < obj_priv->gtt_offset + obj->size &&
+ batchbuffer[0] != obj)
batchbuffer[1] = obj;
count++;
}
- /* Scan the other lists for completeness for those bizarre errors. */
- if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
- if (batchbuffer[0] == NULL &&
- bbaddr >= obj_priv->gtt_offset &&
- bbaddr < obj_priv->gtt_offset + obj->size)
- batchbuffer[0] = obj;
-
- if (batchbuffer[1] == NULL &&
- error->acthd >= obj_priv->gtt_offset &&
- error->acthd < obj_priv->gtt_offset + obj->size)
- batchbuffer[1] = obj;
-
- if (batchbuffer[0] && batchbuffer[1])
- break;
- }
- }
- if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
- if (batchbuffer[0] == NULL &&
- bbaddr >= obj_priv->gtt_offset &&
- bbaddr < obj_priv->gtt_offset + obj->size)
- batchbuffer[0] = obj;
-
- if (batchbuffer[1] == NULL &&
- error->acthd >= obj_priv->gtt_offset &&
- error->acthd < obj_priv->gtt_offset + obj->size)
- batchbuffer[1] = obj;
-
- if (batchbuffer[0] && batchbuffer[1])
- break;
- }
- }
/* We need to copy these to an anonymous buffer as the simplest
 * method to avoid being overwritten by userspace.
*/
error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
- if (batchbuffer[1] != batchbuffer[0])
- error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
- else
- error->batchbuffer[1] = NULL;
+ error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
/* Record the ringbuffer */
error->ringbuffer = i915_error_object_create(dev,
@@ -718,8 +667,6 @@ static void i915_capture_error_state(struct drm_device *dev)
do_gettimeofday(&error->time);
- error->overlay = intel_overlay_capture_error_state(dev);
-
spin_lock_irqsave(&dev_priv->error_lock, flags);
if (dev_priv->first_error == NULL) {
dev_priv->first_error = error;
@@ -887,49 +834,6 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
queue_work(dev_priv->wq, &dev_priv->error_work);
}
-static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_gem_object *obj_priv;
- struct intel_unpin_work *work;
- unsigned long flags;
- bool stall_detected;
-
- /* Ignore early vblank irqs */
- if (intel_crtc == NULL)
- return;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- work = intel_crtc->unpin_work;
-
- if (work == NULL || work->pending || !work->enable_stall_check) {
- /* Either the pending flip IRQ arrived, or we're too early. Don't check */
- spin_unlock_irqrestore(&dev->event_lock, flags);
- return;
- }
-
- /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
- obj_priv = to_intel_bo(work->pending_flip_obj);
- if(IS_I965G(dev)) {
- int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
- stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
- } else {
- int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
- stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset +
- crtc->y * crtc->fb->pitch +
- crtc->x * crtc->fb->bits_per_pixel/8);
- }
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- if (stall_detected) {
- DRM_DEBUG_DRIVER("Pageflip stall detected\n");
- intel_prepare_page_flip(dev, intel_crtc->plane);
- }
-}
-
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -1047,19 +951,15 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
if (pipea_stats & vblank_status) {
vblank++;
drm_handle_vblank(dev, 0);
- if (!dev_priv->flip_pending_is_done) {
- i915_pageflip_stall_check(dev, 0);
+ if (!dev_priv->flip_pending_is_done)
intel_finish_page_flip(dev, 0);
- }
}
if (pipeb_stats & vblank_status) {
vblank++;
drm_handle_vblank(dev, 1);
- if (!dev_priv->flip_pending_is_done) {
- i915_pageflip_stall_check(dev, 1);
+ if (!dev_priv->flip_pending_is_done)
intel_finish_page_flip(dev, 1);
- }
}
if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
@@ -1350,25 +1250,7 @@ void i915_hangcheck_elapsed(unsigned long data)
i915_seqno_passed(i915_get_gem_seqno(dev,
&dev_priv->render_ring),
i915_get_tail_request(dev)->seqno)) {
- bool missed_wakeup = false;
-
dev_priv->hangcheck_count = 0;
-
- /* Issue a wake-up to catch stuck h/w. */
- if (dev_priv->render_ring.waiting_gem_seqno &&
- waitqueue_active(&dev_priv->render_ring.irq_queue)) {
- DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
- missed_wakeup = true;
- }
-
- if (dev_priv->bsd_ring.waiting_gem_seqno &&
- waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
- DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
- missed_wakeup = true;
- }
-
- if (missed_wakeup)
- DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
return;
}
@@ -1436,17 +1318,12 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
(void) I915_READ(DEIER);
- /* Gen6 only needs render pipe_control now */
- if (IS_GEN6(dev))
- render_mask = GT_PIPE_NOTIFY;
-
+ /* user interrupt should be enabled, but masked initially */
dev_priv->gt_irq_mask_reg = ~render_mask;
dev_priv->gt_irq_enable_reg = render_mask;
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- if (IS_GEN6(dev))
- I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
(void) I915_READ(GTIER);
diff --git a/trunk/drivers/gpu/drm/i915/i915_opregion.c b/trunk/drivers/gpu/drm/i915/i915_opregion.c
index ea5d3fea4b61..d1bf92b99788 100644
--- a/trunk/drivers/gpu/drm/i915/i915_opregion.c
+++ b/trunk/drivers/gpu/drm/i915/i915_opregion.c
@@ -114,6 +114,10 @@ struct opregion_asle {
#define ASLE_REQ_MSK 0xf
/* response bits of ASLE irq request */
+#define ASLE_ALS_ILLUM_FAIL (2<<10)
+#define ASLE_BACKLIGHT_FAIL (2<<12)
+#define ASLE_PFIT_FAIL (2<<14)
+#define ASLE_PWM_FREQ_FAIL (2<<16)
#define ASLE_ALS_ILLUM_FAILED (1<<10)
#define ASLE_BACKLIGHT_FAILED (1<<12)
#define ASLE_PFIT_FAILED (1<<14)
@@ -151,11 +155,11 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
u32 max_backlight, level, shift;
if (!(bclp & ASLE_BCLP_VALID))
- return ASLE_BACKLIGHT_FAILED;
+ return ASLE_BACKLIGHT_FAIL;
bclp &= ASLE_BCLP_MSK;
if (bclp < 0 || bclp > 255)
- return ASLE_BACKLIGHT_FAILED;
+ return ASLE_BACKLIGHT_FAIL;
blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
@@ -207,7 +211,7 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
/* Panel fitting is currently controlled by the X code, so this is a
noop until modesetting support works fully */
if (!(pfit & ASLE_PFIT_VALID))
- return ASLE_PFIT_FAILED;
+ return ASLE_PFIT_FAIL;
return 0;
}
diff --git a/trunk/drivers/gpu/drm/i915/i915_reg.h b/trunk/drivers/gpu/drm/i915/i915_reg.h
index 4f5e15577e89..281db6e5403a 100644
--- a/trunk/drivers/gpu/drm/i915/i915_reg.h
+++ b/trunk/drivers/gpu/drm/i915/i915_reg.h
@@ -170,7 +170,6 @@
#define MI_NO_WRITE_FLUSH (1 << 2)
#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
-#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
#define MI_OVERLAY_FLIP MI_INSTR(0x11,0)
@@ -181,12 +180,6 @@
#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
-#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
-#define MI_MM_SPACE_GTT (1<<8)
-#define MI_MM_SPACE_PHYSICAL (0<<8)
-#define MI_SAVE_EXT_STATE_EN (1<<3)
-#define MI_RESTORE_EXT_STATE_EN (1<<2)
-#define MI_RESTORE_INHIBIT (1<<0)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
@@ -319,7 +312,6 @@
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
-# define MI_FLUSH_ENABLE (1 << 11)
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
@@ -1107,11 +1099,6 @@
#define DDRMPLL1 0X12c20
#define PEG_BAND_GAP_DATA 0x14d68
-/*
- * Logical Context regs
- */
-#define CCID 0x2180
-#define CCID_EN (1<<0)
/*
* Overlay regs
*/
@@ -2082,7 +2069,6 @@
#define PIPE_DITHER_TYPE_ST01 (1 << 2)
/* Pipe A */
#define PIPEADSL 0x70000
-#define DSL_LINEMASK 0x00000fff
#define PIPEACONF 0x70008
#define PIPEACONF_ENABLE (1<<31)
#define PIPEACONF_DISABLE 0
@@ -2206,17 +2192,9 @@
#define WM1_LP_SR_EN (1<<31)
#define WM1_LP_LATENCY_SHIFT 24
#define WM1_LP_LATENCY_MASK (0x7f<<24)
-#define WM1_LP_FBC_LP1_MASK (0xf<<20)
-#define WM1_LP_FBC_LP1_SHIFT 20
#define WM1_LP_SR_MASK (0x1ff<<8)
#define WM1_LP_SR_SHIFT 8
#define WM1_LP_CURSOR_MASK (0x3f)
-#define WM2_LP_ILK 0x4510c
-#define WM2_LP_EN (1<<31)
-#define WM3_LP_ILK 0x45110
-#define WM3_LP_EN (1<<31)
-#define WM1S_LP_ILK 0x45120
-#define WM1S_LP_EN (1<<31)
/* Memory latency timer register */
#define MLTR_ILK 0x11222
@@ -2950,7 +2928,6 @@
#define TRANS_DP_VSYNC_ACTIVE_LOW 0
#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
#define TRANS_DP_HSYNC_ACTIVE_LOW 0
-#define TRANS_DP_SYNC_MASK (3<<3)
/* SNB eDP training params */
/* SNB A-stepping */
diff --git a/trunk/drivers/gpu/drm/i915/i915_suspend.c b/trunk/drivers/gpu/drm/i915/i915_suspend.c
index 2c6b98f2440e..6e2025274db5 100644
--- a/trunk/drivers/gpu/drm/i915/i915_suspend.c
+++ b/trunk/drivers/gpu/drm/i915/i915_suspend.c
@@ -34,7 +34,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll_reg;
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
} else {
dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
@@ -53,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
if (!i915_pipe_enabled(dev, pipe))
return;
- if (HAS_PCH_SPLIT(dev))
+ if (IS_IRONLAKE(dev))
reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
if (pipe == PIPE_A)
@@ -75,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
if (!i915_pipe_enabled(dev, pipe))
return;
- if (HAS_PCH_SPLIT(dev))
+ if (IS_IRONLAKE(dev))
reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
if (pipe == PIPE_A)
@@ -239,7 +239,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
}
@@ -247,7 +247,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPA1 = I915_READ(FPA1);
dev_priv->saveDPLL_A = I915_READ(DPLL_A);
}
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -264,10 +264,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
- if (!HAS_PCH_SPLIT(dev))
+ if (!IS_IRONLAKE(dev))
dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
@@ -304,7 +304,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
/* Pipe & plane B info */
dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPB1 = I915_READ(FPB1);
dev_priv->saveDPLL_B = I915_READ(DPLL_B);
}
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -321,10 +321,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
- if (!HAS_PCH_SPLIT(dev))
+ if (!IS_IRONLAKE(dev))
dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
@@ -369,7 +369,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dpll_a_reg = PCH_DPLL_A;
dpll_b_reg = PCH_DPLL_B;
fpa0_reg = PCH_FPA0;
@@ -385,7 +385,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
fpb1_reg = FPB1;
}
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
}
@@ -395,20 +395,16 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
~DPLL_VCO_ENABLE);
- POSTING_READ(dpll_a_reg);
- udelay(150);
+ DRM_UDELAY(150);
}
I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
/* Actually enable it */
I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
- POSTING_READ(dpll_a_reg);
- udelay(150);
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+ DRM_UDELAY(150);
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
- POSTING_READ(DPLL_A_MD);
- }
- udelay(150);
+ DRM_UDELAY(150);
/* Restore mode */
I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
@@ -417,10 +413,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
- if (!HAS_PCH_SPLIT(dev))
+ if (!IS_IRONLAKE(dev))
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
@@ -464,20 +460,16 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
~DPLL_VCO_ENABLE);
- POSTING_READ(dpll_b_reg);
- udelay(150);
+ DRM_UDELAY(150);
}
I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
/* Actually enable it */
I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
- POSTING_READ(dpll_b_reg);
- udelay(150);
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+ DRM_UDELAY(150);
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
- POSTING_READ(DPLL_B_MD);
- }
- udelay(150);
+ DRM_UDELAY(150);
/* Restore mode */
I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
@@ -486,10 +478,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
- if (!HAS_PCH_SPLIT(dev))
+ if (!IS_IRONLAKE(dev))
I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
@@ -554,14 +546,14 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveCURSIZE = I915_READ(CURSIZE);
/* CRT state */
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->saveADPA = I915_READ(PCH_ADPA);
} else {
dev_priv->saveADPA = I915_READ(ADPA);
}
/* LVDS state */
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
@@ -579,10 +571,10 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveLVDS = I915_READ(LVDS);
}
- if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
+ if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
@@ -610,7 +602,7 @@ void i915_save_display(struct drm_device *dev)
/* Only save FBC state on the platform that supports FBC */
if (I915_HAS_FBC(dev)) {
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE_M(dev)) {
dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
} else if (IS_GM45(dev)) {
dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
@@ -626,7 +618,7 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveVGA0 = I915_READ(VGA0);
dev_priv->saveVGA1 = I915_READ(VGA1);
dev_priv->saveVGA_PD = I915_READ(VGA_PD);
- if (HAS_PCH_SPLIT(dev))
+ if (IS_IRONLAKE(dev))
dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
else
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
@@ -668,24 +660,24 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
/* CRT state */
- if (HAS_PCH_SPLIT(dev))
+ if (IS_IRONLAKE(dev))
I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
else
I915_WRITE(ADPA, dev_priv->saveADPA);
/* LVDS state */
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
} else if (IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->saveLVDS);
- if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
+ if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
@@ -716,7 +708,7 @@ void i915_restore_display(struct drm_device *dev)
/* only restore FBC info on the platform that supports FBC*/
if (I915_HAS_FBC(dev)) {
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE_M(dev)) {
ironlake_disable_fbc(dev);
I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
} else if (IS_GM45(dev)) {
@@ -731,15 +723,14 @@ void i915_restore_display(struct drm_device *dev)
}
}
/* VGA state */
- if (HAS_PCH_SPLIT(dev))
+ if (IS_IRONLAKE(dev))
I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
else
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
I915_WRITE(VGA0, dev_priv->saveVGA0);
I915_WRITE(VGA1, dev_priv->saveVGA1);
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
- POSTING_READ(VGA_PD);
- udelay(150);
+ DRM_UDELAY(150);
i915_restore_vga(dev);
}
@@ -757,7 +748,7 @@ int i915_save_state(struct drm_device *dev)
i915_save_display(dev);
/* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->saveDEIER = I915_READ(DEIER);
dev_priv->saveDEIMR = I915_READ(DEIMR);
dev_priv->saveGTIER = I915_READ(GTIER);
@@ -771,7 +762,7 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveIMR = I915_READ(IMR);
}
- if (HAS_PCH_SPLIT(dev))
+ if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
/* Cache mode state */
@@ -829,7 +820,7 @@ int i915_restore_state(struct drm_device *dev)
i915_restore_display(dev);
/* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(DEIER, dev_priv->saveDEIER);
I915_WRITE(DEIMR, dev_priv->saveDEIMR);
I915_WRITE(GTIER, dev_priv->saveGTIER);
@@ -844,7 +835,7 @@ int i915_restore_state(struct drm_device *dev)
/* Clock gating state */
intel_init_clock_gating(dev);
- if (HAS_PCH_SPLIT(dev))
+ if (IS_IRONLAKE_M(dev))
ironlake_enable_drps(dev);
/* Cache mode state */
diff --git a/trunk/drivers/gpu/drm/i915/intel_crt.c b/trunk/drivers/gpu/drm/i915/intel_crt.c
index 4b7735196cd5..ee0732b222a1 100644
--- a/trunk/drivers/gpu/drm/i915/intel_crt.c
+++ b/trunk/drivers/gpu/drm/i915/intel_crt.c
@@ -160,20 +160,19 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa, temp;
bool ret;
- bool turn_off_dac = false;
temp = adpa = I915_READ(PCH_ADPA);
- if (HAS_PCH_SPLIT(dev))
- turn_off_dac = true;
-
- adpa &= ~ADPA_CRT_HOTPLUG_MASK;
- if (turn_off_dac)
- adpa &= ~ADPA_DAC_ENABLE;
-
- /* disable HPD first */
- I915_WRITE(PCH_ADPA, adpa);
- (void)I915_READ(PCH_ADPA);
+ if (HAS_PCH_CPT(dev)) {
+ /* Disable DAC before force detect */
+ I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE);
+ (void)I915_READ(PCH_ADPA);
+ } else {
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ /* disable HPD first */
+ I915_WRITE(PCH_ADPA, adpa);
+ (void)I915_READ(PCH_ADPA);
+ }
adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
ADPA_CRT_HOTPLUG_WARMUP_10MS |
@@ -186,11 +185,10 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
I915_WRITE(PCH_ADPA, adpa);
- if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
- 1000, 1))
- DRM_ERROR("timed out waiting for FORCE_TRIGGER");
+ while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
+ ;
- if (turn_off_dac) {
+ if (HAS_PCH_CPT(dev)) {
I915_WRITE(PCH_ADPA, temp);
(void)I915_READ(PCH_ADPA);
}
@@ -239,13 +237,17 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
for (i = 0; i < tries ; i++) {
+ unsigned long timeout;
/* turn on the FORCE_DETECT */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ timeout = jiffies + msecs_to_jiffies(1000);
/* wait for FORCE_DETECT to go off */
- if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
- CRT_HOTPLUG_FORCE_DETECT) == 0,
- 1000, 1))
- DRM_ERROR("timed out waiting for FORCE_DETECT to go off");
+ do {
+ if (!(I915_READ(PORT_HOTPLUG_EN) &
+ CRT_HOTPLUG_FORCE_DETECT))
+ break;
+ msleep(1);
+ } while (time_after(timeout, jiffies));
}
stat = I915_READ(PORT_HOTPLUG_STAT);
@@ -329,7 +331,7 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
 /* Wait for next Vblank to substitute
* border color for Color info */
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev);
st00 = I915_READ8(VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected :
@@ -506,8 +508,17 @@ static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs
.best_encoder = intel_attached_encoder,
};
+static void intel_crt_enc_destroy(struct drm_encoder *encoder)
+{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ intel_i2c_destroy(intel_encoder->ddc_bus);
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
+}
+
static const struct drm_encoder_funcs intel_crt_enc_funcs = {
- .destroy = intel_encoder_destroy,
+ .destroy = intel_crt_enc_destroy,
};
void intel_crt_init(struct drm_device *dev)
diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c
index 19daead5b525..5ec10e02341b 100644
--- a/trunk/drivers/gpu/drm/i915/intel_display.c
+++ b/trunk/drivers/gpu/drm/i915/intel_display.c
@@ -29,7 +29,6 @@
#include
#include
#include
-#include
#include "drmP.h"
#include "intel_drv.h"
#include "i915_drm.h"
@@ -977,70 +976,14 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
return true;
}
-/**
- * intel_wait_for_vblank - wait for vblank on a given pipe
- * @dev: drm device
- * @pipe: pipe to wait for
- *
- * Wait for vblank to occur on a given pipe. Needed for various bits of
- * mode setting code.
- */
-void intel_wait_for_vblank(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT);
-
- /* Clear existing vblank status. Note this will clear any other
- * sticky status fields as well.
- *
- * This races with i915_driver_irq_handler() with the result
- * that either function could miss a vblank event. Here it is not
- * fatal, as we will either wait upon the next vblank interrupt or
- * timeout. Generally speaking intel_wait_for_vblank() is only
- * called during modeset at which time the GPU should be idle and
- * should *not* be performing page flips and thus not waiting on
- * vblanks...
- * Currently, the result of us stealing a vblank from the irq
- * handler is that a single frame will be skipped during swapbuffers.
- */
- I915_WRITE(pipestat_reg,
- I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
-
- /* Wait for vblank interrupt bit to set */
- if (wait_for((I915_READ(pipestat_reg) &
- PIPE_VBLANK_INTERRUPT_STATUS),
- 50, 0))
- DRM_DEBUG_KMS("vblank wait timed out\n");
-}
-
-/**
- * intel_wait_for_vblank_off - wait for vblank after disabling a pipe
- * @dev: drm device
- * @pipe: pipe to wait for
- *
- * After disabling a pipe, we can't wait for vblank in the usual way,
- * spinning on the vblank interrupt status bit, since we won't actually
- * see an interrupt when the pipe is disabled.
- *
- * So this function waits for the display line value to settle (it
- * usually ends up stopping at the start of the next frame).
- */
-void intel_wait_for_vblank_off(struct drm_device *dev, int pipe)
+void
+intel_wait_for_vblank(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
- unsigned long timeout = jiffies + msecs_to_jiffies(100);
- u32 last_line;
-
- /* Wait for the display line to settle */
- do {
- last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
- mdelay(5);
- } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
- time_after(timeout, jiffies));
-
- if (time_after(jiffies, timeout))
- DRM_DEBUG_KMS("vblank wait timed out\n");
+ /* Wait for 20ms, i.e. one cycle at 50Hz. */
+ if (in_dbg_master())
+ mdelay(20); /* The kernel debugger cannot call msleep() */
+ else
+ msleep(20);
}
/* Parameters have changed, update FBC info */
@@ -1094,6 +1037,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1);
u32 fbc_ctl;
if (!I915_HAS_FBC(dev))
@@ -1108,11 +1052,16 @@ void i8xx_disable_fbc(struct drm_device *dev)
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) {
- DRM_DEBUG_KMS("FBC idle timed out\n");
- return;
+ while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) {
+ if (time_after(jiffies, timeout)) {
+ DRM_DEBUG_DRIVER("FBC idle timed out\n");
+ break;
+ }
+ ; /* do nothing */
}
+ intel_wait_for_vblank(dev);
+
DRM_DEBUG_KMS("disabled FBC\n");
}
@@ -1169,6 +1118,7 @@ void g4x_disable_fbc(struct drm_device *dev)
dpfc_ctl = I915_READ(DPFC_CONTROL);
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+ intel_wait_for_vblank(dev);
DRM_DEBUG_KMS("disabled FBC\n");
}
@@ -1229,6 +1179,7 @@ void ironlake_disable_fbc(struct drm_device *dev)
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+ intel_wait_for_vblank(dev);
DRM_DEBUG_KMS("disabled FBC\n");
}
@@ -1502,7 +1453,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
dspcntr &= ~DISPPLANE_TILED;
}
- if (HAS_PCH_SPLIT(dev))
+ if (IS_IRONLAKE(dev))
/* must disable */
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
@@ -1511,22 +1462,23 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
Start = obj_priv->gtt_offset;
Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- Start, Offset, x, y, fb->pitch);
+ DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
I915_WRITE(dspstride, fb->pitch);
if (IS_I965G(dev)) {
+ I915_WRITE(dspbase, Offset);
+ I915_READ(dspbase);
I915_WRITE(dspsurf, Start);
+ I915_READ(dspsurf);
I915_WRITE(dsptileoff, (y << 16) | x);
- I915_WRITE(dspbase, Offset);
} else {
I915_WRITE(dspbase, Start + Offset);
+ I915_READ(dspbase);
}
- POSTING_READ(dspbase);
- if (IS_I965G(dev) || plane == 0)
+ if ((IS_I965G(dev) || plane == 0))
intel_update_fbc(crtc, &crtc->mode);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev);
intel_increase_pllclock(crtc, true);
return 0;
@@ -1537,6 +1489,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_framebuffer *intel_fb;
@@ -1544,6 +1497,13 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_gem_object *obj;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
+ unsigned long Start, Offset;
+ int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
+ int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
+ int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
+ int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
+ int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
+ u32 dspcntr;
int ret;
/* no fb bound */
@@ -1579,18 +1539,73 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y);
- if (ret) {
+ dspcntr = I915_READ(dspcntr_reg);
+ /* Mask out pixel format bits in case we change it */
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (crtc->fb->depth == 15)
+ dspcntr |= DISPPLANE_15_16BPP;
+ else
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ if (crtc->fb->depth == 30)
+ dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
+ else
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ break;
+ default:
+ DRM_ERROR("Unknown color depth\n");
i915_gem_object_unpin(obj);
mutex_unlock(&dev->struct_mutex);
- return ret;
+ return -EINVAL;
+ }
+ if (IS_I965G(dev)) {
+ if (obj_priv->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
}
+ if (HAS_PCH_SPLIT(dev))
+ /* must disable */
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+ I915_WRITE(dspcntr_reg, dspcntr);
+
+ Start = obj_priv->gtt_offset;
+ Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ Start, Offset, x, y, crtc->fb->pitch);
+ I915_WRITE(dspstride, crtc->fb->pitch);
+ if (IS_I965G(dev)) {
+ I915_WRITE(dspbase, Offset);
+ I915_READ(dspbase);
+ I915_WRITE(dspsurf, Start);
+ I915_READ(dspsurf);
+ I915_WRITE(dsptileoff, (y << 16) | x);
+ } else {
+ I915_WRITE(dspbase, Start + Offset);
+ I915_READ(dspbase);
+ }
+
+ if ((IS_I965G(dev) || plane == 0))
+ intel_update_fbc(crtc, &crtc->mode);
+
+ intel_wait_for_vblank(dev);
+
if (old_fb) {
intel_fb = to_intel_framebuffer(old_fb);
obj_priv = to_intel_bo(intel_fb->obj);
i915_gem_object_unpin(intel_fb->obj);
}
+ intel_increase_pllclock(crtc, true);
mutex_unlock(&dev->struct_mutex);
@@ -1612,6 +1627,54 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
+/* Disable the VGA plane that we never use */
+static void i915_disable_vga (struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u8 sr1;
+ u32 vga_reg;
+
+ if (HAS_PCH_SPLIT(dev))
+ vga_reg = CPU_VGACNTRL;
+ else
+ vga_reg = VGACNTRL;
+
+ if (I915_READ(vga_reg) & VGA_DISP_DISABLE)
+ return;
+
+ I915_WRITE8(VGA_SR_INDEX, 1);
+ sr1 = I915_READ8(VGA_SR_DATA);
+ I915_WRITE8(VGA_SR_DATA, sr1 | (1 << 5));
+ udelay(100);
+
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+}
+
+static void ironlake_disable_pll_edp (struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ DRM_DEBUG_KMS("\n");
+ dpa_ctl = I915_READ(DP_A);
+ dpa_ctl &= ~DP_PLL_ENABLE;
+ I915_WRITE(DP_A, dpa_ctl);
+}
+
+static void ironlake_enable_pll_edp (struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ dpa_ctl = I915_READ(DP_A);
+ dpa_ctl |= DP_PLL_ENABLE;
+ I915_WRITE(DP_A, dpa_ctl);
+ udelay(200);
+}
+
+
static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
{
struct drm_device *dev = crtc->dev;
@@ -1865,6 +1928,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
+ int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
+ int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
+ int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS;
int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
@@ -1879,6 +1945,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
int trans_dpll_sel = (pipe == 0) ? 0 : 1;
u32 temp;
+ int n;
u32 pipe_bpc;
temp = I915_READ(pipeconf_reg);
@@ -1891,7 +1958,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
- DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
+ DRM_DEBUG_KMS("crtc %d dpms on\n", pipe);
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
temp = I915_READ(PCH_LVDS);
@@ -1901,7 +1968,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
- if (!HAS_eDP) {
+ if (HAS_eDP) {
+ /* enable eDP PLL */
+ ironlake_enable_pll_edp(crtc);
+ } else {
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
temp = I915_READ(fdi_rx_reg);
@@ -1933,19 +2003,17 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
/* Enable panel fitting for LVDS */
- if (dev_priv->pch_pf_size &&
- (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
- || HAS_eDP || intel_pch_has_edp(crtc))) {
- /* Force use of hard-coded filter coefficients
- * as some pre-programmed values are broken,
- * e.g. x201.
- */
- I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
- PF_ENABLE | PF_FILTER_MED_3x3);
- I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
- dev_priv->pch_pf_pos);
- I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
- dev_priv->pch_pf_size);
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+ || HAS_eDP || intel_pch_has_edp(crtc)) {
+ temp = I915_READ(pf_ctl_reg);
+ I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
+
+ /* currently full aspect */
+ I915_WRITE(pf_win_pos, 0);
+
+ I915_WRITE(pf_win_size,
+ (dev_priv->panel_fixed_mode->hdisplay << 16) |
+ (dev_priv->panel_fixed_mode->vdisplay));
}
/* Enable CPU pipe */
@@ -2029,10 +2097,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int reg;
reg = I915_READ(trans_dp_ctl);
- reg &= ~(TRANS_DP_PORT_SEL_MASK |
- TRANS_DP_SYNC_MASK);
- reg |= (TRANS_DP_OUTPUT_ENABLE |
- TRANS_DP_ENH_FRAMING);
+ reg &= ~TRANS_DP_PORT_SEL_MASK;
+ reg = TRANS_DP_OUTPUT_ENABLE |
+ TRANS_DP_ENH_FRAMING;
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
@@ -2070,17 +2137,18 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
I915_READ(transconf_reg);
- if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1))
- DRM_ERROR("failed to enable transcoder\n");
+ while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
+ ;
+
}
intel_crtc_load_lut(crtc);
intel_update_fbc(crtc, &crtc->mode);
- break;
+ break;
case DRM_MODE_DPMS_OFF:
- DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
+ DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
drm_vblank_off(dev, pipe);
/* Disable display plane */
@@ -2096,22 +2164,40 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
+ i915_disable_vga(dev);
+
/* disable cpu pipe, disable after all planes disabled */
temp = I915_READ(pipeconf_reg);
if ((temp & PIPEACONF_ENABLE) != 0) {
I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
-
+ I915_READ(pipeconf_reg);
+ n = 0;
/* wait for cpu pipe off, pipe state */
- if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1))
- DRM_ERROR("failed to turn off cpu pipe\n");
+ while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) {
+ n++;
+ if (n < 60) {
+ udelay(500);
+ continue;
+ } else {
+ DRM_DEBUG_KMS("pipe %d off delay\n",
+ pipe);
+ break;
+ }
+ }
} else
DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
udelay(100);
/* Disable PF */
- I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
- I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
+ temp = I915_READ(pf_ctl_reg);
+ if ((temp & PF_ENABLE) != 0) {
+ I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
+ I915_READ(pf_ctl_reg);
+ }
+ I915_WRITE(pf_win_size, 0);
+ POSTING_READ(pf_win_size);
+
/* disable CPU FDI tx and PCH FDI rx */
temp = I915_READ(fdi_tx_reg);
@@ -2158,10 +2244,20 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
temp = I915_READ(transconf_reg);
if ((temp & TRANS_ENABLE) != 0) {
I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
-
+ I915_READ(transconf_reg);
+ n = 0;
/* wait for PCH transcoder off, transcoder state */
- if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1))
- DRM_ERROR("failed to disable transcoder\n");
+ while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) {
+ n++;
+ if (n < 60) {
+ udelay(500);
+ continue;
+ } else {
+ DRM_DEBUG_KMS("transcoder %d off "
+ "delay\n", pipe);
+ break;
+ }
+ }
}
temp = I915_READ(transconf_reg);
@@ -2198,6 +2294,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
I915_READ(pch_dpll_reg);
+ if (HAS_eDP) {
+ ironlake_disable_pll_edp(crtc);
+ }
+
/* Switch from PCDclk to Rawclk */
temp = I915_READ(fdi_rx_reg);
temp &= ~FDI_SEL_PCDCLK;
@@ -2272,6 +2372,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
+ intel_update_watermarks(dev);
+
/* Enable the DPLL */
temp = I915_READ(dpll_reg);
if ((temp & DPLL_VCO_ENABLE) == 0) {
@@ -2311,6 +2413,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
intel_crtc_dpms_overlay(intel_crtc, true);
break;
case DRM_MODE_DPMS_OFF:
+ intel_update_watermarks(dev);
+
/* Give the overlay scaler a chance to disable if it's on this pipe */
intel_crtc_dpms_overlay(intel_crtc, false);
drm_vblank_off(dev, pipe);
@@ -2319,6 +2423,9 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
+ /* Disable the VGA plane that we never use */
+ i915_disable_vga(dev);
+
/* Disable display plane */
temp = I915_READ(dspcntr_reg);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -2328,8 +2435,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(dspbase_reg);
}
- /* Wait for vblank for the disable to take effect */
- intel_wait_for_vblank_off(dev, pipe);
+ if (!IS_I9XX(dev)) {
+ /* Wait for vblank for the disable to take effect */
+ intel_wait_for_vblank(dev);
+ }
/* Don't disable pipe A or pipe A PLLs if needed */
if (pipeconf_reg == PIPEACONF &&
@@ -2344,7 +2453,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
}
/* Wait for vblank for the disable to take effect. */
- intel_wait_for_vblank_off(dev, pipe);
+ intel_wait_for_vblank(dev);
temp = I915_READ(dpll_reg);
if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -2360,6 +2469,9 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
/**
* Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
*/
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
@@ -2370,29 +2482,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
int pipe = intel_crtc->pipe;
bool enabled;
- if (intel_crtc->dpms_mode == mode)
- return;
-
- intel_crtc->dpms_mode = mode;
- intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
-
- /* When switching on the display, ensure that SR is disabled
- * with multiple pipes prior to enabling to new pipe.
- *
- * When switching off the display, make sure the cursor is
- * properly hidden prior to disabling the pipe.
- */
- if (mode == DRM_MODE_DPMS_ON)
- intel_update_watermarks(dev);
- else
- intel_crtc_update_cursor(crtc);
-
dev_priv->display.dpms(crtc, mode);
- if (mode == DRM_MODE_DPMS_ON)
- intel_crtc_update_cursor(crtc);
- else
- intel_update_watermarks(dev);
+ intel_crtc->dpms_mode = mode;
if (!dev->primary->master)
return;
@@ -2444,20 +2536,6 @@ void intel_encoder_commit (struct drm_encoder *encoder)
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
-void intel_encoder_destroy(struct drm_encoder *encoder)
-{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
-
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
-
- drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
-}
-
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -2767,8 +2845,14 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
/* Don't promote wm_size to unsigned... */
if (wm_size > (long)wm->max_wm)
wm_size = wm->max_wm;
- if (wm_size <= 0)
+ if (wm_size <= 0) {
wm_size = wm->default_wm;
+ DRM_ERROR("Insufficient FIFO for plane, expect flickering:"
+ " entries required = %ld, available = %lu.\n",
+ entries_required + wm->guard_size,
+ wm->fifo_size);
+ }
+
return wm_size;
}
@@ -2783,7 +2867,7 @@ struct cxsr_latency {
unsigned long cursor_hpll_disable;
};
-static const struct cxsr_latency cxsr_latency_table[] = {
+static struct cxsr_latency cxsr_latency_table[] = {
{1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
{1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
{1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
@@ -2821,13 +2905,11 @@ static const struct cxsr_latency cxsr_latency_table[] = {
{0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
-static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
- int is_ddr3,
- int fsb,
- int mem)
+static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3,
+ int fsb, int mem)
{
- const struct cxsr_latency *latency;
int i;
+ struct cxsr_latency *latency;
if (fsb == 0 || mem == 0)
return NULL;
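intel_get_cxsr_latency() above is a plain table lookup keyed on platform type (desktop vs. mobile), memory type, FSB speed and memory speed; unknown combinations return NULL so self-refresh stays disabled. A hedged sketch of that lookup, keeping only the first latency column and three rows visible in the table above (the real struct carries several more latency fields):

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative subset of the table; the real entries have more columns. */
    struct cxsr_latency {
        int is_desktop;
        int is_ddr3;
        unsigned long fsb_freq;
        unsigned long mem_freq;
        unsigned long display_sr;
    };

    static struct cxsr_latency table[] = {
        {1, 0, 800, 400, 3382},   /* DDR2-400 SC */
        {1, 0, 800, 667, 3354},   /* DDR2-667 SC */
        {0, 1, 400, 800, 6042},   /* DDR3-800 SC */
    };

    static struct cxsr_latency *get_cxsr_latency(int is_desktop, int is_ddr3,
                                                 int fsb, int mem)
    {
        if (fsb == 0 || mem == 0)
            return NULL;

        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
            struct cxsr_latency *l = &table[i];
            if (l->is_desktop == is_desktop && l->is_ddr3 == is_ddr3 &&
                l->fsb_freq == (unsigned long)fsb &&
                l->mem_freq == (unsigned long)mem)
                return l;
        }
        return NULL;    /* caller logs "Unknown FSB/MEM found, disable CxSR" */
    }

    int main(void)
    {
        struct cxsr_latency *l = get_cxsr_latency(1, 0, 800, 667);
        printf("%s\n", l ? "found latency entry" : "unknown FSB/MEM");
        return 0;
    }
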
@@ -2848,9 +2930,13 @@ static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
static void pineview_disable_cxsr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg;
/* deactivate cxsr */
- I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
+ reg = I915_READ(DSPFW3);
+ reg &= ~(PINEVIEW_SELF_REFRESH_EN);
+ I915_WRITE(DSPFW3, reg);
+ DRM_INFO("Big FIFO is disabled\n");
}
/*
@@ -2938,12 +3024,12 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- const struct cxsr_latency *latency;
u32 reg;
unsigned long wm;
+ struct cxsr_latency *latency;
int sr_clock;
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
dev_priv->fsb_freq, dev_priv->mem_freq);
if (!latency) {
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
@@ -2989,8 +3075,9 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
/* activate cxsr */
- I915_WRITE(DSPFW3,
- I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
+ reg = I915_READ(DSPFW3);
+ reg |= PINEVIEW_SELF_REFRESH_EN;
+ I915_WRITE(DSPFW3, reg);
DRM_DEBUG_KMS("Self-refresh is enabled\n");
} else {
pineview_disable_cxsr(dev);
@@ -3267,11 +3354,12 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
int line_count;
int planea_htotal = 0, planeb_htotal = 0;
struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
/* Need htotal for all active display plane */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
+ intel_crtc = to_intel_crtc(crtc);
+ if (crtc->enabled) {
if (intel_crtc->plane == 0)
planea_htotal = crtc->mode.htotal;
else
@@ -3382,7 +3470,8 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
reg_value = I915_READ(WM1_LP_ILK);
reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
WM1_LP_CURSOR_MASK);
- reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+ reg_value |= WM1_LP_SR_EN |
+ (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
(sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
I915_WRITE(WM1_LP_ILK, reg_value);
@@ -3430,6 +3519,7 @@ static void intel_update_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
int sr_hdisplay = 0;
unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
int enabled = 0, pixel_size = 0;
@@ -3440,8 +3530,8 @@ static void intel_update_watermarks(struct drm_device *dev)
/* Get the clock config from both planes */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
+ intel_crtc = to_intel_crtc(crtc);
+ if (crtc->enabled) {
enabled++;
if (intel_crtc->plane == 0) {
DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
@@ -3499,9 +3589,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
- struct intel_encoder *has_edp_encoder = NULL;
+ bool is_edp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder = NULL;
const intel_limit_t *limit;
int ret;
struct fdi_m_n m_n = {0};
@@ -3522,12 +3613,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
drm_vblank_pre_modeset(dev, pipe);
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_encoder *intel_encoder;
- if (encoder->crtc != crtc)
+ if (!encoder || encoder->crtc != crtc)
continue;
intel_encoder = enc_to_intel_encoder(encoder);
+
switch (intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -3551,7 +3642,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
- has_edp_encoder = intel_encoder;
+ is_edp = true;
break;
}
@@ -3629,10 +3720,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int lane = 0, link_bw, bpp;
/* eDP doesn't require FDI link, so just set DP M/N
according to current link config */
- if (has_edp_encoder) {
+ if (is_edp) {
target_clock = mode->clock;
- intel_edp_link_config(has_edp_encoder,
- &lane, &link_bw);
+ intel_edp_link_config(intel_encoder,
+ &lane, &link_bw);
} else {
/* DP over FDI requires target mode clock
instead of link clock */
@@ -3653,7 +3744,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
temp |= PIPE_8BPC;
else
temp |= PIPE_6BPC;
- } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) {
+ } else if (is_edp || (is_dp && intel_pch_has_edp(crtc))) {
switch (dev_priv->edp_bpp/3) {
case 8:
temp |= PIPE_8BPC;
@@ -3726,7 +3817,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
udelay(200);
- if (has_edp_encoder) {
+ if (is_edp) {
if (dev_priv->lvds_use_ssc) {
temp |= DREF_SSC1_ENABLE;
I915_WRITE(PCH_DREF_CONTROL, temp);
@@ -3875,7 +3966,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dpll_reg = pch_dpll_reg;
}
- if (!has_edp_encoder) {
+ if (is_edp) {
+ ironlake_disable_pll_edp(crtc);
+ } else if ((dpll & DPLL_VCO_ENABLE)) {
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
I915_READ(dpll_reg);
@@ -3970,7 +4063,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
}
- if (!has_edp_encoder) {
+ if (!is_edp) {
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll);
I915_READ(dpll_reg);
@@ -4049,7 +4142,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(link_m1_reg, m_n.link_m);
I915_WRITE(link_n1_reg, m_n.link_n);
- if (has_edp_encoder) {
+ if (is_edp) {
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
} else {
/* enable FDI RX PLL too */
@@ -4074,7 +4167,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(pipeconf_reg, pipeconf);
I915_READ(pipeconf_reg);
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev);
if (IS_IRONLAKE(dev)) {
/* enable address swizzle for tiling buffer */
@@ -4087,6 +4180,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Flush the plane changes */
ret = intel_pipe_set_base(crtc, x, y, old_fb);
+ if ((IS_I965G(dev) || plane == 0))
+ intel_update_fbc(crtc, &crtc->mode);
+
intel_update_watermarks(dev);
drm_vblank_post_modeset(dev, pipe);
@@ -4120,62 +4216,6 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
}
}
-static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- bool visible = base != 0;
- u32 cntl;
-
- if (intel_crtc->cursor_visible == visible)
- return;
-
- cntl = I915_READ(CURACNTR);
- if (visible) {
- /* On these chipsets we can only modify the base whilst
- * the cursor is disabled.
- */
- I915_WRITE(CURABASE, base);
-
- cntl &= ~(CURSOR_FORMAT_MASK);
- /* XXX width must be 64, stride 256 => 0x00 << 28 */
- cntl |= CURSOR_ENABLE |
- CURSOR_GAMMA_ENABLE |
- CURSOR_FORMAT_ARGB;
- } else
- cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
- I915_WRITE(CURACNTR, cntl);
-
- intel_crtc->cursor_visible = visible;
-}
-
-static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- bool visible = base != 0;
-
- if (intel_crtc->cursor_visible != visible) {
- uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
- if (base) {
- cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
- cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
- cntl |= pipe << 28; /* Connect to correct pipe */
- } else {
- cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
- cntl |= CURSOR_MODE_DISABLE;
- }
- I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
-
- intel_crtc->cursor_visible = visible;
- }
- /* and commit changes on next vblank */
- I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
-}
-
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc)
{
@@ -4185,12 +4225,12 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
int x = intel_crtc->cursor_x;
int y = intel_crtc->cursor_y;
- u32 base, pos;
+ uint32_t base, pos;
bool visible;
pos = 0;
- if (intel_crtc->cursor_on && crtc->fb) {
+ if (crtc->fb) {
base = intel_crtc->cursor_addr;
if (x > (int) crtc->fb->width)
base = 0;
@@ -4219,14 +4259,37 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc)
pos |= y << CURSOR_Y_SHIFT;
visible = base != 0;
- if (!visible && !intel_crtc->cursor_visible)
+ if (!visible && !intel_crtc->cursor_visble)
return;
I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos);
- if (IS_845G(dev) || IS_I865G(dev))
- i845_update_cursor(crtc, base);
- else
- i9xx_update_cursor(crtc, base);
+ if (intel_crtc->cursor_visble != visible) {
+ uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
+ if (base) {
+ /* Hooray for CUR*CNTR differences */
+ if (IS_MOBILE(dev) || IS_I9XX(dev)) {
+ cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
+ cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+ cntl |= pipe << 28; /* Connect to correct pipe */
+ } else {
+ cntl &= ~(CURSOR_FORMAT_MASK);
+ cntl |= CURSOR_ENABLE;
+ cntl |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
+ }
+ } else {
+ if (IS_MOBILE(dev) || IS_I9XX(dev)) {
+ cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+ cntl |= CURSOR_MODE_DISABLE;
+ } else {
+ cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
+ }
+ }
+ I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
+
+ intel_crtc->cursor_visble = visible;
+ }
+ /* and commit changes on next vblank */
+ I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
if (visible)
intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
@@ -4291,10 +4354,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
addr = obj_priv->gtt_offset;
} else {
- int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_attach_phys_object(dev, bo,
- (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
- align);
+ (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
if (ret) {
DRM_ERROR("failed to attach phys object\n");
goto fail_locked;
@@ -4483,7 +4544,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
encoder_funcs->commit(encoder);
}
/* let the connector get through one full cycle before testing */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev);
return crtc;
}
@@ -4688,7 +4749,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
dpll = I915_READ(dpll_reg);
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev);
dpll = I915_READ(dpll_reg);
if (dpll & DISPLAY_RATE_SELECT_FPA1)
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
@@ -4732,7 +4793,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
dpll |= DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
dpll = I915_READ(dpll_reg);
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev);
dpll = I915_READ(dpll_reg);
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
@@ -4855,6 +4916,15 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
kfree(intel_crtc);
}
+struct intel_unpin_work {
+ struct work_struct work;
+ struct drm_device *dev;
+ struct drm_gem_object *old_fb_obj;
+ struct drm_gem_object *pending_flip_obj;
+ struct drm_pending_vblank_event *event;
+ int pending;
+};
+
static void intel_unpin_work_fn(struct work_struct *__work)
{
struct intel_unpin_work *work =
@@ -4942,8 +5012,7 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
- if ((++intel_crtc->unpin_work->pending) > 1)
- DRM_ERROR("Prepared flip multiple times\n");
+ intel_crtc->unpin_work->pending = 1;
} else {
DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
}
@@ -4962,9 +5031,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags, offset;
- int pipe = intel_crtc->pipe;
- u32 pf, pipesrc;
- int ret;
+ int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
+ int ret, pipesrc;
+ u32 flip_mask;
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL)
@@ -5013,73 +5082,34 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
atomic_inc(&obj_priv->pending_flip);
work->pending_flip_obj = obj;
- if (IS_GEN3(dev) || IS_GEN2(dev)) {
- u32 flip_mask;
-
- if (intel_crtc->plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
- BEGIN_LP_RING(2);
- OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
+ if (intel_crtc->plane)
+ flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+ else
+ flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
- work->enable_stall_check = true;
+ /* Wait for any previous flip to finish */
+ if (IS_GEN3(dev))
+ while (I915_READ(ISR) & flip_mask)
+ ;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+ offset = obj_priv->gtt_offset;
+ offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8);
BEGIN_LP_RING(4);
- switch(INTEL_INFO(dev)->gen) {
- case 2:
+ if (IS_I965G(dev)) {
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(obj_priv->gtt_offset + offset);
- OUT_RING(MI_NOOP);
- break;
-
- case 3:
+ OUT_RING(offset | obj_priv->tiling_mode);
+ pipesrc = I915_READ(pipesrc_reg);
+ OUT_RING(pipesrc & 0x0fff0fff);
+ } else {
OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(obj_priv->gtt_offset + offset);
+ OUT_RING(offset);
OUT_RING(MI_NOOP);
- break;
-
- case 4:
- case 5:
- /* i965+ uses the linear or tiled offsets from the
- * Display Registers (which do not change across a page-flip)
- * so we need only reprogram the base address.
- */
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
- OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
-
- /* XXX Enabling the panel-fitter across page-flip is so far
- * untested on non-native modes, so ignore it for now.
- * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
- */
- pf = 0;
- pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
- OUT_RING(pf | pipesrc);
- break;
-
- case 6:
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch | obj_priv->tiling_mode);
- OUT_RING(obj_priv->gtt_offset);
-
- pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
- pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
- OUT_RING(pf | pipesrc);
- break;
}
ADVANCE_LP_RING();
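The flip above computes the scan-out start for framebuffers shared between CRTCs as the object's GTT offset plus y rows of pitch bytes plus x pixels' worth of bytes. A small worked example of that arithmetic (the mode and pitch are only example values):

    #include <stdint.h>
    #include <stdio.h>

    /* Byte offset of a CRTC's origin inside a shared framebuffer object. */
    static uint32_t flip_offset(uint32_t gtt_offset, uint32_t pitch,
                                uint32_t bits_per_pixel, uint32_t x, uint32_t y)
    {
        return gtt_offset + y * pitch + x * bits_per_pixel / 8;
    }

    int main(void)
    {
        /* Two 1920x1080 CRTCs stacked vertically in one XRGB8888 framebuffer:
         * the second CRTC starts 1080 rows of 7680 bytes into the object. */
        printf("second CRTC offset = %u bytes\n",
               (unsigned) flip_offset(0, 1920 * 4, 32, 0, 1080));  /* 8294400 */
        return 0;
    }
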
@@ -5160,7 +5190,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
intel_crtc->cursor_addr = 0;
- intel_crtc->dpms_mode = -1;
+ intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
intel_crtc->busy = false;
@@ -5402,37 +5432,37 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
};
static struct drm_gem_object *
-intel_alloc_context_page(struct drm_device *dev)
+intel_alloc_power_context(struct drm_device *dev)
{
- struct drm_gem_object *ctx;
+ struct drm_gem_object *pwrctx;
int ret;
- ctx = i915_gem_alloc_object(dev, 4096);
- if (!ctx) {
+ pwrctx = i915_gem_alloc_object(dev, 4096);
+ if (!pwrctx) {
DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
return NULL;
}
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(ctx, 4096);
+ ret = i915_gem_object_pin(pwrctx, 4096);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
}
- ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
+ ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1);
if (ret) {
DRM_ERROR("failed to set-domain on power context: %d\n", ret);
goto err_unpin;
}
mutex_unlock(&dev->struct_mutex);
- return ctx;
+ return pwrctx;
err_unpin:
- i915_gem_object_unpin(ctx);
+ i915_gem_object_unpin(pwrctx);
err_unref:
- drm_gem_object_unreference(ctx);
+ drm_gem_object_unreference(pwrctx);
mutex_unlock(&dev->struct_mutex);
return NULL;
}
@@ -5464,6 +5494,7 @@ void ironlake_enable_drps(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rgvmodectl = I915_READ(MEMMODECTL);
u8 fmax, fmin, fstart, vstart;
+ int i = 0;
/* 100ms RC evaluation intervals */
I915_WRITE(RCUPEI, 100000);
@@ -5507,8 +5538,13 @@ void ironlake_enable_drps(struct drm_device *dev)
rgvmodectl |= MEMMODE_SWMODE_EN;
I915_WRITE(MEMMODECTL, rgvmodectl);
- if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 1, 0))
- DRM_ERROR("stuck trying to change perf mode\n");
+ while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) {
+ if (i++ > 100) {
+ DRM_ERROR("stuck trying to change perf mode\n");
+ break;
+ }
+ msleep(1);
+ }
msleep(1);
ironlake_set_drps(dev, fstart);
@@ -5668,9 +5704,6 @@ void intel_init_clock_gating(struct drm_device *dev)
I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
}
/*
* Based on the document from hardware guys the following bits
@@ -5735,29 +5768,6 @@ void intel_init_clock_gating(struct drm_device *dev)
* GPU can automatically power down the render unit if given a page
* to save state.
*/
- if (IS_IRONLAKE_M(dev)) {
- if (dev_priv->renderctx == NULL)
- dev_priv->renderctx = intel_alloc_context_page(dev);
- if (dev_priv->renderctx) {
- struct drm_i915_gem_object *obj_priv;
- obj_priv = to_intel_bo(dev_priv->renderctx);
- if (obj_priv) {
- BEGIN_LP_RING(4);
- OUT_RING(MI_SET_CONTEXT);
- OUT_RING(obj_priv->gtt_offset |
- MI_MM_SPACE_GTT |
- MI_SAVE_EXT_STATE_EN |
- MI_RESTORE_EXT_STATE_EN |
- MI_RESTORE_INHIBIT);
- OUT_RING(MI_NOOP);
- OUT_RING(MI_FLUSH);
- ADVANCE_LP_RING();
- }
- } else
- DRM_DEBUG_KMS("Failed to allocate render context."
- "Disable RC6\n");
- }
-
if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_i915_gem_object *obj_priv = NULL;
@@ -5766,7 +5776,7 @@ void intel_init_clock_gating(struct drm_device *dev)
} else {
struct drm_gem_object *pwrctx;
- pwrctx = intel_alloc_context_page(dev);
+ pwrctx = intel_alloc_power_context(dev);
if (pwrctx) {
dev_priv->pwrctx = pwrctx;
obj_priv = to_intel_bo(pwrctx);
@@ -5938,29 +5948,6 @@ static void intel_init_quirks(struct drm_device *dev)
}
}
-/* Disable the VGA plane that we never use */
-static void i915_disable_vga(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u8 sr1;
- u32 vga_reg;
-
- if (HAS_PCH_SPLIT(dev))
- vga_reg = CPU_VGACNTRL;
- else
- vga_reg = VGACNTRL;
-
- vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
- outb(1, VGA_SR_INDEX);
- sr1 = inb(VGA_SR_DATA);
- outb(sr1 | 1<<5, VGA_SR_DATA);
- vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
- udelay(300);
-
- I915_WRITE(vga_reg, VGA_DISP_DISABLE);
- POSTING_READ(vga_reg);
-}
-
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6009,9 +5996,6 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_clock_gating(dev);
- /* Just disable it once at startup */
- i915_disable_vga(dev);
-
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
intel_init_emon(dev);
@@ -6050,16 +6034,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- if (dev_priv->renderctx) {
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = to_intel_bo(dev_priv->renderctx);
- I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN);
- I915_READ(CCID);
- i915_gem_object_unpin(dev_priv->renderctx);
- drm_gem_object_unreference(dev_priv->renderctx);
- }
-
if (dev_priv->pwrctx) {
struct drm_i915_gem_object *obj_priv;
diff --git a/trunk/drivers/gpu/drm/i915/intel_dp.c b/trunk/drivers/gpu/drm/i915/intel_dp.c
index 51d142939a26..40be1fa65be1 100644
--- a/trunk/drivers/gpu/drm/i915/intel_dp.c
+++ b/trunk/drivers/gpu/drm/i915/intel_dp.c
@@ -42,11 +42,10 @@
#define DP_LINK_CONFIGURATION_SIZE 9
-#define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP)
-#define IS_PCH_eDP(i) ((i)->is_pch_edp)
+#define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP)
+#define IS_PCH_eDP(dp_priv) ((dp_priv)->is_pch_edp)
-struct intel_dp {
- struct intel_encoder base;
+struct intel_dp_priv {
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
@@ -55,39 +54,40 @@ struct intel_dp {
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[4];
+ struct intel_encoder *intel_encoder;
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
};
-static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
-{
- return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base);
-}
+static void
+intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]);
-static void intel_dp_link_train(struct intel_dp *intel_dp);
-static void intel_dp_link_down(struct intel_dp *intel_dp);
+static void
+intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP);
void
intel_edp_link_config (struct intel_encoder *intel_encoder,
- int *lane_num, int *link_bw)
+ int *lane_num, int *link_bw)
{
- struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
- *lane_num = intel_dp->lane_count;
- if (intel_dp->link_bw == DP_LINK_BW_1_62)
+ *lane_num = dp_priv->lane_count;
+ if (dp_priv->link_bw == DP_LINK_BW_1_62)
*link_bw = 162000;
- else if (intel_dp->link_bw == DP_LINK_BW_2_7)
+ else if (dp_priv->link_bw == DP_LINK_BW_2_7)
*link_bw = 270000;
}
static int
-intel_dp_max_lane_count(struct intel_dp *intel_dp)
+intel_dp_max_lane_count(struct intel_encoder *intel_encoder)
{
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int max_lane_count = 4;
- if (intel_dp->dpcd[0] >= 0x11) {
- max_lane_count = intel_dp->dpcd[2] & 0x1f;
+ if (dp_priv->dpcd[0] >= 0x11) {
+ max_lane_count = dp_priv->dpcd[2] & 0x1f;
switch (max_lane_count) {
case 1: case 2: case 4:
break;
@@ -99,9 +99,10 @@ intel_dp_max_lane_count(struct intel_dp *intel_dp)
}
static int
-intel_dp_max_link_bw(struct intel_dp *intel_dp)
+intel_dp_max_link_bw(struct intel_encoder *intel_encoder)
{
- int max_link_bw = intel_dp->dpcd[1];
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ int max_link_bw = dp_priv->dpcd[1];
switch (max_link_bw) {
case DP_LINK_BW_1_62:
@@ -125,11 +126,13 @@ intel_dp_link_clock(uint8_t link_bw)
/* I think this is a fiction */
static int
-intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock)
+intel_dp_link_required(struct drm_device *dev,
+ struct intel_encoder *intel_encoder, int pixel_clock)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+ if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv))
return (pixel_clock * dev_priv->edp_bpp) / 8;
else
return pixel_clock * 3;
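intel_dp_link_required() above estimates the bandwidth a mode needs: pixel clock times the BIOS-reported eDP bits-per-pixel divided by 8 for eDP panels, otherwise a flat 3 bytes per pixel. A quick sketch of that arithmetic with example numbers (the 18 bpp eDP figure is only an example):

    #include <stdbool.h>
    #include <stdio.h>

    /* Same units as the caller: a kHz pixel clock gives kHz-scale byte rates. */
    static int link_required(int pixel_clock_khz, bool edp, int edp_bpp)
    {
        if (edp)
            return pixel_clock_khz * edp_bpp / 8;
        return pixel_clock_khz * 3;
    }

    int main(void)
    {
        /* 1920x1080@60 has a 148.5 MHz pixel clock. */
        printf("DP  (24 bpp): %d\n", link_required(148500, false, 0));  /* 445500 */
        printf("eDP (18 bpp): %d\n", link_required(148500, true, 18));  /* 334125 */
        return 0;
    }
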
@@ -146,13 +149,14 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
- int max_lanes = intel_dp_max_lane_count(intel_dp);
+ int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
+ int max_lanes = intel_dp_max_lane_count(intel_encoder);
- if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
+ if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
dev_priv->panel_fixed_mode) {
if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
return MODE_PANEL;
@@ -163,8 +167,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
/* only refuse the mode on non-eDP since we have seen some weird eDP panels
which are outside spec tolerances but somehow work by magic */
- if (!IS_eDP(intel_dp) &&
- (intel_dp_link_required(connector->dev, intel_dp, mode->clock)
+ if (!IS_eDP(intel_encoder) &&
+ (intel_dp_link_required(connector->dev, intel_encoder, mode->clock)
> intel_dp_max_data_rate(max_link_clock, max_lanes)))
return MODE_CLOCK_HIGH;
@@ -228,17 +232,19 @@ intel_hrawclk(struct drm_device *dev)
}
static int
-intel_dp_aux_ch(struct intel_dp *intel_dp,
+intel_dp_aux_ch(struct intel_encoder *intel_encoder,
uint8_t *send, int send_bytes,
uint8_t *recv, int recv_size)
{
- uint32_t output_reg = intel_dp->output_reg;
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ uint32_t output_reg = dp_priv->output_reg;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
int i;
int recv_bytes;
+ uint32_t ctl;
uint32_t status;
uint32_t aux_clock_divider;
int try, precharge;
@@ -247,7 +253,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
*/
- if (IS_eDP(intel_dp)) {
+ if (IS_eDP(intel_encoder)) {
if (IS_GEN6(dev))
aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
else
@@ -262,43 +268,41 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
else
precharge = 5;
- if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
- DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
- I915_READ(ch_ctl));
- return -EBUSY;
- }
-
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
- for (i = 0; i < send_bytes; i += 4)
- I915_WRITE(ch_data + i,
- pack_aux(send + i, send_bytes - i));
+ for (i = 0; i < send_bytes; i += 4) {
+ uint32_t d = pack_aux(send + i, send_bytes - i);
+
+ I915_WRITE(ch_data + i, d);
+ }
+
+ ctl = (DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
/* Send the command and wait for it to complete */
- I915_WRITE(ch_ctl,
- DP_AUX_CH_CTL_SEND_BUSY |
- DP_AUX_CH_CTL_TIME_OUT_400us |
- (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
- (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
+ I915_WRITE(ch_ctl, ctl);
+ (void) I915_READ(ch_ctl);
for (;;) {
+ udelay(100);
status = I915_READ(ch_ctl);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break;
- udelay(100);
}
/* Clear done status and any errors */
- I915_WRITE(ch_ctl,
- status |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
- if (status & DP_AUX_CH_CTL_DONE)
+ I915_WRITE(ch_ctl, (status |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR));
+ (void) I915_READ(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0)
break;
}
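The loop above follows the DisplayPort rule that an AUX transaction must be attempted at least three times: it allows up to five tries, polls until the send-busy bit clears, writes the status back to clear the done and error bits, and retries only when the hardware reported a timeout. A simplified user-space sketch of that retry shape, with made-up bit positions and a simulated channel:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CTL_DONE           (1u << 30)   /* illustrative bit layout */
    #define CTL_TIME_OUT_ERROR (1u << 28)

    /* Simulated channel: the first two attempts time out, the third completes.
     * (The driver also polls a send-busy bit before reading this status.) */
    static int attempts;
    static uint32_t start_transfer(void)
    {
        attempts++;
        return attempts < 3 ? (CTL_DONE | CTL_TIME_OUT_ERROR) : CTL_DONE;
    }

    static bool aux_transfer(void)
    {
        /* Must try at least 3 times per the DP spec; allow up to 5 here. */
        for (int try = 0; try < 5; try++) {
            uint32_t status = start_transfer();
            /* (clear done/error bits by writing status back, as above) */
            if (!(status & CTL_TIME_OUT_ERROR))
                return true;
        }
        return false;
    }

    int main(void)
    {
        printf("aux transfer %s after %d attempt(s)\n",
               aux_transfer() ? "completed" : "failed", attempts);
        return 0;
    }
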
@@ -325,19 +329,22 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
/* Unload any bytes sent back from the other side */
recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+
if (recv_bytes > recv_size)
recv_bytes = recv_size;
- for (i = 0; i < recv_bytes; i += 4)
- unpack_aux(I915_READ(ch_data + i),
- recv + i, recv_bytes - i);
+ for (i = 0; i < recv_bytes; i += 4) {
+ uint32_t d = I915_READ(ch_data + i);
+
+ unpack_aux(d, recv + i, recv_bytes - i);
+ }
return recv_bytes;
}
/* Write data to the aux channel in native mode */
static int
-intel_dp_aux_native_write(struct intel_dp *intel_dp,
+intel_dp_aux_native_write(struct intel_encoder *intel_encoder,
uint16_t address, uint8_t *send, int send_bytes)
{
int ret;
@@ -354,7 +361,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
memcpy(&msg[4], send, send_bytes);
msg_bytes = send_bytes + 4;
for (;;) {
- ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
+ ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, &ack, 1);
if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
@@ -369,15 +376,15 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
/* Write a single byte to the aux channel in native mode */
static int
-intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
+intel_dp_aux_native_write_1(struct intel_encoder *intel_encoder,
uint16_t address, uint8_t byte)
{
- return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
+ return intel_dp_aux_native_write(intel_encoder, address, &byte, 1);
}
/* read bytes from a native aux channel */
static int
-intel_dp_aux_native_read(struct intel_dp *intel_dp,
+intel_dp_aux_native_read(struct intel_encoder *intel_encoder,
uint16_t address, uint8_t *recv, int recv_bytes)
{
uint8_t msg[4];
@@ -396,7 +403,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
reply_bytes = recv_bytes + 1;
for (;;) {
- ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
+ ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes,
reply, reply_bytes);
if (ret == 0)
return -EPROTO;
@@ -419,9 +426,10 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
uint8_t write_byte, uint8_t *read_byte)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- struct intel_dp *intel_dp = container_of(adapter,
- struct intel_dp,
- adapter);
+ struct intel_dp_priv *dp_priv = container_of(adapter,
+ struct intel_dp_priv,
+ adapter);
+ struct intel_encoder *intel_encoder = dp_priv->intel_encoder;
uint16_t address = algo_data->address;
uint8_t msg[5];
uint8_t reply[2];
@@ -460,7 +468,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
for (;;) {
- ret = intel_dp_aux_ch(intel_dp,
+ ret = intel_dp_aux_ch(intel_encoder,
msg, msg_bytes,
reply, reply_bytes);
if (ret < 0) {
@@ -488,42 +496,57 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
static int
-intel_dp_i2c_init(struct intel_dp *intel_dp,
+intel_dp_i2c_init(struct intel_encoder *intel_encoder,
struct intel_connector *intel_connector, const char *name)
{
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+
DRM_DEBUG_KMS("i2c_init %s\n", name);
- intel_dp->algo.running = false;
- intel_dp->algo.address = 0;
- intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
-
- memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
- intel_dp->adapter.owner = THIS_MODULE;
- intel_dp->adapter.class = I2C_CLASS_DDC;
- strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
- intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
- intel_dp->adapter.algo_data = &intel_dp->algo;
- intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
-
- return i2c_dp_aux_add_bus(&intel_dp->adapter);
+ dp_priv->algo.running = false;
+ dp_priv->algo.address = 0;
+ dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch;
+
+ memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter));
+ dp_priv->adapter.owner = THIS_MODULE;
+ dp_priv->adapter.class = I2C_CLASS_DDC;
+ strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
+ dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
+ dp_priv->adapter.algo_data = &dp_priv->algo;
+ dp_priv->adapter.dev.parent = &intel_connector->base.kdev;
+
+ return i2c_dp_aux_add_bus(&dp_priv->adapter);
}
static bool
intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int lane_count, clock;
- int max_lane_count = intel_dp_max_lane_count(intel_dp);
- int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+ int max_lane_count = intel_dp_max_lane_count(intel_encoder);
+ int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
- if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
+ if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
dev_priv->panel_fixed_mode) {
- intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
- intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
- mode, adjusted_mode);
+ struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
+
+ adjusted_mode->hdisplay = fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = fixed_mode->hsync_end;
+ adjusted_mode->htotal = fixed_mode->htotal;
+
+ adjusted_mode->vdisplay = fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = fixed_mode->vsync_end;
+ adjusted_mode->vtotal = fixed_mode->vtotal;
+
+ adjusted_mode->clock = fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+
/*
* the mode->clock is used to calculate the Data&Link M/N
* of the pipe. For the eDP the fixed clock should be used.
@@ -535,33 +558,31 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
- if (intel_dp_link_required(encoder->dev, intel_dp, mode->clock)
+ if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock)
<= link_avail) {
- intel_dp->link_bw = bws[clock];
- intel_dp->lane_count = lane_count;
- adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
+ dp_priv->link_bw = bws[clock];
+ dp_priv->lane_count = lane_count;
+ adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
DRM_DEBUG_KMS("Display port link bw %02x lane "
"count %d clock %d\n",
- intel_dp->link_bw, intel_dp->lane_count,
+ dp_priv->link_bw, dp_priv->lane_count,
adjusted_mode->clock);
return true;
}
}
}
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
+ if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
/* okay we failed just pick the highest */
- intel_dp->lane_count = max_lane_count;
- intel_dp->link_bw = bws[max_clock];
- adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
+ dp_priv->lane_count = max_lane_count;
+ dp_priv->link_bw = bws[max_clock];
+ adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
"count %d clock %d\n",
- intel_dp->link_bw, intel_dp->lane_count,
+ dp_priv->link_bw, dp_priv->lane_count,
adjusted_mode->clock);
-
return true;
}
-
return false;
}
@@ -605,14 +626,17 @@ bool intel_pch_has_edp(struct drm_crtc *crtc)
struct drm_encoder *encoder;
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_dp *intel_dp;
+ struct intel_encoder *intel_encoder;
+ struct intel_dp_priv *dp_priv;
- if (encoder->crtc != crtc)
+ if (!encoder || encoder->crtc != crtc)
continue;
- intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
- return intel_dp->is_pch_edp;
+ intel_encoder = enc_to_intel_encoder(encoder);
+ dp_priv = intel_encoder->dev_priv;
+
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT)
+ return dp_priv->is_pch_edp;
}
return false;
}
@@ -633,15 +657,18 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
* Find the lane count in the intel_encoder private
*/
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_dp *intel_dp;
+ struct intel_encoder *intel_encoder;
+ struct intel_dp_priv *dp_priv;
if (encoder->crtc != crtc)
continue;
- intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
- lane_count = intel_dp->lane_count;
- if (IS_PCH_eDP(intel_dp))
+ intel_encoder = enc_to_intel_encoder(encoder);
+ dp_priv = intel_encoder->dev_priv;
+
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+ lane_count = dp_priv->lane_count;
+ if (IS_PCH_eDP(dp_priv))
bpp = dev_priv->edp_bpp;
break;
}
@@ -697,114 +724,107 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_crtc *crtc = intel_dp->base.enc.crtc;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ struct drm_crtc *crtc = intel_encoder->enc.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- intel_dp->DP = (DP_VOLTAGE_0_4 |
+ dp_priv->DP = (DP_VOLTAGE_0_4 |
DP_PRE_EMPHASIS_0);
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- intel_dp->DP |= DP_SYNC_HS_HIGH;
+ dp_priv->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- intel_dp->DP |= DP_SYNC_VS_HIGH;
+ dp_priv->DP |= DP_SYNC_VS_HIGH;
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
- intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT;
else
- intel_dp->DP |= DP_LINK_TRAIN_OFF;
+ dp_priv->DP |= DP_LINK_TRAIN_OFF;
- switch (intel_dp->lane_count) {
+ switch (dp_priv->lane_count) {
case 1:
- intel_dp->DP |= DP_PORT_WIDTH_1;
+ dp_priv->DP |= DP_PORT_WIDTH_1;
break;
case 2:
- intel_dp->DP |= DP_PORT_WIDTH_2;
+ dp_priv->DP |= DP_PORT_WIDTH_2;
break;
case 4:
- intel_dp->DP |= DP_PORT_WIDTH_4;
+ dp_priv->DP |= DP_PORT_WIDTH_4;
break;
}
- if (intel_dp->has_audio)
- intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
+ if (dp_priv->has_audio)
+ dp_priv->DP |= DP_AUDIO_OUTPUT_ENABLE;
- memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
- intel_dp->link_configuration[0] = intel_dp->link_bw;
- intel_dp->link_configuration[1] = intel_dp->lane_count;
+ memset(dp_priv->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ dp_priv->link_configuration[0] = dp_priv->link_bw;
+ dp_priv->link_configuration[1] = dp_priv->lane_count;
/*
* Check for DPCD version > 1.1 and enhanced framing support
*/
- if (intel_dp->dpcd[0] >= 0x11 && (intel_dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
- intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- intel_dp->DP |= DP_ENHANCED_FRAMING;
+ if (dp_priv->dpcd[0] >= 0x11 && (dp_priv->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
+ dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ dp_priv->DP |= DP_ENHANCED_FRAMING;
}
/* CPT DP's pipe select is decided in TRANS_DP_CTL */
if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
- intel_dp->DP |= DP_PIPEB_SELECT;
+ dp_priv->DP |= DP_PIPEB_SELECT;
- if (IS_eDP(intel_dp)) {
+ if (IS_eDP(intel_encoder)) {
/* don't miss out required setting for eDP */
- intel_dp->DP |= DP_PLL_ENABLE;
+ dp_priv->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
- intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+ dp_priv->DP |= DP_PLL_FREQ_160MHZ;
else
- intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ dp_priv->DP |= DP_PLL_FREQ_270MHZ;
}
}
static void ironlake_edp_panel_on (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp;
+ unsigned long timeout = jiffies + msecs_to_jiffies(5000);
+ u32 pp, pp_status;
- if (I915_READ(PCH_PP_STATUS) & PP_ON)
+ pp_status = I915_READ(PCH_PP_STATUS);
+ if (pp_status & PP_ON)
return;
pp = I915_READ(PCH_PP_CONTROL);
-
- /* ILK workaround: disable reset around power sequence */
- pp &= ~PANEL_POWER_RESET;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
-
pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
+ do {
+ pp_status = I915_READ(PCH_PP_STATUS);
+ } while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout));
- if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10))
- DRM_ERROR("panel on wait timed out: 0x%08x\n",
- I915_READ(PCH_PP_STATUS));
+ if (time_after(jiffies, timeout))
+ DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status);
pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
- pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
}
static void ironlake_edp_panel_off (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp;
+ unsigned long timeout = jiffies + msecs_to_jiffies(5000);
+ u32 pp, pp_status;
pp = I915_READ(PCH_PP_CONTROL);
-
- /* ILK workaround: disable reset around power sequence */
- pp &= ~PANEL_POWER_RESET;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
-
pp &= ~POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
+ do {
+ pp_status = I915_READ(PCH_PP_STATUS);
+ } while ((pp_status & PP_ON) && !time_after(jiffies, timeout));
- if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000, 10))
- DRM_ERROR("panel off wait timed out: 0x%08x\n",
- I915_READ(PCH_PP_STATUS));
+ if (time_after(jiffies, timeout))
+ DRM_DEBUG_KMS("panel off wait timed out\n");
/* Make sure VDD is enabled so DP AUX will work */
- pp |= EDP_FORCE_VDD | PANEL_POWER_RESET; /* restore panel reset bit */
+ pp |= EDP_FORCE_VDD;
I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
}
static void ironlake_edp_backlight_on (struct drm_device *dev)
@@ -829,87 +849,33 @@ static void ironlake_edp_backlight_off (struct drm_device *dev)
I915_WRITE(PCH_PP_CONTROL, pp);
}
-static void ironlake_edp_pll_on(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpa_ctl;
-
- DRM_DEBUG_KMS("\n");
- dpa_ctl = I915_READ(DP_A);
- dpa_ctl &= ~DP_PLL_ENABLE;
- I915_WRITE(DP_A, dpa_ctl);
-}
-
-static void ironlake_edp_pll_off(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpa_ctl;
-
- dpa_ctl = I915_READ(DP_A);
- dpa_ctl |= DP_PLL_ENABLE;
- I915_WRITE(DP_A, dpa_ctl);
- udelay(200);
-}
-
-static void intel_dp_prepare(struct drm_encoder *encoder)
-{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dp_reg = I915_READ(intel_dp->output_reg);
-
- if (IS_eDP(intel_dp)) {
- ironlake_edp_backlight_off(dev);
- ironlake_edp_panel_on(dev);
- ironlake_edp_pll_on(encoder);
- }
- if (dp_reg & DP_PORT_EN)
- intel_dp_link_down(intel_dp);
-}
-
-static void intel_dp_commit(struct drm_encoder *encoder)
-{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dp_reg = I915_READ(intel_dp->output_reg);
-
- if (!(dp_reg & DP_PORT_EN)) {
- intel_dp_link_train(intel_dp);
- }
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
- ironlake_edp_backlight_on(dev);
-}
-
static void
intel_dp_dpms(struct drm_encoder *encoder, int mode)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+ uint32_t dp_reg = I915_READ(dp_priv->output_reg);
if (mode != DRM_MODE_DPMS_ON) {
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
- ironlake_edp_backlight_off(dev);
- ironlake_edp_panel_off(dev);
+ if (dp_reg & DP_PORT_EN) {
+ intel_dp_link_down(intel_encoder, dp_priv->DP);
+ if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
+ ironlake_edp_backlight_off(dev);
+ ironlake_edp_panel_off(dev);
+ }
}
- if (dp_reg & DP_PORT_EN)
- intel_dp_link_down(intel_dp);
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
- ironlake_edp_pll_off(encoder);
} else {
if (!(dp_reg & DP_PORT_EN)) {
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+ intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
+ if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
ironlake_edp_panel_on(dev);
- intel_dp_link_train(intel_dp);
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
ironlake_edp_backlight_on(dev);
+ }
}
}
- intel_dp->dpms_mode = mode;
+ dp_priv->dpms_mode = mode;
}
/*
@@ -917,12 +883,12 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
* link status information
*/
static bool
-intel_dp_get_link_status(struct intel_dp *intel_dp,
+intel_dp_get_link_status(struct intel_encoder *intel_encoder,
uint8_t link_status[DP_LINK_STATUS_SIZE])
{
int ret;
- ret = intel_dp_aux_native_read(intel_dp,
+ ret = intel_dp_aux_native_read(intel_encoder,
DP_LANE0_1_STATUS,
link_status, DP_LINK_STATUS_SIZE);
if (ret != DP_LINK_STATUS_SIZE)
@@ -999,7 +965,7 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
}
static void
-intel_get_adjust_train(struct intel_dp *intel_dp,
+intel_get_adjust_train(struct intel_encoder *intel_encoder,
uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane_count,
uint8_t train_set[4])
@@ -1135,27 +1101,27 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
}
static bool
-intel_dp_set_link_train(struct intel_dp *intel_dp,
+intel_dp_set_link_train(struct intel_encoder *intel_encoder,
uint32_t dp_reg_value,
uint8_t dp_train_pat,
uint8_t train_set[4],
bool first)
{
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int ret;
- I915_WRITE(intel_dp->output_reg, dp_reg_value);
- POSTING_READ(intel_dp->output_reg);
+ I915_WRITE(dp_priv->output_reg, dp_reg_value);
+ POSTING_READ(dp_priv->output_reg);
if (first)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev);
- intel_dp_aux_native_write_1(intel_dp,
+ intel_dp_aux_native_write_1(intel_encoder,
DP_TRAINING_PATTERN_SET,
dp_train_pat);
- ret = intel_dp_aux_native_write(intel_dp,
+ ret = intel_dp_aux_native_write(intel_encoder,
DP_TRAINING_LANE0_SET, train_set, 4);
if (ret != 4)
return false;
@@ -1164,10 +1130,12 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
}
static void
-intel_dp_link_train(struct intel_dp *intel_dp)
+intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
{
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint8_t train_set[4];
uint8_t link_status[DP_LINK_STATUS_SIZE];
int i;
@@ -1177,15 +1145,13 @@ intel_dp_link_train(struct intel_dp *intel_dp)
bool first = true;
int tries;
u32 reg;
- uint32_t DP = intel_dp->DP;
/* Write the link configuration data */
- intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
- intel_dp->link_configuration,
- DP_LINK_CONFIGURATION_SIZE);
+ intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET,
+ link_configuration, DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
@@ -1196,39 +1162,39 @@ intel_dp_link_train(struct intel_dp *intel_dp)
for (;;) {
/* Use train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
- if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
+ if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
+ signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
- if (!intel_dp_set_link_train(intel_dp, reg,
+ if (!intel_dp_set_link_train(intel_encoder, reg,
DP_TRAINING_PATTERN_1, train_set, first))
break;
first = false;
/* Set training pattern 1 */
udelay(100);
- if (!intel_dp_get_link_status(intel_dp, link_status))
+ if (!intel_dp_get_link_status(intel_encoder, link_status))
break;
- if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) {
clock_recovery = true;
break;
}
/* Check to see if we've tried the max voltage */
- for (i = 0; i < intel_dp->lane_count; i++)
+ for (i = 0; i < dp_priv->lane_count; i++)
if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
- if (i == intel_dp->lane_count)
+ if (i == dp_priv->lane_count)
break;
/* Check to see if we've tried the same voltage 5 times */
@@ -1241,7 +1207,7 @@ intel_dp_link_train(struct intel_dp *intel_dp)
voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Compute new train_set as requested by target */
- intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
+ intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
}
/* channel equalization */
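Before that channel-equalization phase, the clock-recovery loop above keeps reprogramming the signal levels and training pattern 1 until the sink reports lock, and gives up when every lane already signals maximum voltage swing or the same swing has been requested five times in a row. A self-contained sketch of just that loop-control bookkeeping against a simulated sink; the mask values are placeholders for the DPCD definitions:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LANES 4
    #define VOLTAGE_SWING_MASK 0x3   /* placeholder for DP_TRAIN_VOLTAGE_SWING_MASK */
    #define MAX_SWING_REACHED  0x4   /* placeholder for DP_TRAIN_MAX_SWING_REACHED */

    /* Simulated sink: reports clock recovery once the swing reaches level 2. */
    static bool sink_clock_recovery_ok(const uint8_t train_set[LANES])
    {
        return (train_set[0] & VOLTAGE_SWING_MASK) >= 2;
    }

    /* Simulated adjustment request: the sink asks for one more swing step. */
    static void sink_adjust_request(uint8_t train_set[LANES])
    {
        for (int i = 0; i < LANES; i++) {
            uint8_t v = (train_set[i] & VOLTAGE_SWING_MASK) + 1;
            train_set[i] = v >= 3 ? (3 | MAX_SWING_REACHED) : v;
        }
    }

    int main(void)
    {
        uint8_t train_set[LANES] = {0};
        uint8_t voltage = 0xff;
        int tries = 0;
        bool clock_recovery = false;

        for (;;) {
            /* (the driver programs signal levels and pattern 1 here) */
            if (sink_clock_recovery_ok(train_set)) {
                clock_recovery = true;
                break;
            }

            /* Stop if every lane already reports maximum swing. */
            int i;
            for (i = 0; i < LANES; i++)
                if (!(train_set[i] & MAX_SWING_REACHED))
                    break;
            if (i == LANES)
                break;

            /* Stop if the same voltage has now been tried five times. */
            if ((train_set[0] & VOLTAGE_SWING_MASK) == voltage) {
                if (++tries == 5)
                    break;
            } else {
                tries = 0;
            }
            voltage = train_set[0] & VOLTAGE_SWING_MASK;

            /* Compute a new train_set as requested by the sink. */
            sink_adjust_request(train_set);
        }

        printf("clock recovery %s\n", clock_recovery ? "succeeded" : "failed");
        return 0;
    }
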
@@ -1251,30 +1217,30 @@ intel_dp_link_train(struct intel_dp *intel_dp)
/* Use train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
- if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
+ if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
+ signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
/* channel eq pattern */
- if (!intel_dp_set_link_train(intel_dp, reg,
+ if (!intel_dp_set_link_train(intel_encoder, reg,
DP_TRAINING_PATTERN_2, train_set,
false))
break;
udelay(400);
- if (!intel_dp_get_link_status(intel_dp, link_status))
+ if (!intel_dp_get_link_status(intel_encoder, link_status))
break;
- if (intel_channel_eq_ok(link_status, intel_dp->lane_count)) {
+ if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) {
channel_eq = true;
break;
}
@@ -1284,53 +1250,53 @@ intel_dp_link_train(struct intel_dp *intel_dp)
break;
/* Compute new train_set as requested by target */
- intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
+ intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
++tries;
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
- I915_WRITE(intel_dp->output_reg, reg);
- POSTING_READ(intel_dp->output_reg);
- intel_dp_aux_native_write_1(intel_dp,
+ I915_WRITE(dp_priv->output_reg, reg);
+ POSTING_READ(dp_priv->output_reg);
+ intel_dp_aux_native_write_1(intel_encoder,
DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
}
static void
-intel_dp_link_down(struct intel_dp *intel_dp)
+intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
{
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t DP = intel_dp->DP;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
DRM_DEBUG_KMS("\n");
- if (IS_eDP(intel_dp)) {
+ if (IS_eDP(intel_encoder)) {
DP &= ~DP_PLL_ENABLE;
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
+ I915_WRITE(dp_priv->output_reg, DP);
+ POSTING_READ(dp_priv->output_reg);
udelay(100);
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) {
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
- I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
- POSTING_READ(intel_dp->output_reg);
+ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
+ POSTING_READ(dp_priv->output_reg);
} else {
DP &= ~DP_LINK_TRAIN_MASK;
- I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
- POSTING_READ(intel_dp->output_reg);
+ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+ POSTING_READ(dp_priv->output_reg);
}
udelay(17000);
- if (IS_eDP(intel_dp))
+ if (IS_eDP(intel_encoder))
DP |= DP_LINK_TRAIN_OFF;
- I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
- POSTING_READ(intel_dp->output_reg);
+ I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN);
+ POSTING_READ(dp_priv->output_reg);
}
/*
@@ -1343,39 +1309,41 @@ intel_dp_link_down(struct intel_dp *intel_dp)
*/
static void
-intel_dp_check_link_status(struct intel_dp *intel_dp)
+intel_dp_check_link_status(struct intel_encoder *intel_encoder)
{
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint8_t link_status[DP_LINK_STATUS_SIZE];
- if (!intel_dp->base.enc.crtc)
+ if (!intel_encoder->enc.crtc)
return;
- if (!intel_dp_get_link_status(intel_dp, link_status)) {
- intel_dp_link_down(intel_dp);
+ if (!intel_dp_get_link_status(intel_encoder, link_status)) {
+ intel_dp_link_down(intel_encoder, dp_priv->DP);
return;
}
- if (!intel_channel_eq_ok(link_status, intel_dp->lane_count))
- intel_dp_link_train(intel_dp);
+ if (!intel_channel_eq_ok(link_status, dp_priv->lane_count))
+ intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
}
static enum drm_connector_status
ironlake_dp_detect(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
enum drm_connector_status status;
status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_dp,
- 0x000, intel_dp->dpcd,
- sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
+ if (intel_dp_aux_native_read(intel_encoder,
+ 0x000, dp_priv->dpcd,
+ sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
{
- if (intel_dp->dpcd[0] != 0)
+ if (dp_priv->dpcd[0] != 0)
status = connector_status_connected;
}
- DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
- intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
+ DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", dp_priv->dpcd[0],
+ dp_priv->dpcd[1], dp_priv->dpcd[2], dp_priv->dpcd[3]);
return status;
}
@@ -1389,18 +1357,19 @@ static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint32_t temp, bit;
enum drm_connector_status status;
- intel_dp->has_audio = false;
+ dp_priv->has_audio = false;
if (HAS_PCH_SPLIT(dev))
return ironlake_dp_detect(connector);
- switch (intel_dp->output_reg) {
+ switch (dp_priv->output_reg) {
case DP_B:
bit = DPB_HOTPLUG_INT_STATUS;
break;
@@ -1420,11 +1389,11 @@ intel_dp_detect(struct drm_connector *connector)
return connector_status_disconnected;
status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_dp,
- 0x000, intel_dp->dpcd,
- sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
+ if (intel_dp_aux_native_read(intel_encoder,
+ 0x000, dp_priv->dpcd,
+ sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
{
- if (intel_dp->dpcd[0] != 0)
+ if (dp_priv->dpcd[0] != 0)
status = connector_status_connected;
}
return status;
@@ -1433,17 +1402,18 @@ intel_dp_detect(struct drm_connector *connector)
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
- ret = intel_ddc_get_modes(connector, intel_dp->base.ddc_bus);
+ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (ret) {
- if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
+ if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
!dev_priv->panel_fixed_mode) {
struct drm_display_mode *newmode;
list_for_each_entry(newmode, &connector->probed_modes,
@@ -1460,7 +1430,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
}
/* if eDP has no EDID, try to use fixed panel mode from VBT */
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
+ if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
if (dev_priv->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
@@ -1482,9 +1452,9 @@ intel_dp_destroy (struct drm_connector *connector)
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
.dpms = intel_dp_dpms,
.mode_fixup = intel_dp_mode_fixup,
- .prepare = intel_dp_prepare,
+ .prepare = intel_encoder_prepare,
.mode_set = intel_dp_mode_set,
- .commit = intel_dp_commit,
+ .commit = intel_encoder_commit,
};
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -1500,17 +1470,27 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs =
.best_encoder = intel_attached_encoder,
};
+static void intel_dp_enc_destroy(struct drm_encoder *encoder)
+{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
+}
+
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
- .destroy = intel_encoder_destroy,
+ .destroy = intel_dp_enc_destroy,
};
void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
- struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
- if (intel_dp->dpms_mode == DRM_MODE_DPMS_ON)
- intel_dp_check_link_status(intel_dp);
+ if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
+ intel_dp_check_link_status(intel_encoder);
}
/* Return which DP Port should be selected for Transcoder DP control */
@@ -1520,18 +1500,18 @@ intel_trans_dp_port_sel (struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder = NULL;
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_dp *intel_dp;
-
if (encoder->crtc != crtc)
continue;
- intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
- return intel_dp->output_reg;
+ intel_encoder = enc_to_intel_encoder(encoder);
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ return dp_priv->output_reg;
+ }
}
-
return -1;
}
@@ -1560,28 +1540,30 @@ intel_dp_init(struct drm_device *dev, int output_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
- struct intel_dp *intel_dp;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
+ struct intel_dp_priv *dp_priv;
const char *name = NULL;
int type;
- intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
- if (!intel_dp)
+ intel_encoder = kcalloc(sizeof(struct intel_encoder) +
+ sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
+ if (!intel_encoder)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_dp);
+ kfree(intel_encoder);
return;
}
- intel_encoder = &intel_dp->base;
- if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
+ dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
+
+ if (HAS_PCH_SPLIT(dev) && (output_reg == PCH_DP_D))
if (intel_dpd_is_edp(dev))
- intel_dp->is_pch_edp = true;
+ dp_priv->is_pch_edp = true;
- if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
+ if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) {
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
} else {
@@ -1602,16 +1584,18 @@ intel_dp_init(struct drm_device *dev, int output_reg)
else if (output_reg == DP_D || output_reg == PCH_DP_D)
intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
- if (IS_eDP(intel_dp))
+ if (IS_eDP(intel_encoder))
intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
- intel_dp->output_reg = output_reg;
- intel_dp->has_audio = false;
- intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
+ dp_priv->intel_encoder = intel_encoder;
+ dp_priv->output_reg = output_reg;
+ dp_priv->has_audio = false;
+ dp_priv->dpms_mode = DRM_MODE_DPMS_ON;
+ intel_encoder->dev_priv = dp_priv;
drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
@@ -1646,12 +1630,12 @@ intel_dp_init(struct drm_device *dev, int output_reg)
break;
}
- intel_dp_i2c_init(intel_dp, intel_connector, name);
+ intel_dp_i2c_init(intel_encoder, intel_connector, name);
- intel_encoder->ddc_bus = &intel_dp->adapter;
+ intel_encoder->ddc_bus = &dp_priv->adapter;
intel_encoder->hot_plug = intel_dp_hot_plug;
- if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
+ if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) {
/* initialize panel mode from VBT if available for eDP */
if (dev_priv->lfp_lvds_vbt_mode) {
dev_priv->panel_fixed_mode =
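
A recurring pattern in the intel_dp.c hunks above: the generic encoder and its DP-specific state come out of a single zeroed allocation, and dev_priv is pointed just past the generic struct (dp_priv = (struct intel_dp_priv *)(intel_encoder + 1)). The following standalone userspace sketch illustrates that idiom only; the structure and function names are invented and it is not the driver code.

    #include <stdio.h>
    #include <stdlib.h>

    struct fake_encoder {              /* stands in for struct intel_encoder */
        int type;
        void *dev_priv;                /* points at the trailing private block */
    };

    struct fake_dp_priv {              /* stands in for struct intel_dp_priv */
        unsigned int output_reg;
        int lane_count;
    };

    static struct fake_encoder *encoder_alloc(void)
    {
        /* one zeroed allocation: generic part followed by private part */
        struct fake_encoder *enc =
            calloc(1, sizeof(*enc) + sizeof(struct fake_dp_priv));
        if (!enc)
            return NULL;
        enc->dev_priv = enc + 1;       /* private data starts right after */
        return enc;
    }

    int main(void)
    {
        struct fake_encoder *enc = encoder_alloc();
        struct fake_dp_priv *dp;

        if (!enc)
            return 1;
        dp = enc->dev_priv;
        dp->output_reg = 0x64100;      /* arbitrary example value */
        dp->lane_count = 4;
        printf("reg=%#x lanes=%d\n", dp->output_reg, dp->lane_count);
        free(enc);                     /* one free releases both halves */
        return 0;
    }

One free releases both the encoder and its private data, which is why the error paths above only kfree(intel_encoder).
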
diff --git a/trunk/drivers/gpu/drm/i915/intel_drv.h b/trunk/drivers/gpu/drm/i915/intel_drv.h
index ad312ca6b3e5..b2190148703a 100644
--- a/trunk/drivers/gpu/drm/i915/intel_drv.h
+++ b/trunk/drivers/gpu/drm/i915/intel_drv.h
@@ -32,20 +32,6 @@
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
-
-#define wait_for(COND, MS, W) ({ \
- unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
- int ret__ = 0; \
- while (! (COND)) { \
- if (time_after(jiffies, timeout__)) { \
- ret__ = -ETIMEDOUT; \
- break; \
- } \
- if (W) msleep(W); \
- } \
- ret__; \
-})
-
/*
* Display related stuff
*/
@@ -116,6 +102,7 @@ struct intel_encoder {
struct i2c_adapter *ddc_bus;
bool load_detect_temp;
bool needs_tv_clock;
+ void *dev_priv;
void (*hot_plug)(struct intel_encoder *);
int crtc_mask;
int clone_mask;
@@ -123,6 +110,7 @@ struct intel_encoder {
struct intel_connector {
struct drm_connector base;
+ void *dev_priv;
};
struct intel_crtc;
@@ -168,7 +156,7 @@ struct intel_crtc {
uint32_t cursor_addr;
int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
- bool cursor_visible, cursor_on;
+ bool cursor_visble;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -176,16 +164,6 @@ struct intel_crtc {
#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
-struct intel_unpin_work {
- struct work_struct work;
- struct drm_device *dev;
- struct drm_gem_object *old_fb_obj;
- struct drm_gem_object *pending_flip_obj;
- struct drm_pending_vblank_event *event;
- int pending;
- bool enable_stall_check;
-};
-
struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
const char *name);
void intel_i2c_destroy(struct i2c_adapter *adapter);
@@ -210,18 +188,10 @@ extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
-extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
- struct drm_display_mode *adjusted_mode);
-extern void intel_pch_panel_fitting(struct drm_device *dev,
- int fitting_mode,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-
extern int intel_panel_fitter_pipe (struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
extern void intel_encoder_commit (struct drm_encoder *encoder);
-extern void intel_encoder_destroy(struct drm_encoder *encoder);
extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
@@ -229,8 +199,7 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe);
-extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
+extern void intel_wait_for_vblank(struct drm_device *dev);
extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
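
The intel_drv.h hunk above also removes the wait_for() macro, a bounded poll: spin on a condition until it holds or a millisecond deadline passes, optionally sleeping between checks. Below is a hedged userspace analogue of that idiom, with POSIX clocks in place of jiffies and msleep; it keeps the GNU statement-expression form of the original but is a sketch, not the kernel definition.

    #include <stdio.h>
    #include <time.h>
    #include <errno.h>

    static long long now_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
    }

    /* poll COND for up to MS milliseconds, sleeping W ms between checks */
    #define wait_for(COND, MS, W) ({                                 \
        long long timeout__ = now_ms() + (MS);                       \
        int ret__ = 0;                                               \
        while (!(COND)) {                                            \
            if (now_ms() > timeout__) {                              \
                ret__ = -ETIMEDOUT;                                  \
                break;                                               \
            }                                                        \
            if (W) {                                                 \
                struct timespec w__ = { 0, (W) * 1000000L };         \
                nanosleep(&w__, NULL);                               \
            }                                                        \
        }                                                            \
        ret__;                                                       \
    })

    int main(void)
    {
        volatile int ready = 0;        /* nothing ever sets this */

        /* gives up after 50 ms instead of spinning forever */
        if (wait_for(ready != 0, 50, 5))
            printf("timed out waiting for condition\n");
        return 0;
    }

The macro evaluates to 0 on success or -ETIMEDOUT on timeout, so a caller can both bound the wait and report failure, which the open-coded do/while polls in the intel_lvds.c hunks further down cannot.
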
diff --git a/trunk/drivers/gpu/drm/i915/intel_dvo.c b/trunk/drivers/gpu/drm/i915/intel_dvo.c
index a399f4b2c1c5..227feca7cf8d 100644
--- a/trunk/drivers/gpu/drm/i915/intel_dvo.c
+++ b/trunk/drivers/gpu/drm/i915/intel_dvo.c
@@ -38,7 +38,7 @@
#define CH7xxx_ADDR 0x76
#define TFP410_ADDR 0x38
-static const struct intel_dvo_device intel_dvo_devices[] = {
+static struct intel_dvo_device intel_dvo_devices[] = {
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "sil164",
@@ -77,33 +77,20 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
}
};
-struct intel_dvo {
- struct intel_encoder base;
-
- struct intel_dvo_device dev;
-
- struct drm_display_mode *panel_fixed_mode;
- bool panel_wants_dither;
-};
-
-static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
-{
- return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base);
-}
-
static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
- u32 dvo_reg = intel_dvo->dev.dvo_reg;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+ u32 dvo_reg = dvo->dvo_reg;
u32 temp = I915_READ(dvo_reg);
if (mode == DRM_MODE_DPMS_ON) {
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
- intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
+ dvo->dev_ops->dpms(dvo, mode);
} else {
- intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
+ dvo->dev_ops->dpms(dvo, mode);
I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
I915_READ(dvo_reg);
}
@@ -113,36 +100,38 @@ static int intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
/* XXX: Validate clock range */
- if (intel_dvo->panel_fixed_mode) {
- if (mode->hdisplay > intel_dvo->panel_fixed_mode->hdisplay)
+ if (dvo->panel_fixed_mode) {
+ if (mode->hdisplay > dvo->panel_fixed_mode->hdisplay)
return MODE_PANEL;
- if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay)
+ if (mode->vdisplay > dvo->panel_fixed_mode->vdisplay)
return MODE_PANEL;
}
- return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
+ return dvo->dev_ops->mode_valid(dvo, mode);
}
static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
/* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- if (intel_dvo->panel_fixed_mode != NULL) {
-#define C(x) adjusted_mode->x = intel_dvo->panel_fixed_mode->x
+ if (dvo->panel_fixed_mode != NULL) {
+#define C(x) adjusted_mode->x = dvo->panel_fixed_mode->x
C(hdisplay);
C(hsync_start);
C(hsync_end);
@@ -156,8 +145,8 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
#undef C
}
- if (intel_dvo->dev.dev_ops->mode_fixup)
- return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode);
+ if (dvo->dev_ops->mode_fixup)
+ return dvo->dev_ops->mode_fixup(dvo, mode, adjusted_mode);
return true;
}
@@ -169,10 +158,11 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
int pipe = intel_crtc->pipe;
u32 dvo_val;
- u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
+ u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
switch (dvo_reg) {
@@ -188,7 +178,7 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
break;
}
- intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode);
+ dvo->dev_ops->mode_set(dvo, mode, adjusted_mode);
/* Save the data order, since I don't know what it should be set to. */
dvo_val = I915_READ(dvo_reg) &
@@ -224,38 +214,40 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
- return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
+ return dvo->dev_ops->detect(dvo);
}
static int intel_dvo_get_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
/* We should probably have an i2c driver get_modes function for those
* devices which will have a fixed set of modes determined by the chip
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
- intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus);
+ intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (!list_empty(&connector->probed_modes))
return 1;
- if (intel_dvo->panel_fixed_mode != NULL) {
+
+ if (dvo->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(connector->dev, intel_dvo->panel_fixed_mode);
+ mode = drm_mode_duplicate(connector->dev, dvo->panel_fixed_mode);
if (mode) {
drm_mode_probed_add(connector, mode);
return 1;
}
}
-
return 0;
}
-static void intel_dvo_destroy(struct drm_connector *connector)
+static void intel_dvo_destroy (struct drm_connector *connector)
{
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -285,20 +277,28 @@ static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
{
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
-
- if (intel_dvo->dev.dev_ops->destroy)
- intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
-
- kfree(intel_dvo->panel_fixed_mode);
-
- intel_encoder_destroy(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+
+ if (dvo) {
+ if (dvo->dev_ops->destroy)
+ dvo->dev_ops->destroy(dvo);
+ if (dvo->panel_fixed_mode)
+ kfree(dvo->panel_fixed_mode);
+ }
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
.destroy = intel_dvo_enc_destroy,
};
+
/**
* Attempts to get a fixed panel timing for LVDS (currently only the i830).
*
@@ -306,13 +306,15 @@ static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
* chip being on DVOB/C and having multiple pipes.
*/
static struct drm_display_mode *
-intel_dvo_get_current_mode(struct drm_connector *connector)
+intel_dvo_get_current_mode (struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
- uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+ uint32_t dvo_reg = dvo->dvo_reg;
+ uint32_t dvo_val = I915_READ(dvo_reg);
struct drm_display_mode *mode = NULL;
/* If the DVO port is active, that'll be the LVDS, so we can pull out
@@ -325,6 +327,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
crtc = intel_get_crtc_from_pipe(dev, pipe);
if (crtc) {
mode = intel_crtc_mode_get(dev, crtc);
+
if (mode) {
mode->type |= DRM_MODE_TYPE_PREFERRED;
if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
@@ -334,32 +337,28 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
}
}
}
-
return mode;
}
void intel_dvo_init(struct drm_device *dev)
{
struct intel_encoder *intel_encoder;
- struct intel_dvo *intel_dvo;
struct intel_connector *intel_connector;
+ struct intel_dvo_device *dvo;
struct i2c_adapter *i2cbus = NULL;
int ret = 0;
int i;
int encoder_type = DRM_MODE_ENCODER_NONE;
-
- intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL);
- if (!intel_dvo)
+ intel_encoder = kzalloc (sizeof(struct intel_encoder), GFP_KERNEL);
+ if (!intel_encoder)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_dvo);
+ kfree(intel_encoder);
return;
}
- intel_encoder = &intel_dvo->base;
-
/* Set up the DDC bus */
intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
if (!intel_encoder->ddc_bus)
@@ -368,9 +367,10 @@ void intel_dvo_init(struct drm_device *dev)
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
struct drm_connector *connector = &intel_connector->base;
- const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
int gpio;
+ dvo = &intel_dvo_devices[i];
+
/* Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
* in the spec.
@@ -393,8 +393,11 @@ void intel_dvo_init(struct drm_device *dev)
continue;
}
- intel_dvo->dev = *dvo;
- ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus);
+ if (dvo->dev_ops!= NULL)
+ ret = dvo->dev_ops->init(dvo, i2cbus);
+ else
+ ret = false;
+
if (!ret)
continue;
@@ -426,6 +429,9 @@ void intel_dvo_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
+ intel_encoder->dev_priv = dvo;
+ intel_encoder->i2c_bus = i2cbus;
+
drm_encoder_init(dev, &intel_encoder->enc,
&intel_dvo_enc_funcs, encoder_type);
drm_encoder_helper_add(&intel_encoder->enc,
@@ -441,9 +447,9 @@ void intel_dvo_init(struct drm_device *dev)
* headers, likely), so for now, just get the current
* mode being output through DVO.
*/
- intel_dvo->panel_fixed_mode =
+ dvo->panel_fixed_mode =
intel_dvo_get_current_mode(connector);
- intel_dvo->panel_wants_dither = true;
+ dvo->panel_wants_dither = true;
}
drm_sysfs_connector_add(connector);
@@ -455,6 +461,6 @@ void intel_dvo_init(struct drm_device *dev)
if (i2cbus != NULL)
intel_i2c_destroy(i2cbus);
free_intel:
- kfree(intel_dvo);
+ kfree(intel_encoder);
kfree(intel_connector);
}
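
The removed enc_to_intel_dvo() helper above is an instance of the container_of() idiom: given a pointer to an embedded member, recover the structure that embeds it. A self-contained sketch with invented type names follows; the kernel's real macro also carries a type-checking temporary, omitted here.

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct base_encoder {              /* the generic part handed to callbacks */
        int id;
    };

    struct dvo_encoder {
        struct base_encoder base;      /* embedded generic part */
        unsigned int dvo_reg;          /* device-specific state */
    };

    int main(void)
    {
        struct dvo_encoder dvo = { .base = { .id = 1 }, .dvo_reg = 0x61140 };
        struct base_encoder *enc = &dvo.base;

        /* recover the enclosing dvo_encoder from the embedded base pointer */
        struct dvo_encoder *back = container_of(enc, struct dvo_encoder, base);
        printf("dvo_reg=%#x\n", back->dvo_reg);
        return 0;
    }

The dev_priv pointer used in the hunks above achieves the same association through an untyped void * field instead of an embedded base member.
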
diff --git a/trunk/drivers/gpu/drm/i915/intel_hdmi.c b/trunk/drivers/gpu/drm/i915/intel_hdmi.c
index ccd4c97e6524..197887ed1823 100644
--- a/trunk/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/trunk/drivers/gpu/drm/i915/intel_hdmi.c
@@ -37,17 +37,11 @@
#include "i915_drm.h"
#include "i915_drv.h"
-struct intel_hdmi {
- struct intel_encoder base;
+struct intel_hdmi_priv {
u32 sdvox_reg;
bool has_hdmi_sink;
};
-static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
-{
- return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base);
-}
-
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -56,7 +50,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
u32 sdvox;
sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
@@ -65,7 +60,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
- if (intel_hdmi->has_hdmi_sink) {
+ if (hdmi_priv->has_hdmi_sink) {
sdvox |= SDVO_AUDIO_ENABLE;
if (HAS_PCH_CPT(dev))
sdvox |= HDMI_MODE_SELECT;
@@ -78,25 +73,26 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
sdvox |= SDVO_PIPE_B_SELECT;
}
- I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
- POSTING_READ(intel_hdmi->sdvox_reg);
+ I915_WRITE(hdmi_priv->sdvox_reg, sdvox);
+ POSTING_READ(hdmi_priv->sdvox_reg);
}
static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
u32 temp;
- temp = I915_READ(intel_hdmi->sdvox_reg);
+ temp = I915_READ(hdmi_priv->sdvox_reg);
/* HW workaround, need to toggle enable bit off and on for 12bpc, but
* we do this anyway which shows more stable in testing.
*/
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
- POSTING_READ(intel_hdmi->sdvox_reg);
+ I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
+ POSTING_READ(hdmi_priv->sdvox_reg);
}
if (mode != DRM_MODE_DPMS_ON) {
@@ -105,15 +101,15 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
temp |= SDVO_ENABLE;
}
- I915_WRITE(intel_hdmi->sdvox_reg, temp);
- POSTING_READ(intel_hdmi->sdvox_reg);
+ I915_WRITE(hdmi_priv->sdvox_reg, temp);
+ POSTING_READ(hdmi_priv->sdvox_reg);
/* HW workaround, need to write this twice for issue that may result
* in first write getting masked.
*/
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(intel_hdmi->sdvox_reg, temp);
- POSTING_READ(intel_hdmi->sdvox_reg);
+ I915_WRITE(hdmi_priv->sdvox_reg, temp);
+ POSTING_READ(hdmi_priv->sdvox_reg);
}
}
@@ -142,17 +138,19 @@ static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
struct edid *edid = NULL;
enum drm_connector_status status = connector_status_disconnected;
- intel_hdmi->has_hdmi_sink = false;
- edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus);
+ hdmi_priv->has_hdmi_sink = false;
+ edid = drm_get_edid(connector,
+ intel_encoder->ddc_bus);
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
- intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+ hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
}
connector->display_info.raw_edid = NULL;
kfree(edid);
@@ -164,13 +162,13 @@ intel_hdmi_detect(struct drm_connector *connector)
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
/* We should parse the EDID data and find out if it's an HDMI sink so
* we can send audio to it.
*/
- return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus);
+ return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -201,8 +199,18 @@ static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs
.best_encoder = intel_attached_encoder,
};
+static void intel_hdmi_enc_destroy(struct drm_encoder *encoder)
+{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
+}
+
static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
- .destroy = intel_encoder_destroy,
+ .destroy = intel_hdmi_enc_destroy,
};
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
@@ -211,19 +219,21 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
- struct intel_hdmi *intel_hdmi;
+ struct intel_hdmi_priv *hdmi_priv;
- intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
- if (!intel_hdmi)
+ intel_encoder = kcalloc(sizeof(struct intel_encoder) +
+ sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
+ if (!intel_encoder)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_hdmi);
+ kfree(intel_encoder);
return;
}
- intel_encoder = &intel_hdmi->base;
+ hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
+
connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
@@ -264,7 +274,8 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
if (!intel_encoder->ddc_bus)
goto err_connector;
- intel_hdmi->sdvox_reg = sdvox_reg;
+ hdmi_priv->sdvox_reg = sdvox_reg;
+ intel_encoder->dev_priv = hdmi_priv;
drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs,
DRM_MODE_ENCODER_TMDS);
@@ -287,7 +298,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
err_connector:
drm_connector_cleanup(connector);
- kfree(intel_hdmi);
+ kfree(intel_encoder);
kfree(intel_connector);
return;
diff --git a/trunk/drivers/gpu/drm/i915/intel_lvds.c b/trunk/drivers/gpu/drm/i915/intel_lvds.c
index 4fbb0165b26f..0a2e60059fb3 100644
--- a/trunk/drivers/gpu/drm/i915/intel_lvds.c
+++ b/trunk/drivers/gpu/drm/i915/intel_lvds.c
@@ -41,18 +41,12 @@
#include
/* Private structure for the integrated LVDS support */
-struct intel_lvds {
- struct intel_encoder base;
+struct intel_lvds_priv {
int fitting_mode;
u32 pfit_control;
u32 pfit_pgm_ratios;
};
-static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder)
-{
- return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base);
-}
-
/**
* Sets the backlight level.
*
@@ -96,7 +90,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
static void intel_lvds_set_power(struct drm_device *dev, bool on)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 ctl_reg, status_reg, lvds_reg;
+ u32 pp_status, ctl_reg, status_reg, lvds_reg;
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
@@ -114,8 +108,9 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
POWER_TARGET_ON);
- if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0))
- DRM_ERROR("timed out waiting to enable LVDS pipe");
+ do {
+ pp_status = I915_READ(status_reg);
+ } while ((pp_status & PP_ON) == 0);
intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
} else {
@@ -123,8 +118,9 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
~POWER_TARGET_ON);
- if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0))
- DRM_ERROR("timed out waiting for LVDS pipe to turn off");
+ do {
+ pp_status = I915_READ(status_reg);
+ } while (pp_status & PP_ON);
I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
POSTING_READ(lvds_reg);
@@ -223,8 +219,9 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
struct drm_encoder *tmp_encoder;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
/* Should never happen!! */
@@ -244,20 +241,26 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
/* If we don't have a panel mode, there is nothing we can do */
if (dev_priv->panel_fixed_mode == NULL)
return true;
-
/*
* We have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
-
- if (HAS_PCH_SPLIT(dev)) {
- intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
- mode, adjusted_mode);
- return true;
- }
+ adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay;
+ adjusted_mode->hsync_start =
+ dev_priv->panel_fixed_mode->hsync_start;
+ adjusted_mode->hsync_end =
+ dev_priv->panel_fixed_mode->hsync_end;
+ adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal;
+ adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay;
+ adjusted_mode->vsync_start =
+ dev_priv->panel_fixed_mode->vsync_start;
+ adjusted_mode->vsync_end =
+ dev_priv->panel_fixed_mode->vsync_end;
+ adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
+ adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
/* Make sure pre-965s set dither correctly */
if (!IS_I965G(dev)) {
@@ -270,6 +273,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
adjusted_mode->vdisplay == mode->vdisplay)
goto out;
+ /* full screen scale for now */
+ if (HAS_PCH_SPLIT(dev))
+ goto out;
+
/* 965+ wants fuzzy fitting */
if (IS_I965G(dev))
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
@@ -281,10 +288,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* to register description and PRM.
* Change the value here to see the borders for debugging
*/
- I915_WRITE(BCLRPAT_A, 0);
- I915_WRITE(BCLRPAT_B, 0);
+ if (!HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(BCLRPAT_A, 0);
+ I915_WRITE(BCLRPAT_B, 0);
+ }
- switch (intel_lvds->fitting_mode) {
+ switch (lvds_priv->fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
@@ -369,8 +378,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
out:
- intel_lvds->pfit_control = pfit_control;
- intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
+ lvds_priv->pfit_control = pfit_control;
+ lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios;
dev_priv->lvds_border_bits = border;
/*
@@ -418,7 +427,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
/*
* The LVDS pin pair will already have been turned on in the
@@ -434,8 +444,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
* screen. Should be enabled before the pipe is enabled, according to
* register description and PRM.
*/
- I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
- I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
+ I915_WRITE(PFIT_PGM_RATIOS, lvds_priv->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control);
}
/**
@@ -590,17 +600,18 @@ static int intel_lvds_set_property(struct drm_connector *connector,
connector->encoder) {
struct drm_crtc *crtc = connector->encoder->crtc;
struct drm_encoder *encoder = connector->encoder;
- struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return 0;
}
- if (intel_lvds->fitting_mode == value) {
+ if (lvds_priv->fitting_mode == value) {
/* the LVDS scaling property is not changed */
return 0;
}
- intel_lvds->fitting_mode = value;
+ lvds_priv->fitting_mode = value;
if (crtc && crtc->enabled) {
/*
* If the CRTC is enabled, the display will be changed
@@ -636,8 +647,19 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.destroy = intel_lvds_destroy,
};
+
+static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
+}
+
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
- .destroy = intel_encoder_destroy,
+ .destroy = intel_lvds_enc_destroy,
};
static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
@@ -821,13 +843,13 @@ static int lvds_is_present_in_vbt(struct drm_device *dev)
void intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_lvds *intel_lvds;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_crtc *crtc;
+ struct intel_lvds_priv *lvds_priv;
u32 lvds;
int pipe, gpio = GPIOC;
@@ -850,20 +872,20 @@ void intel_lvds_init(struct drm_device *dev)
gpio = PCH_GPIOC;
}
- intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
- if (!intel_lvds) {
+ intel_encoder = kzalloc(sizeof(struct intel_encoder) +
+ sizeof(struct intel_lvds_priv), GFP_KERNEL);
+ if (!intel_encoder) {
return;
}
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_lvds);
+ kfree(intel_encoder);
return;
}
- intel_encoder = &intel_lvds->base;
- encoder = &intel_encoder->enc;
connector = &intel_connector->base;
+ encoder = &intel_encoder->enc;
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
@@ -875,12 +897,16 @@ void intel_lvds_init(struct drm_device *dev)
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 1);
+ if (IS_I965G(dev))
+ intel_encoder->crtc_mask |= (1 << 0);
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
+ lvds_priv = (struct intel_lvds_priv *)(intel_encoder + 1);
+ intel_encoder->dev_priv = lvds_priv;
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
/*
@@ -890,7 +916,7 @@ void intel_lvds_init(struct drm_device *dev)
drm_connector_attach_property(&intel_connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
- intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
+ lvds_priv->fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
* LVDS discovery:
* 1) check for EDID on DDC
@@ -998,6 +1024,6 @@ void intel_lvds_init(struct drm_device *dev)
intel_i2c_destroy(intel_encoder->ddc_bus);
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
- kfree(intel_lvds);
+ kfree(intel_encoder);
kfree(intel_connector);
}
diff --git a/trunk/drivers/gpu/drm/i915/intel_overlay.c b/trunk/drivers/gpu/drm/i915/intel_overlay.c
index 1d306a458be6..d39aea24eabe 100644
--- a/trunk/drivers/gpu/drm/i915/intel_overlay.c
+++ b/trunk/drivers/gpu/drm/i915/intel_overlay.c
@@ -25,8 +25,6 @@
*
* Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
*/
-
-#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
@@ -1369,8 +1367,7 @@ void intel_setup_overlay(struct drm_device *dev)
overlay->flip_addr = overlay->reg_bo->gtt_offset;
} else {
ret = i915_gem_attach_phys_object(dev, reg_bo,
- I915_GEM_PHYS_OVERLAY_REGS,
- 0);
+ I915_GEM_PHYS_OVERLAY_REGS);
if (ret) {
DRM_ERROR("failed to attach phys overlay regs\n");
goto out_free_bo;
@@ -1419,99 +1416,3 @@ void intel_cleanup_overlay(struct drm_device *dev)
kfree(dev_priv->overlay);
}
}
-
-struct intel_overlay_error_state {
- struct overlay_registers regs;
- unsigned long base;
- u32 dovsta;
- u32 isr;
-};
-
-struct intel_overlay_error_state *
-intel_overlay_capture_error_state(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_overlay *overlay = dev_priv->overlay;
- struct intel_overlay_error_state *error;
- struct overlay_registers __iomem *regs;
-
- if (!overlay || !overlay->active)
- return NULL;
-
- error = kmalloc(sizeof(*error), GFP_ATOMIC);
- if (error == NULL)
- return NULL;
-
- error->dovsta = I915_READ(DOVSTA);
- error->isr = I915_READ(ISR);
- if (OVERLAY_NONPHYSICAL(overlay->dev))
- error->base = (long) overlay->reg_bo->gtt_offset;
- else
- error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
-
- regs = intel_overlay_map_regs_atomic(overlay);
- if (!regs)
- goto err;
-
- memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
- intel_overlay_unmap_regs_atomic(overlay);
-
- return error;
-
-err:
- kfree(error);
- return NULL;
-}
-
-void
-intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error)
-{
- seq_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
- error->dovsta, error->isr);
- seq_printf(m, " Register file at 0x%08lx:\n",
- error->base);
-
-#define P(x) seq_printf(m, " " #x ": 0x%08x\n", error->regs.x)
- P(OBUF_0Y);
- P(OBUF_1Y);
- P(OBUF_0U);
- P(OBUF_0V);
- P(OBUF_1U);
- P(OBUF_1V);
- P(OSTRIDE);
- P(YRGB_VPH);
- P(UV_VPH);
- P(HORZ_PH);
- P(INIT_PHS);
- P(DWINPOS);
- P(DWINSZ);
- P(SWIDTH);
- P(SWIDTHSW);
- P(SHEIGHT);
- P(YRGBSCALE);
- P(UVSCALE);
- P(OCLRC0);
- P(OCLRC1);
- P(DCLRKV);
- P(DCLRKM);
- P(SCLRKVH);
- P(SCLRKVL);
- P(SCLRKEN);
- P(OCONFIG);
- P(OCMD);
- P(OSTART_0Y);
- P(OSTART_1Y);
- P(OSTART_0U);
- P(OSTART_0V);
- P(OSTART_1U);
- P(OSTART_1V);
- P(OTILEOFF_0Y);
- P(OTILEOFF_1Y);
- P(OTILEOFF_0U);
- P(OTILEOFF_0V);
- P(OTILEOFF_1U);
- P(OTILEOFF_1V);
- P(FASTHSCALE);
- P(UVSCALEV);
-#undef P
-}
diff --git a/trunk/drivers/gpu/drm/i915/intel_panel.c b/trunk/drivers/gpu/drm/i915/intel_panel.c
deleted file mode 100644
index e7f5299d9d57..000000000000
--- a/trunk/drivers/gpu/drm/i915/intel_panel.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright © 2006-2010 Intel Corporation
- * Copyright (c) 2006 Dave Airlie
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt
- * Dave Airlie
- * Jesse Barnes
- * Chris Wilson
- */
-
-#include "intel_drv.h"
-
-void
-intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
- struct drm_display_mode *adjusted_mode)
-{
- adjusted_mode->hdisplay = fixed_mode->hdisplay;
- adjusted_mode->hsync_start = fixed_mode->hsync_start;
- adjusted_mode->hsync_end = fixed_mode->hsync_end;
- adjusted_mode->htotal = fixed_mode->htotal;
-
- adjusted_mode->vdisplay = fixed_mode->vdisplay;
- adjusted_mode->vsync_start = fixed_mode->vsync_start;
- adjusted_mode->vsync_end = fixed_mode->vsync_end;
- adjusted_mode->vtotal = fixed_mode->vtotal;
-
- adjusted_mode->clock = fixed_mode->clock;
-
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
-}
-
-/* adjusted_mode has been preset to be the panel's fixed mode */
-void
-intel_pch_panel_fitting(struct drm_device *dev,
- int fitting_mode,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int x, y, width, height;
-
- x = y = width = height = 0;
-
- /* Native modes don't need fitting */
- if (adjusted_mode->hdisplay == mode->hdisplay &&
- adjusted_mode->vdisplay == mode->vdisplay)
- goto done;
-
- switch (fitting_mode) {
- case DRM_MODE_SCALE_CENTER:
- width = mode->hdisplay;
- height = mode->vdisplay;
- x = (adjusted_mode->hdisplay - width + 1)/2;
- y = (adjusted_mode->vdisplay - height + 1)/2;
- break;
-
- case DRM_MODE_SCALE_ASPECT:
- /* Scale but preserve the aspect ratio */
- {
- u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
- u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
- if (scaled_width > scaled_height) { /* pillar */
- width = scaled_height / mode->vdisplay;
- x = (adjusted_mode->hdisplay - width + 1) / 2;
- y = 0;
- height = adjusted_mode->vdisplay;
- } else if (scaled_width < scaled_height) { /* letter */
- height = scaled_width / mode->hdisplay;
- y = (adjusted_mode->vdisplay - height + 1) / 2;
- x = 0;
- width = adjusted_mode->hdisplay;
- } else {
- x = y = 0;
- width = adjusted_mode->hdisplay;
- height = adjusted_mode->vdisplay;
- }
- }
- break;
-
- default:
- case DRM_MODE_SCALE_FULLSCREEN:
- x = y = 0;
- width = adjusted_mode->hdisplay;
- height = adjusted_mode->vdisplay;
- break;
- }
-
-done:
- dev_priv->pch_pf_pos = (x << 16) | y;
- dev_priv->pch_pf_size = (width << 16) | height;
-}
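
The deleted intel_pch_panel_fitting() above chooses between pillarboxing and letterboxing when scaling a mode onto the panel's native timings while preserving its aspect ratio. Below is a standalone sketch of just that arithmetic; there are no register writes, the result is printed, and the type names are invented.

    #include <stdio.h>

    struct simple_mode {
        int hdisplay;
        int vdisplay;
    };

    static void fit_aspect(const struct simple_mode *mode,
                           const struct simple_mode *panel)
    {
        /* cross-multiplied sizes, as in the deleted helper */
        unsigned int scaled_width  = panel->hdisplay * mode->vdisplay;
        unsigned int scaled_height = mode->hdisplay * panel->vdisplay;
        int x = 0, y = 0, width, height;

        if (scaled_width > scaled_height) {        /* pillarbox */
            width  = scaled_height / mode->vdisplay;
            x      = (panel->hdisplay - width + 1) / 2;
            height = panel->vdisplay;
        } else if (scaled_width < scaled_height) { /* letterbox */
            height = scaled_width / mode->hdisplay;
            y      = (panel->vdisplay - height + 1) / 2;
            width  = panel->hdisplay;
        } else {                                   /* aspect ratios match */
            width  = panel->hdisplay;
            height = panel->vdisplay;
        }
        printf("pos=(%d,%d) size=%dx%d\n", x, y, width, height);
    }

    int main(void)
    {
        struct simple_mode mode  = { 1024, 768 };  /* 4:3 source */
        struct simple_mode panel = { 1920, 1080 }; /* 16:9 panel */

        fit_aspect(&mode, &panel);  /* prints pos=(240,0) size=1440x1080 */
        return 0;
    }
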
diff --git a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c
index cb3508f78bc3..26362f8495a8 100644
--- a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,35 +33,18 @@
#include "i915_drm.h"
#include "i915_trace.h"
-static u32 i915_gem_get_seqno(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 seqno;
-
- seqno = dev_priv->next_seqno;
-
- /* reserve 0 for non-seqno */
- if (++dev_priv->next_seqno == 0)
- dev_priv->next_seqno = 1;
-
- return seqno;
-}
-
static void
render_ring_flush(struct drm_device *dev,
struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 cmd;
-
#if WATCH_EXEC
DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
invalidate_domains, flush_domains);
#endif
-
- trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
+ u32 cmd;
+ trace_i915_gem_request_flush(dev, ring->next_seqno,
invalidate_domains, flush_domains);
if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
@@ -220,13 +203,9 @@ static int init_render_ring(struct drm_device *dev,
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret = init_ring_common(dev, ring);
- int mode;
-
if (IS_I9XX(dev) && !IS_GEN3(dev)) {
- mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
- if (IS_GEN6(dev))
- mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
- I915_WRITE(MI_MODE, mode);
+ I915_WRITE(MI_MODE,
+ (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
}
return ret;
}
@@ -254,10 +233,9 @@ render_ring_add_request(struct drm_device *dev,
struct drm_file *file_priv,
u32 flush_domains)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
u32 seqno;
-
- seqno = i915_gem_get_seqno(dev);
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ seqno = intel_ring_get_seqno(dev, ring);
if (IS_GEN6(dev)) {
BEGIN_LP_RING(6);
@@ -427,9 +405,7 @@ bsd_ring_add_request(struct drm_device *dev,
u32 flush_domains)
{
u32 seqno;
-
- seqno = i915_gem_get_seqno(dev);
-
+ seqno = intel_ring_get_seqno(dev, ring);
intel_ring_begin(dev, ring, 4);
intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(dev, ring,
@@ -503,7 +479,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
exec_len = (uint32_t) exec->batch_len;
- trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
+ trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
count = nbox ? nbox : 1;
@@ -539,16 +515,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
intel_ring_advance(dev, ring);
}
- if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring, MI_FLUSH |
- MI_NO_WRITE_FLUSH |
- MI_INVALIDATE_ISP );
- intel_ring_emit(dev, ring, MI_NOOP);
- intel_ring_advance(dev, ring);
- }
/* XXX breadcrumb */
-
return 0;
}
@@ -621,10 +588,9 @@ static int init_status_page(struct drm_device *dev,
int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
+ int ret;
struct drm_i915_gem_object *obj_priv;
struct drm_gem_object *obj;
- int ret;
-
ring->dev = dev;
if (I915_NEED_GFX_HWS(dev)) {
@@ -637,14 +603,16 @@ int intel_init_ring_buffer(struct drm_device *dev,
if (obj == NULL) {
DRM_ERROR("Failed to allocate ringbuffer\n");
ret = -ENOMEM;
- goto err_hws;
+ goto cleanup;
}
ring->gem_object = obj;
ret = i915_gem_object_pin(obj, ring->alignment);
- if (ret)
- goto err_unref;
+ if (ret != 0) {
+ drm_gem_object_unreference(obj);
+ goto cleanup;
+ }
obj_priv = to_intel_bo(obj);
ring->map.size = ring->size;
@@ -656,14 +624,18 @@ int intel_init_ring_buffer(struct drm_device *dev,
drm_core_ioremap_wc(&ring->map, dev);
if (ring->map.handle == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
ret = -EINVAL;
- goto err_unpin;
+ goto cleanup;
}
ring->virtual_start = ring->map.handle;
ret = ring->init(dev, ring);
- if (ret)
- goto err_unmap;
+ if (ret != 0) {
+ intel_cleanup_ring_buffer(dev, ring);
+ return ret;
+ }
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_kernel_lost_context(dev);
@@ -677,15 +649,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
return ret;
-
-err_unmap:
- drm_core_ioremapfree(&ring->map, dev);
-err_unpin:
- i915_gem_object_unpin(obj);
-err_unref:
- drm_gem_object_unreference(obj);
- ring->gem_object = NULL;
-err_hws:
+cleanup:
cleanup_status_page(dev, ring);
return ret;
}
@@ -718,11 +682,9 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
}
virt = (unsigned int *)(ring->virtual_start + ring->tail);
- rem /= 8;
- while (rem--) {
- *virt++ = MI_NOOP;
+ rem /= 4;
+ while (rem--)
*virt++ = MI_NOOP;
- }
ring->tail = 0;
ring->space = ring->head - 8;
@@ -767,14 +729,21 @@ void intel_ring_begin(struct drm_device *dev,
intel_wrap_ring_buffer(dev, ring);
if (unlikely(ring->space < n))
intel_wait_ring_buffer(dev, ring, n);
+}
- ring->space -= n;
+void intel_ring_emit(struct drm_device *dev,
+ struct intel_ring_buffer *ring, unsigned int data)
+{
+ unsigned int *virt = ring->virtual_start + ring->tail;
+ *virt = data;
+ ring->tail += 4;
+ ring->tail &= ring->size - 1;
+ ring->space -= 4;
}
void intel_ring_advance(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
- ring->tail &= ring->size - 1;
ring->advance_ring(dev, ring);
}
@@ -793,6 +762,18 @@ void intel_fill_struct(struct drm_device *dev,
intel_ring_advance(dev, ring);
}
+u32 intel_ring_get_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ u32 seqno;
+ seqno = ring->next_seqno;
+
+ /* reserve 0 for non-seqno */
+ if (++ring->next_seqno == 0)
+ ring->next_seqno = 1;
+ return seqno;
+}
+
struct intel_ring_buffer render_ring = {
.name = "render ring",
.regs = {
@@ -810,6 +791,7 @@ struct intel_ring_buffer render_ring = {
.head = 0,
.tail = 0,
.space = 0,
+ .next_seqno = 1,
.user_irq_refcount = 0,
.irq_gem_seqno = 0,
.waiting_gem_seqno = 0,
@@ -848,6 +830,7 @@ struct intel_ring_buffer bsd_ring = {
.head = 0,
.tail = 0,
.space = 0,
+ .next_seqno = 1,
.user_irq_refcount = 0,
.irq_gem_seqno = 0,
.waiting_gem_seqno = 0,
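
intel_ring_get_seqno() above hands out a per-ring sequence number and skips 0 on wraparound, because 0 is reserved to mean "no seqno". A minimal standalone sketch of the same counter behaviour, with an invented ring structure:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    struct toy_ring {
        uint32_t next_seqno;
    };

    static uint32_t ring_get_seqno(struct toy_ring *ring)
    {
        uint32_t seqno = ring->next_seqno;

        /* reserve 0 for "no seqno": skip it when the counter wraps */
        if (++ring->next_seqno == 0)
            ring->next_seqno = 1;
        return seqno;
    }

    int main(void)
    {
        struct toy_ring ring = { .next_seqno = 0xfffffffe };

        /* across the wrap the values handed out are fffffffe, ffffffff, 1 */
        printf("%" PRIx32 "\n", ring_get_seqno(&ring));
        printf("%" PRIx32 "\n", ring_get_seqno(&ring));
        printf("%" PRIx32 "\n", ring_get_seqno(&ring));
        return 0;
    }

This reservation is also why the static render_ring and bsd_ring definitions above initialize .next_seqno = 1.
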
diff --git a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h
index 525e7d3edda8..d5568d3766de 100644
--- a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -26,6 +26,7 @@ struct intel_ring_buffer {
unsigned int head;
unsigned int tail;
unsigned int space;
+ u32 next_seqno;
struct intel_hw_status_page status_page;
u32 irq_gem_seqno; /* last seq seem at irq time */
@@ -105,16 +106,8 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring);
void intel_ring_begin(struct drm_device *dev,
struct intel_ring_buffer *ring, int n);
-
-static inline void intel_ring_emit(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- unsigned int data)
-{
- unsigned int *virt = ring->virtual_start + ring->tail;
- *virt = data;
- ring->tail += 4;
-}
-
+void intel_ring_emit(struct drm_device *dev,
+ struct intel_ring_buffer *ring, u32 data);
void intel_fill_struct(struct drm_device *dev,
struct intel_ring_buffer *ring,
void *data,
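
The out-of-line intel_ring_emit() added in intel_ringbuffer.c above writes one dword at the ring tail and wraps the tail with "tail &= size - 1", which relies on the ring size being a power of two. A toy userspace sketch of that wrap; the sizes and names here are illustrative only.

    #include <stdio.h>

    #define RING_SIZE 16u                  /* must be a power of two */

    struct word_ring {
        unsigned int data[RING_SIZE / 4];
        unsigned int tail;                 /* byte offset, like ring->tail */
    };

    static void ring_emit(struct word_ring *ring, unsigned int dword)
    {
        ring->data[ring->tail / 4] = dword;
        ring->tail += 4;
        ring->tail &= RING_SIZE - 1;       /* cheap modulo for power-of-two sizes */
    }

    int main(void)
    {
        struct word_ring ring = { .tail = 12 };

        ring_emit(&ring, 0xdeadbeef);      /* lands at offset 12, tail wraps to 0 */
        ring_emit(&ring, 0xcafef00d);      /* lands at offset 0 */
        printf("tail=%u slot0=%#x slot3=%#x\n",
               ring.tail, ring.data[0], ring.data[3]);
        return 0;
    }
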
diff --git a/trunk/drivers/gpu/drm/i915/intel_sdvo.c b/trunk/drivers/gpu/drm/i915/intel_sdvo.c
index e3b7a7ee39cb..d9d4d51aa89e 100644
--- a/trunk/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/trunk/drivers/gpu/drm/i915/intel_sdvo.c
@@ -31,8 +31,8 @@
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
-#include "drm_edid.h"
#include "intel_drv.h"
+#include "drm_edid.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_sdvo_regs.h"
@@ -47,10 +47,9 @@
#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
-#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
-static const char *tv_format_names[] = {
+static char *tv_format_names[] = {
"NTSC_M" , "NTSC_J" , "NTSC_443",
"PAL_B" , "PAL_D" , "PAL_G" ,
"PAL_H" , "PAL_I" , "PAL_M" ,
@@ -62,9 +61,7 @@ static const char *tv_format_names[] = {
#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
-struct intel_sdvo {
- struct intel_encoder base;
-
+struct intel_sdvo_priv {
u8 slave_addr;
/* Register for the SDVO device: SDVOB or SDVOC */
@@ -98,7 +95,7 @@ struct intel_sdvo {
bool is_tv;
/* This is for current tv format name */
- int tv_format_index;
+ char *tv_format_name;
/**
* This is set if we treat the device as HDMI, instead of DVI.
@@ -135,40 +132,37 @@ struct intel_sdvo {
};
struct intel_sdvo_connector {
- struct intel_connector base;
-
/* Mark the type of connector */
uint16_t output_flag;
/* This contains all current supported TV format */
- u8 tv_format_supported[TV_FORMAT_NUM];
+ char *tv_format_supported[TV_FORMAT_NUM];
int format_supported_num;
- struct drm_property *tv_format;
+ struct drm_property *tv_format_property;
+ struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
+
+ /**
+ * Returned SDTV resolutions allowed for the current format, if the
+ * device reported it.
+ */
+ struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
/* add the property for the SDVO-TV */
- struct drm_property *left;
- struct drm_property *right;
- struct drm_property *top;
- struct drm_property *bottom;
- struct drm_property *hpos;
- struct drm_property *vpos;
- struct drm_property *contrast;
- struct drm_property *saturation;
- struct drm_property *hue;
- struct drm_property *sharpness;
- struct drm_property *flicker_filter;
- struct drm_property *flicker_filter_adaptive;
- struct drm_property *flicker_filter_2d;
- struct drm_property *tv_chroma_filter;
- struct drm_property *tv_luma_filter;
- struct drm_property *dot_crawl;
+ struct drm_property *left_property;
+ struct drm_property *right_property;
+ struct drm_property *top_property;
+ struct drm_property *bottom_property;
+ struct drm_property *hpos_property;
+ struct drm_property *vpos_property;
/* add the property for the SDVO-TV/LVDS */
- struct drm_property *brightness;
+ struct drm_property *brightness_property;
+ struct drm_property *contrast_property;
+ struct drm_property *saturation_property;
+ struct drm_property *hue_property;
/* Add variable to record current setting for the above property */
u32 left_margin, right_margin, top_margin, bottom_margin;
-
/* this is to get the range of margin.*/
u32 max_hscan, max_vscan;
u32 max_hpos, cur_hpos;
@@ -177,54 +171,36 @@ struct intel_sdvo_connector {
u32 cur_contrast, max_contrast;
u32 cur_saturation, max_saturation;
u32 cur_hue, max_hue;
- u32 cur_sharpness, max_sharpness;
- u32 cur_flicker_filter, max_flicker_filter;
- u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive;
- u32 cur_flicker_filter_2d, max_flicker_filter_2d;
- u32 cur_tv_chroma_filter, max_tv_chroma_filter;
- u32 cur_tv_luma_filter, max_tv_luma_filter;
- u32 cur_dot_crawl, max_dot_crawl;
};
-static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder)
-{
- return container_of(enc_to_intel_encoder(encoder), struct intel_sdvo, base);
-}
-
-static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
-{
- return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base);
-}
-
-static bool
-intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags);
-static bool
-intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
- struct intel_sdvo_connector *intel_sdvo_connector,
- int type);
static bool
-intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
- struct intel_sdvo_connector *intel_sdvo_connector);
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder,
+ uint16_t flags);
+static void
+intel_sdvo_tv_create_property(struct drm_connector *connector, int type);
+static void
+intel_sdvo_create_enhance_property(struct drm_connector *connector);
/**
* Writes the SDVOB or SDVOC with the given value, but always writes both
* SDVOB and SDVOC to work around apparent hardware issues (according to
* comments in the BIOS).
*/
-static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
+static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
{
- struct drm_device *dev = intel_sdvo->base.enc.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u32 bval = val, cval = val;
int i;
- if (intel_sdvo->sdvo_reg == PCH_SDVOB) {
- I915_WRITE(intel_sdvo->sdvo_reg, val);
- I915_READ(intel_sdvo->sdvo_reg);
+ if (sdvo_priv->sdvo_reg == PCH_SDVOB) {
+ I915_WRITE(sdvo_priv->sdvo_reg, val);
+ I915_READ(sdvo_priv->sdvo_reg);
return;
}
- if (intel_sdvo->sdvo_reg == SDVOB) {
+ if (sdvo_priv->sdvo_reg == SDVOB) {
cval = I915_READ(SDVOC);
} else {
bval = I915_READ(SDVOB);
@@ -243,27 +219,33 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
}
}
-static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
+static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr,
+ u8 *ch)
{
- u8 out_buf[2] = { addr, 0 };
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ u8 out_buf[2];
u8 buf[2];
+ int ret;
+
struct i2c_msg msgs[] = {
{
- .addr = intel_sdvo->slave_addr >> 1,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = intel_sdvo->slave_addr >> 1,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = I2C_M_RD,
.len = 1,
.buf = buf,
}
};
- int ret;
- if ((ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 2)) == 2)
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if ((ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 2)) == 2)
{
*ch = buf[0];
return true;
@@ -273,26 +255,35 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
return false;
}
-static bool intel_sdvo_write_byte(struct intel_sdvo *intel_sdvo, int addr, u8 ch)
+static bool intel_sdvo_write_byte(struct intel_encoder *intel_encoder, int addr,
+ u8 ch)
{
- u8 out_buf[2] = { addr, ch };
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ u8 out_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = intel_sdvo->slave_addr >> 1,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = 0,
.len = 2,
.buf = out_buf,
}
};
- return i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 1) == 1;
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+ if (i2c_transfer(intel_encoder->i2c_bus, msgs, 1) == 1)
+ {
+ return true;
+ }
+ return false;
}
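
intel_sdvo_read_byte() and intel_sdvo_write_byte() above are thin wrappers around i2c_transfer(): a register read is one write message carrying the register address followed by one read message for the value, both addressed with slave_addr >> 1. A rough userspace sketch of that two-message pattern, with the bus and the message structure mocked (the real code uses struct i2c_msg), might look like this:

/* Toy model, not kernel code: the address-then-read message pair. */
#include <stdint.h>
#include <stdio.h>

struct mock_i2c_msg {
	uint8_t addr;		/* 7-bit slave address */
	int read;		/* 0 = write, 1 = read */
	int len;
	uint8_t *buf;
};

/* Fake bus: every device answers 0x42 when register 0x0a is read. */
static int mock_i2c_transfer(struct mock_i2c_msg *msgs, int num)
{
	if (num == 2 && !msgs[0].read && msgs[1].read)
		msgs[1].buf[0] = (msgs[0].buf[0] == 0x0a) ? 0x42 : 0x00;
	return num;		/* like i2c_transfer(), return the message count */
}

static int mock_read_byte(uint8_t slave_addr, uint8_t reg, uint8_t *val)
{
	uint8_t out = reg;
	struct mock_i2c_msg msgs[] = {
		{ .addr = (uint8_t)(slave_addr >> 1), .read = 0, .len = 1, .buf = &out },
		{ .addr = (uint8_t)(slave_addr >> 1), .read = 1, .len = 1, .buf = val },
	};

	return mock_i2c_transfer(msgs, 2) == 2;	/* nonzero on success */
}

int main(void)
{
	uint8_t val = 0;

	if (mock_read_byte(0x70, 0x0a, &val))
		printf("read 0x%02x\n", val);
	return 0;
}
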
#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
/** Mapping of command numbers to names, for debug output */
static const struct _sdvo_cmd_name {
u8 cmd;
- const char *name;
+ char *name;
} sdvo_cmd_names[] = {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
@@ -337,14 +328,13 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
-
/* Add the op code for SDVO enhancements */
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_V),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
@@ -363,27 +353,6 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
-
/* HDMI op code */
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
@@ -408,15 +377,17 @@ static const struct _sdvo_cmd_name {
};
#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
-#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC")
+#define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? "SDVOB" : "SDVOC")
+#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv)
-static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
- const void *args, int args_len)
+static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
+ void *args, int args_len)
{
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int i;
DRM_DEBUG_KMS("%s: W: %02X ",
- SDVO_NAME(intel_sdvo), cmd);
+ SDVO_NAME(sdvo_priv), cmd);
for (i = 0; i < args_len; i++)
DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
for (; i < 8; i++)
@@ -432,20 +403,19 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
DRM_LOG_KMS("\n");
}
-static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
- const void *args, int args_len)
+static void intel_sdvo_write_cmd(struct intel_encoder *intel_encoder, u8 cmd,
+ void *args, int args_len)
{
int i;
- intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
+ intel_sdvo_debug_write(intel_encoder, cmd, args, args_len);
for (i = 0; i < args_len; i++) {
- if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0 - i,
- ((u8*)args)[i]))
- return false;
+ intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0 - i,
+ ((u8*)args)[i]);
}
- return intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_OPCODE, cmd);
+ intel_sdvo_write_byte(intel_encoder, SDVO_I2C_OPCODE, cmd);
}
static const char *cmd_status_names[] = {
@@ -458,13 +428,14 @@ static const char *cmd_status_names[] = {
"Scaling not supported"
};
-static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo,
+static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder,
void *response, int response_len,
u8 status)
{
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int i;
- DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
+ DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv));
for (i = 0; i < response_len; i++)
DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
for (; i < 8; i++)
@@ -476,8 +447,8 @@ static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo,
DRM_LOG_KMS("\n");
}
-static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
- void *response, int response_len)
+static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder,
+ void *response, int response_len)
{
int i;
u8 status;
@@ -486,26 +457,24 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
while (retry--) {
/* Read the command response */
for (i = 0; i < response_len; i++) {
- if (!intel_sdvo_read_byte(intel_sdvo,
- SDVO_I2C_RETURN_0 + i,
- &((u8 *)response)[i]))
- return false;
+ intel_sdvo_read_byte(intel_encoder,
+ SDVO_I2C_RETURN_0 + i,
+ &((u8 *)response)[i]);
}
/* read the return status */
- if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS,
- &status))
- return false;
+ intel_sdvo_read_byte(intel_encoder, SDVO_I2C_CMD_STATUS,
+ &status);
- intel_sdvo_debug_response(intel_sdvo, response, response_len,
+ intel_sdvo_debug_response(intel_encoder, response, response_len,
status);
if (status != SDVO_CMD_STATUS_PENDING)
- break;
+ return status;
mdelay(50);
}
- return status == SDVO_CMD_STATUS_SUCCESS;
+ return status;
}
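
Taken together, intel_sdvo_write_cmd() and intel_sdvo_read_response() above implement the command round-trip that the rest of this file relies on: argument bytes are written downward from SDVO_I2C_ARG_0, the opcode goes to SDVO_I2C_OPCODE, and the reply is read back from SDVO_I2C_RETURN_0 onward while SDVO_I2C_CMD_STATUS is polled, retrying (with a 50 ms delay in the kernel) while the device still reports PENDING. A standalone sketch of that flow follows; the register offsets and status codes are placeholders, not the real SDVO values:

/* Toy model, not kernel code: push args and opcode, then poll for the reply. */
#include <stdint.h>

#define MOCK_STATUS_PENDING	4	/* placeholder status encoding */
#define MOCK_STATUS_SUCCESS	1

enum { MOCK_ARG_0 = 0x07, MOCK_OPCODE = 0x08,
       MOCK_CMD_STATUS = 0x09, MOCK_RETURN_0 = 0x0a };

static uint8_t mock_regs[256];	/* the "device": registers as a byte array */

static void mock_reg_write(uint8_t addr, uint8_t val) { mock_regs[addr] = val; }
static uint8_t mock_reg_read(uint8_t addr) { return mock_regs[addr]; }

static uint8_t mock_sdvo_cmd(uint8_t cmd, const uint8_t *args, int args_len,
			     uint8_t *resp, int resp_len)
{
	int i, retry = 50;
	uint8_t status;

	for (i = 0; i < args_len; i++)
		mock_reg_write(MOCK_ARG_0 - i, args[i]);	/* args grow downward */
	mock_reg_write(MOCK_OPCODE, cmd);

	do {	/* the kernel sleeps 50 ms between polls here */
		for (i = 0; i < resp_len; i++)
			resp[i] = mock_reg_read(MOCK_RETURN_0 + i);
		status = mock_reg_read(MOCK_CMD_STATUS);
	} while (status == MOCK_STATUS_PENDING && retry--);

	return status;
}

int main(void)
{
	uint8_t arg = 0, reply[2] = { 0, 0 };

	mock_regs[MOCK_CMD_STATUS] = MOCK_STATUS_SUCCESS;	/* pretend it answered */
	return mock_sdvo_cmd(0x0b, &arg, 1, reply, 2) != MOCK_STATUS_SUCCESS;
}
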
static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
@@ -525,36 +494,37 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
* another I2C transaction after issuing the DDC bus switch, it will be
* switched to the internal SDVO register.
*/
-static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
+static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encoder,
u8 target)
{
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
struct i2c_msg msgs[] = {
{
- .addr = intel_sdvo->slave_addr >> 1,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = 0,
.len = 2,
.buf = out_buf,
},
/* the following two are to read the response */
{
- .addr = intel_sdvo->slave_addr >> 1,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = 0,
.len = 1,
.buf = cmd_buf,
},
{
- .addr = intel_sdvo->slave_addr >> 1,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = I2C_M_RD,
.len = 1,
.buf = ret_value,
},
};
- intel_sdvo_debug_write(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ intel_sdvo_debug_write(intel_encoder, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
&target, 1);
/* write the DDC switch command argument */
- intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target);
+ intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0, target);
out_buf[0] = SDVO_I2C_OPCODE;
out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
@@ -563,7 +533,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
ret_value[0] = 0;
ret_value[1] = 0;
- ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 3);
+ ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 3);
if (ret != 3) {
/* failure in I2C transfer */
DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
@@ -577,29 +547,23 @@ static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
return;
}
-static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
+static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, bool target_0, bool target_1)
{
- if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
- return false;
+ struct intel_sdvo_set_target_input_args targets = {0};
+ u8 status;
- return intel_sdvo_read_response(intel_sdvo, NULL, 0);
-}
+ if (target_0 && target_1)
+ return SDVO_CMD_STATUS_NOTSUPP;
-static bool
-intel_sdvo_get_value(struct intel_sdvo *intel_sdvo, u8 cmd, void *value, int len)
-{
- if (!intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0))
- return false;
+ if (target_1)
+ targets.target_1 = 1;
- return intel_sdvo_read_response(intel_sdvo, value, len);
-}
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_INPUT, &targets,
+ sizeof(targets));
-static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo)
-{
- struct intel_sdvo_set_target_input_args targets = {0};
- return intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_TARGET_INPUT,
- &targets, sizeof(targets));
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+
+ return (status == SDVO_CMD_STATUS_SUCCESS);
}
/**
@@ -608,12 +572,14 @@ static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo)
* This function is making an assumption about the layout of the response,
* which should be checked against the docs.
*/
-static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *input_1, bool *input_2)
+static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, bool *input_1, bool *input_2)
{
struct intel_sdvo_get_trained_inputs_response response;
+ u8 status;
- if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
- &response, sizeof(response)))
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, &response, sizeof(response));
+ if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
*input_1 = response.input0_trained;
@@ -621,18 +587,21 @@ static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *i
return true;
}
-static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
+static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder,
u16 outputs)
{
- return intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_ACTIVE_OUTPUTS,
- &outputs, sizeof(outputs));
+ u8 status;
+
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs,
+ sizeof(outputs));
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+ return (status == SDVO_CMD_STATUS_SUCCESS);
}
-static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
+static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encoder,
int mode)
{
- u8 state = SDVO_ENCODER_STATE_ON;
+ u8 status, state = SDVO_ENCODER_STATE_ON;
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -649,63 +618,88 @@ static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
break;
}
- return intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
+ sizeof(state));
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+
+ return (status == SDVO_CMD_STATUS_SUCCESS);
}
-static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo,
+static bool intel_sdvo_get_input_pixel_clock_range(struct intel_encoder *intel_encoder,
int *clock_min,
int *clock_max)
{
struct intel_sdvo_pixel_clock_range clocks;
+ u8 status;
+
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+ NULL, 0);
+
+ status = intel_sdvo_read_response(intel_encoder, &clocks, sizeof(clocks));
- if (!intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
- &clocks, sizeof(clocks)))
+ if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
/* Convert the values from units of 10 kHz to kHz. */
*clock_min = clocks.min * 10;
*clock_max = clocks.max * 10;
+
return true;
}
-static bool intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo,
+static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder,
u16 outputs)
{
- return intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_TARGET_OUTPUT,
- &outputs, sizeof(outputs));
+ u8 status;
+
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
+ sizeof(outputs));
+
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+ return (status == SDVO_CMD_STATUS_SUCCESS);
}
-static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
+static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd,
struct intel_sdvo_dtd *dtd)
{
- return intel_sdvo_set_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
- intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+ u8 status;
+
+ intel_sdvo_write_cmd(intel_encoder, cmd, &dtd->part1, sizeof(dtd->part1));
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ intel_sdvo_write_cmd(intel_encoder, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ return true;
}
-static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
+static bool intel_sdvo_set_input_timing(struct intel_encoder *intel_encoder,
struct intel_sdvo_dtd *dtd)
{
- return intel_sdvo_set_timing(intel_sdvo,
+ return intel_sdvo_set_timing(intel_encoder,
SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
}
-static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo,
+static bool intel_sdvo_set_output_timing(struct intel_encoder *intel_encoder,
struct intel_sdvo_dtd *dtd)
{
- return intel_sdvo_set_timing(intel_sdvo,
+ return intel_sdvo_set_timing(intel_encoder,
SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
}
static bool
-intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
+intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder,
uint16_t clock,
uint16_t width,
uint16_t height)
{
struct intel_sdvo_preferred_input_timing_args args;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ uint8_t status;
memset(&args, 0, sizeof(args));
args.clock = clock;
@@ -713,32 +707,59 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
args.height = height;
args.interlace = 0;
- if (intel_sdvo->is_lvds &&
- (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
- intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
+ if (sdvo_priv->is_lvds &&
+ (sdvo_priv->sdvo_lvds_fixed_mode->hdisplay != width ||
+ sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height))
args.scaled = 1;
- return intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
- &args, sizeof(args));
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+ &args, sizeof(args));
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ return true;
}
-static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
+static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_encoder,
struct intel_sdvo_dtd *dtd)
{
- return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
- &dtd->part1, sizeof(dtd->part1)) &&
- intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
- &dtd->part2, sizeof(dtd->part2));
+ bool status;
+
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+ NULL, 0);
+
+ status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
+ sizeof(dtd->part1));
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+ NULL, 0);
+
+ status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
+ sizeof(dtd->part2));
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ return false;
}
-static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val)
+static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val)
{
- return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+ u8 status;
+
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ return true;
}
static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
- const struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
uint16_t width, height;
uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
@@ -787,7 +808,7 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
}
static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
- const struct intel_sdvo_dtd *dtd)
+ struct intel_sdvo_dtd *dtd)
{
mode->hdisplay = dtd->part1.h_active;
mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
@@ -819,33 +840,45 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
mode->flags |= DRM_MODE_FLAG_PVSYNC;
}
-static bool intel_sdvo_get_supp_encode(struct intel_sdvo *intel_sdvo,
+static bool intel_sdvo_get_supp_encode(struct intel_encoder *intel_encoder,
struct intel_sdvo_encode *encode)
{
- if (intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_SUPP_ENCODE,
- encode, sizeof(*encode)))
- return true;
+ uint8_t status;
- /* non-support means DVI */
- memset(encode, 0, sizeof(*encode));
- return false;
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, encode, sizeof(*encode));
+ if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */
+ memset(encode, 0, sizeof(*encode));
+ return false;
+ }
+
+ return true;
}
-static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
+static bool intel_sdvo_set_encode(struct intel_encoder *intel_encoder,
uint8_t mode)
{
- return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
+ uint8_t status;
+
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODE, &mode, 1);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+
+ return (status == SDVO_CMD_STATUS_SUCCESS);
}
-static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
+static bool intel_sdvo_set_colorimetry(struct intel_encoder *intel_encoder,
uint8_t mode)
{
- return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
+ uint8_t status;
+
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+
+ return (status == SDVO_CMD_STATUS_SUCCESS);
}
#if 0
-static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
+static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder)
{
int i, j;
uint8_t set_buf_index[2];
@@ -854,7 +887,8 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
uint8_t buf[48];
uint8_t *pos;
- intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
+ intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0);
+ intel_sdvo_read_response(encoder, &av_split, 1);
for (i = 0; i <= av_split; i++) {
set_buf_index[0] = i; set_buf_index[1] = 0;
@@ -874,7 +908,7 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
}
#endif
-static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo,
+static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder,
int index,
uint8_t *data, int8_t size, uint8_t tx_rate)
{
@@ -883,18 +917,15 @@ static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo,
set_buf_index[0] = index;
set_buf_index[1] = 0;
- if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
- set_buf_index, 2))
- return false;
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2);
for (; size > 0; size -= 8) {
- if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8))
- return false;
-
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_DATA, data, 8);
data += 8;
}
- return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
}
static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
@@ -969,7 +1000,7 @@ struct dip_infoframe {
} __attribute__ ((packed)) u;
} __attribute__((packed));
-static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
+static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder,
struct drm_display_mode * mode)
{
struct dip_infoframe avi_if = {
@@ -980,107 +1011,133 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
4 + avi_if.len);
- return intel_sdvo_set_hdmi_buf(intel_sdvo, 1, (uint8_t *)&avi_if,
- 4 + avi_if.len,
- SDVO_HBUF_TX_VSYNC);
+ intel_sdvo_set_hdmi_buf(intel_encoder, 1, (uint8_t *)&avi_if,
+ 4 + avi_if.len,
+ SDVO_HBUF_TX_VSYNC);
}
-static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
+static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder)
{
+
struct intel_sdvo_tv_format format;
- uint32_t format_map;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ uint32_t format_map, i;
+ uint8_t status;
+
+ for (i = 0; i < TV_FORMAT_NUM; i++)
+ if (tv_format_names[i] == sdvo_priv->tv_format_name)
+ break;
- format_map = 1 << intel_sdvo->tv_format_index;
+ format_map = 1 << i;
memset(&format, 0, sizeof(format));
- memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
+ memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
+ sizeof(format) : sizeof(format_map));
- BUILD_BUG_ON(sizeof(format) != 6);
- return intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_TV_FORMAT,
- &format, sizeof(format));
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format,
+ sizeof(format));
+
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ DRM_DEBUG_KMS("%s: Failed to set TV format\n",
+ SDVO_NAME(sdvo_priv));
}
-static bool
-intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
- struct drm_display_mode *mode)
+static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct intel_sdvo_dtd output_dtd;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *dev_priv = intel_encoder->dev_priv;
- if (!intel_sdvo_set_target_output(intel_sdvo,
- intel_sdvo->attached_output))
- return false;
+ if (dev_priv->is_tv) {
+ struct intel_sdvo_dtd output_dtd;
+ bool success;
- intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
- if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
- return false;
+ /* We need to construct preferred input timings based on our
+ * output timings. To do that, we have to set the output
+ * timings, even though this isn't really the right place in
+ * the sequence to do it. Oh well.
+ */
- return true;
-}
-static bool
-intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct intel_sdvo_dtd input_dtd;
+ /* Set output timings */
+ intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+ intel_sdvo_set_target_output(intel_encoder,
+ dev_priv->attached_output);
+ intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
- /* Reset the input timing to the screen. Assume always input 0. */
- if (!intel_sdvo_set_target_input(intel_sdvo))
- return false;
+ /* Set the input timing to the screen. Assume always input 0. */
+ intel_sdvo_set_target_input(intel_encoder, true, false);
- if (!intel_sdvo_create_preferred_input_timing(intel_sdvo,
- mode->clock / 10,
- mode->hdisplay,
- mode->vdisplay))
- return false;
- if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
- &input_dtd))
- return false;
+ success = intel_sdvo_create_preferred_input_timing(intel_encoder,
+ mode->clock / 10,
+ mode->hdisplay,
+ mode->vdisplay);
+ if (success) {
+ struct intel_sdvo_dtd input_dtd;
- intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
- intel_sdvo->sdvo_flags = input_dtd.part2.sdvo_flags;
+ intel_sdvo_get_preferred_input_timing(intel_encoder,
+ &input_dtd);
+ intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
+ dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
- drm_mode_set_crtcinfo(adjusted_mode, 0);
- mode->clock = adjusted_mode->clock;
- return true;
-}
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
-static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ mode->clock = adjusted_mode->clock;
- /* We need to construct preferred input timings based on our
- * output timings. To do that, we have to set the output
- * timings, even though this isn't really the right place in
- * the sequence to do it. Oh well.
- */
- if (intel_sdvo->is_tv) {
- if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
+ adjusted_mode->clock *=
+ intel_sdvo_get_pixel_multiplier(mode);
+ } else {
return false;
+ }
+ } else if (dev_priv->is_lvds) {
+ struct intel_sdvo_dtd output_dtd;
+ bool success;
- (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
- mode,
- adjusted_mode);
- } else if (intel_sdvo->is_lvds) {
- drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0);
+ drm_mode_set_crtcinfo(dev_priv->sdvo_lvds_fixed_mode, 0);
+ /* Set output timings */
+ intel_sdvo_get_dtd_from_mode(&output_dtd,
+ dev_priv->sdvo_lvds_fixed_mode);
- if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
- intel_sdvo->sdvo_lvds_fixed_mode))
- return false;
+ intel_sdvo_set_target_output(intel_encoder,
+ dev_priv->attached_output);
+ intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
- (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
- mode,
- adjusted_mode);
- }
+ /* Set the input timing to the screen. Assume always input 0. */
+ intel_sdvo_set_target_input(intel_encoder, true, false);
- /* Make the CRTC code factor in the SDVO pixel multiplier. The
- * SDVO device will be told of the multiplier during mode_set.
- */
- adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
+ success = intel_sdvo_create_preferred_input_timing(
+ intel_encoder,
+ mode->clock / 10,
+ mode->hdisplay,
+ mode->vdisplay);
+
+ if (success) {
+ struct intel_sdvo_dtd input_dtd;
+
+ intel_sdvo_get_preferred_input_timing(intel_encoder,
+ &input_dtd);
+ intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
+ dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ mode->clock = adjusted_mode->clock;
+
+ adjusted_mode->clock *=
+ intel_sdvo_get_pixel_multiplier(mode);
+ } else {
+ return false;
+ }
+
+ } else {
+ /* Make the CRTC code factor in the SDVO pixel multiplier. The
+ * SDVO device will be told of the multiplier during mode_set.
+ */
+ adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
+ }
return true;
}
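
Whichever branch intel_sdvo_mode_fixup() above ends up taking, the adjusted mode's clock is multiplied by the SDVO pixel multiplier so the CRTC is driven fast enough, while mode->clock keeps the rate the SDVO device itself is told about. A small sketch of that clock handling is below; the 100 MHz and 50 MHz thresholds are an assumption about intel_sdvo_get_pixel_multiplier(), which is not shown in these hunks:

/* Toy model, not kernel code: scale the CRTC clock by the pixel multiplier. */
#include <stdio.h>

struct mock_mode { int clock; };	/* kHz, like drm_display_mode.clock */

static int mock_pixel_multiplier(const struct mock_mode *mode)
{
	if (mode->clock >= 100000)	/* assumed thresholds, see note above */
		return 1;
	else if (mode->clock >= 50000)
		return 2;
	return 4;			/* low dot clocks get sent at 4x */
}

static void mock_fixup_clock(const struct mock_mode *mode,
			     struct mock_mode *adjusted)
{
	adjusted->clock = mode->clock * mock_pixel_multiplier(mode);
}

int main(void)
{
	struct mock_mode mode = { .clock = 25175 }, adjusted = mode;

	mock_fixup_clock(&mode, &adjusted);
	printf("requested %d kHz, CRTC programmed for %d kHz\n",
	       mode.clock, adjusted.clock);
	return 0;
}
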
@@ -1092,11 +1149,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u32 sdvox = 0;
- int sdvo_pixel_multiply, rate;
+ int sdvo_pixel_multiply;
struct intel_sdvo_in_out_map in_out;
struct intel_sdvo_dtd input_dtd;
+ u8 status;
if (!mode)
return;
@@ -1107,46 +1166,41 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
* channel on the motherboard. In a two-input device, the first input
* will be SDVOB and the second SDVOC.
*/
- in_out.in0 = intel_sdvo->attached_output;
+ in_out.in0 = sdvo_priv->attached_output;
in_out.in1 = 0;
- intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_IN_OUT_MAP,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP,
&in_out, sizeof(in_out));
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
- if (intel_sdvo->is_hdmi) {
- if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
- return;
-
+ if (sdvo_priv->is_hdmi) {
+ intel_sdvo_set_avi_infoframe(intel_encoder, mode);
sdvox |= SDVO_AUDIO_ENABLE;
}
/* We have tried to get input timing in mode_fixup, and filled into
adjusted_mode */
- intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
- if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
- input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags;
+ if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+ input_dtd.part2.sdvo_flags = sdvo_priv->sdvo_flags;
+ } else
+ intel_sdvo_get_dtd_from_mode(&input_dtd, mode);
/* If it's a TV, we already set the output timing in mode_fixup.
* Otherwise, the output timing is equal to the input timing.
*/
- if (!intel_sdvo->is_tv && !intel_sdvo->is_lvds) {
+ if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
/* Set the output timing to the screen */
- if (!intel_sdvo_set_target_output(intel_sdvo,
- intel_sdvo->attached_output))
- return;
-
- (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
+ intel_sdvo_set_target_output(intel_encoder,
+ sdvo_priv->attached_output);
+ intel_sdvo_set_output_timing(intel_encoder, &input_dtd);
}
/* Set the input timing to the screen. Assume always input 0. */
- if (!intel_sdvo_set_target_input(intel_sdvo))
- return;
+ intel_sdvo_set_target_input(intel_encoder, true, false);
- if (intel_sdvo->is_tv) {
- if (!intel_sdvo_set_tv_format(intel_sdvo))
- return;
- }
+ if (sdvo_priv->is_tv)
+ intel_sdvo_set_tv_format(intel_encoder);
/* We would like to use intel_sdvo_create_preferred_input_timing() to
* provide the device with a timing it can support, if it supports that
@@ -1163,17 +1217,23 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
intel_sdvo_set_input_timing(encoder, &input_dtd);
}
#else
- (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
+ intel_sdvo_set_input_timing(intel_encoder, &input_dtd);
#endif
- sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
- switch (sdvo_pixel_multiply) {
- case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
- case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
- case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
+ switch (intel_sdvo_get_pixel_multiplier(mode)) {
+ case 1:
+ intel_sdvo_set_clock_rate_mult(intel_encoder,
+ SDVO_CLOCK_RATE_MULT_1X);
+ break;
+ case 2:
+ intel_sdvo_set_clock_rate_mult(intel_encoder,
+ SDVO_CLOCK_RATE_MULT_2X);
+ break;
+ case 4:
+ intel_sdvo_set_clock_rate_mult(intel_encoder,
+ SDVO_CLOCK_RATE_MULT_4X);
+ break;
}
- if (!intel_sdvo_set_clock_rate_mult(intel_sdvo, rate))
- return;
/* Set the SDVO control regs. */
if (IS_I965G(dev)) {
@@ -1183,8 +1243,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
} else {
- sdvox |= I915_READ(intel_sdvo->sdvo_reg);
- switch (intel_sdvo->sdvo_reg) {
+ sdvox |= I915_READ(sdvo_priv->sdvo_reg);
+ switch (sdvo_priv->sdvo_reg) {
case SDVOB:
sdvox &= SDVOB_PRESERVE_MASK;
break;
@@ -1197,6 +1257,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (intel_crtc->pipe == 1)
sdvox |= SDVO_PIPE_B_SELECT;
+ sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
if (IS_I965G(dev)) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
@@ -1205,28 +1266,28 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
}
- if (intel_sdvo->sdvo_flags & SDVO_NEED_TO_STALL)
+ if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL)
sdvox |= SDVO_STALL_SELECT;
- intel_sdvo_write_sdvox(intel_sdvo, sdvox);
+ intel_sdvo_write_sdvox(intel_encoder, sdvox);
}
static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u32 temp;
if (mode != DRM_MODE_DPMS_ON) {
- intel_sdvo_set_active_outputs(intel_sdvo, 0);
+ intel_sdvo_set_active_outputs(intel_encoder, 0);
if (0)
- intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
+ intel_sdvo_set_encoder_power_state(intel_encoder, mode);
if (mode == DRM_MODE_DPMS_OFF) {
- temp = I915_READ(intel_sdvo->sdvo_reg);
+ temp = I915_READ(sdvo_priv->sdvo_reg);
if ((temp & SDVO_ENABLE) != 0) {
- intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
+ intel_sdvo_write_sdvox(intel_encoder, temp & ~SDVO_ENABLE);
}
}
} else {
@@ -1234,25 +1295,28 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
int i;
u8 status;
- temp = I915_READ(intel_sdvo->sdvo_reg);
+ temp = I915_READ(sdvo_priv->sdvo_reg);
if ((temp & SDVO_ENABLE) == 0)
- intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
+ intel_sdvo_write_sdvox(intel_encoder, temp | SDVO_ENABLE);
for (i = 0; i < 2; i++)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev);
+
+ status = intel_sdvo_get_trained_inputs(intel_encoder, &input1,
+ &input2);
+
- status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
/* Warn if the device reported failure to sync.
* A lot of SDVO devices fail to notify of sync, but it's
* a given it the status is a success, we succeeded.
*/
if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
DRM_DEBUG_KMS("First %s output reported failure to "
- "sync\n", SDVO_NAME(intel_sdvo));
+ "sync\n", SDVO_NAME(sdvo_priv));
}
if (0)
- intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
- intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+ intel_sdvo_set_encoder_power_state(intel_encoder, mode);
+ intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output);
}
return;
}
@@ -1261,31 +1325,42 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- if (intel_sdvo->pixel_clock_min > mode->clock)
+ if (sdvo_priv->pixel_clock_min > mode->clock)
return MODE_CLOCK_LOW;
- if (intel_sdvo->pixel_clock_max < mode->clock)
+ if (sdvo_priv->pixel_clock_max < mode->clock)
return MODE_CLOCK_HIGH;
- if (intel_sdvo->is_lvds) {
- if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
+ if (sdvo_priv->is_lvds == true) {
+ if (sdvo_priv->sdvo_lvds_fixed_mode == NULL)
return MODE_PANEL;
- if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
+ if (mode->hdisplay > sdvo_priv->sdvo_lvds_fixed_mode->hdisplay)
+ return MODE_PANEL;
+
+ if (mode->vdisplay > sdvo_priv->sdvo_lvds_fixed_mode->vdisplay)
return MODE_PANEL;
}
return MODE_OK;
}
-static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
+static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, struct intel_sdvo_caps *caps)
{
- return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps));
+ u8 status;
+
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, caps, sizeof(*caps));
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ return true;
}
/* No use! */
@@ -1293,12 +1368,12 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
{
struct drm_connector *connector = NULL;
- struct intel_sdvo *iout = NULL;
- struct intel_sdvo *sdvo;
+ struct intel_encoder *iout = NULL;
+ struct intel_sdvo_priv *sdvo;
/* find the sdvo connector */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- iout = to_intel_sdvo(connector);
+ iout = to_intel_encoder(connector);
if (iout->type != INTEL_OUTPUT_SDVO)
continue;
@@ -1320,69 +1395,75 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector)
{
u8 response[2];
u8 status;
- struct intel_sdvo *intel_sdvo;
+ struct intel_encoder *intel_encoder;
DRM_DEBUG_KMS("\n");
if (!connector)
return 0;
- intel_sdvo = to_intel_sdvo(connector);
+ intel_encoder = to_intel_encoder(connector);
+
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, &response, 2);
+
+ if (response[0] !=0)
+ return 1;
- return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
- &response, 2) && response[0];
+ return 0;
}
void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
{
u8 response[2];
u8 status;
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- intel_sdvo_read_response(intel_sdvo, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ intel_sdvo_read_response(intel_encoder, &response, 2);
if (on) {
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
- status = intel_sdvo_read_response(intel_sdvo, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, &response, 2);
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
} else {
response[0] = 0;
response[1] = 0;
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
}
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- intel_sdvo_read_response(intel_sdvo, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ intel_sdvo_read_response(intel_encoder, &response, 2);
}
#endif
static bool
-intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
+intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder)
{
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int caps = 0;
- if (intel_sdvo->caps.output_flags &
+ if (sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
caps++;
- if (intel_sdvo->caps.output_flags &
+ if (sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1))
caps++;
- if (intel_sdvo->caps.output_flags &
+ if (sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1))
caps++;
- if (intel_sdvo->caps.output_flags &
+ if (sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1))
caps++;
- if (intel_sdvo->caps.output_flags &
+ if (sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1))
caps++;
- if (intel_sdvo->caps.output_flags &
+ if (sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1))
caps++;
- if (intel_sdvo->caps.output_flags &
+ if (sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1))
caps++;
@@ -1394,11 +1475,11 @@ intel_find_analog_connector(struct drm_device *dev)
{
struct drm_connector *connector;
struct drm_encoder *encoder;
- struct intel_sdvo *intel_sdvo;
+ struct intel_encoder *intel_encoder;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- intel_sdvo = enc_to_intel_sdvo(encoder);
- if (intel_sdvo->base.type == INTEL_OUTPUT_ANALOG) {
+ intel_encoder = enc_to_intel_encoder(encoder);
+ if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (encoder == intel_attached_encoder(connector))
return connector;
@@ -1412,8 +1493,8 @@ static int
intel_analog_is_connected(struct drm_device *dev)
{
struct drm_connector *analog_connector;
-
analog_connector = intel_find_analog_connector(dev);
+
if (!analog_connector)
return false;
@@ -1428,52 +1509,54 @@ enum drm_connector_status
intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
enum drm_connector_status status = connector_status_connected;
struct edid *edid = NULL;
- edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
+ edid = drm_get_edid(connector, intel_encoder->ddc_bus);
/* This is only applied to SDVO cards with multiple outputs */
- if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
+ if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) {
uint8_t saved_ddc, temp_ddc;
- saved_ddc = intel_sdvo->ddc_bus;
- temp_ddc = intel_sdvo->ddc_bus >> 1;
+ saved_ddc = sdvo_priv->ddc_bus;
+ temp_ddc = sdvo_priv->ddc_bus >> 1;
/*
* Don't use the 1 as the argument of DDC bus switch to get
* the EDID. It is used for SDVO SPD ROM.
*/
while(temp_ddc > 1) {
- intel_sdvo->ddc_bus = temp_ddc;
- edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
+ sdvo_priv->ddc_bus = temp_ddc;
+ edid = drm_get_edid(connector, intel_encoder->ddc_bus);
if (edid) {
/*
* When we can get the EDID, maybe it is the
* correct DDC bus. Update it.
*/
- intel_sdvo->ddc_bus = temp_ddc;
+ sdvo_priv->ddc_bus = temp_ddc;
break;
}
temp_ddc >>= 1;
}
if (edid == NULL)
- intel_sdvo->ddc_bus = saved_ddc;
+ sdvo_priv->ddc_bus = saved_ddc;
}
/* when there is no edid and no monitor is connected with VGA
* port, try to use the CRT ddc to read the EDID for DVI-connector
*/
- if (edid == NULL && intel_sdvo->analog_ddc_bus &&
+ if (edid == NULL && sdvo_priv->analog_ddc_bus &&
!intel_analog_is_connected(connector->dev))
- edid = drm_get_edid(connector, intel_sdvo->analog_ddc_bus);
+ edid = drm_get_edid(connector, sdvo_priv->analog_ddc_bus);
if (edid != NULL) {
bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
- bool need_digital = !!(intel_sdvo_connector->output_flag & SDVO_TMDS_MASK);
+ bool need_digital = !!(sdvo_connector->output_flag & SDVO_TMDS_MASK);
/* DDC bus is shared, match EDID to connector type */
if (is_digital && need_digital)
- intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
+ sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
else if (is_digital != need_digital)
status = connector_status_disconnected;
@@ -1489,29 +1572,33 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
{
uint16_t response;
+ u8 status;
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
enum drm_connector_status ret;
- if (!intel_sdvo_write_cmd(intel_sdvo,
- SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
- return connector_status_unknown;
- if (intel_sdvo->is_tv) {
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
+ if (sdvo_priv->is_tv) {
/* add 30ms delay when the output type is SDVO-TV */
mdelay(30);
}
- if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
- return connector_status_unknown;
+ status = intel_sdvo_read_response(intel_encoder, &response, 2);
DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return connector_status_unknown;
+
if (response == 0)
return connector_status_disconnected;
- intel_sdvo->attached_output = response;
+ sdvo_priv->attached_output = response;
- if ((intel_sdvo_connector->output_flag & response) == 0)
+ if ((sdvo_connector->output_flag & response) == 0)
ret = connector_status_disconnected;
else if (response & SDVO_TMDS_MASK)
ret = intel_sdvo_hdmi_sink_detect(connector);
@@ -1520,16 +1607,16 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
/* May update encoder flag for like clock for SDVO TV, etc.*/
if (ret == connector_status_connected) {
- intel_sdvo->is_tv = false;
- intel_sdvo->is_lvds = false;
- intel_sdvo->base.needs_tv_clock = false;
+ sdvo_priv->is_tv = false;
+ sdvo_priv->is_lvds = false;
+ intel_encoder->needs_tv_clock = false;
if (response & SDVO_TV_MASK) {
- intel_sdvo->is_tv = true;
- intel_sdvo->base.needs_tv_clock = true;
+ sdvo_priv->is_tv = true;
+ intel_encoder->needs_tv_clock = true;
}
if (response & SDVO_LVDS_MASK)
- intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL;
+ sdvo_priv->is_lvds = true;
}
return ret;
@@ -1538,11 +1625,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int num_modes;
/* set the bus switch and get the modes */
- num_modes = intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
+ num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
/*
* Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1551,11 +1639,11 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
* which case we'll look there for the digital DDC data.
*/
if (num_modes == 0 &&
- intel_sdvo->analog_ddc_bus &&
+ sdvo_priv->analog_ddc_bus &&
!intel_analog_is_connected(connector->dev)) {
/* Switch to the analog ddc bus and try that
*/
- (void) intel_ddc_get_modes(connector, intel_sdvo->analog_ddc_bus);
+ (void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus);
}
}
@@ -1627,43 +1715,52 @@ struct drm_display_mode sdvo_tv_modes[] = {
static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct intel_sdvo_sdtv_resolution_request tv_res;
uint32_t reply = 0, format_map = 0;
int i;
+ uint8_t status;
+
/* Read the list of supported input resolutions for the selected TV
* format.
*/
- format_map = 1 << intel_sdvo->tv_format_index;
+ for (i = 0; i < TV_FORMAT_NUM; i++)
+ if (tv_format_names[i] == sdvo_priv->tv_format_name)
+ break;
+
+ format_map = (1 << i);
memcpy(&tv_res, &format_map,
- min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
+ sizeof(struct intel_sdvo_sdtv_resolution_request) >
+ sizeof(format_map) ? sizeof(format_map) :
+ sizeof(struct intel_sdvo_sdtv_resolution_request));
- if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output))
- return;
+ intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output);
- BUILD_BUG_ON(sizeof(tv_res) != 3);
- if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
- &tv_res, sizeof(tv_res)))
- return;
- if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ &tv_res, sizeof(tv_res));
+ status = intel_sdvo_read_response(intel_encoder, &reply, 3);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
return;
for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
if (reply & (1 << i)) {
struct drm_display_mode *nmode;
nmode = drm_mode_duplicate(connector->dev,
- &sdvo_tv_modes[i]);
+ &sdvo_tv_modes[i]);
if (nmode)
drm_mode_probed_add(connector, nmode);
}
+
}
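
intel_sdvo_get_tv_modes() above turns the GET_SDTV_RESOLUTION_SUPPORT reply into probed modes by walking sdvo_tv_modes[] and duplicating every entry whose bit is set. A toy sketch of that bitmask expansion, with an illustrative mode table standing in for sdvo_tv_modes[] and printing standing in for drm_mode_probed_add():

/* Toy model, not kernel code: expand a supported-resolution bitmask. */
#include <stdint.h>
#include <stdio.h>

static const char *mock_sdtv_modes[] = {	/* illustrative entries only */
	"320x200", "320x240", "400x300", "640x350", "640x400", "640x480",
};

static void mock_add_supported_modes(uint32_t reply)
{
	unsigned int i;

	for (i = 0; i < sizeof(mock_sdtv_modes) / sizeof(mock_sdtv_modes[0]); i++)
		if (reply & (1u << i))		/* bit i set: mode i is supported */
			printf("probed mode: %s\n", mock_sdtv_modes[i]);
}

int main(void)
{
	mock_add_supported_modes(0x2b);		/* arbitrary example reply */
	return 0;
}
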
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct drm_display_mode *newmode;
/*
@@ -1671,7 +1768,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* Assume that the preferred modes are
* arranged in priority order.
*/
- intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
+ intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (list_empty(&connector->probed_modes) == false)
goto end;
@@ -1690,9 +1787,8 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
end:
list_for_each_entry(newmode, &connector->probed_modes, head) {
if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
- intel_sdvo->sdvo_lvds_fixed_mode =
+ sdvo_priv->sdvo_lvds_fixed_mode =
drm_mode_duplicate(connector->dev, newmode);
- intel_sdvo->is_lvds = true;
break;
}
}
@@ -1701,67 +1797,66 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
static int intel_sdvo_get_modes(struct drm_connector *connector)
{
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
- if (IS_TV(intel_sdvo_connector))
+ if (IS_TV(sdvo_connector))
intel_sdvo_get_tv_modes(connector);
- else if (IS_LVDS(intel_sdvo_connector))
+ else if (IS_LVDS(sdvo_connector))
intel_sdvo_get_lvds_modes(connector);
else
intel_sdvo_get_ddc_modes(connector);
- return !list_empty(&connector->probed_modes);
+ if (list_empty(&connector->probed_modes))
+ return 0;
+ return 1;
}
-static void
-intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
+static
+void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
{
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
struct drm_device *dev = connector->dev;
- if (intel_sdvo_connector->left)
- drm_property_destroy(dev, intel_sdvo_connector->left);
- if (intel_sdvo_connector->right)
- drm_property_destroy(dev, intel_sdvo_connector->right);
- if (intel_sdvo_connector->top)
- drm_property_destroy(dev, intel_sdvo_connector->top);
- if (intel_sdvo_connector->bottom)
- drm_property_destroy(dev, intel_sdvo_connector->bottom);
- if (intel_sdvo_connector->hpos)
- drm_property_destroy(dev, intel_sdvo_connector->hpos);
- if (intel_sdvo_connector->vpos)
- drm_property_destroy(dev, intel_sdvo_connector->vpos);
- if (intel_sdvo_connector->saturation)
- drm_property_destroy(dev, intel_sdvo_connector->saturation);
- if (intel_sdvo_connector->contrast)
- drm_property_destroy(dev, intel_sdvo_connector->contrast);
- if (intel_sdvo_connector->hue)
- drm_property_destroy(dev, intel_sdvo_connector->hue);
- if (intel_sdvo_connector->sharpness)
- drm_property_destroy(dev, intel_sdvo_connector->sharpness);
- if (intel_sdvo_connector->flicker_filter)
- drm_property_destroy(dev, intel_sdvo_connector->flicker_filter);
- if (intel_sdvo_connector->flicker_filter_2d)
- drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d);
- if (intel_sdvo_connector->flicker_filter_adaptive)
- drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive);
- if (intel_sdvo_connector->tv_luma_filter)
- drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter);
- if (intel_sdvo_connector->tv_chroma_filter)
- drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter);
- if (intel_sdvo_connector->dot_crawl)
- drm_property_destroy(dev, intel_sdvo_connector->dot_crawl);
- if (intel_sdvo_connector->brightness)
- drm_property_destroy(dev, intel_sdvo_connector->brightness);
+ if (IS_TV(sdvo_priv)) {
+ if (sdvo_priv->left_property)
+ drm_property_destroy(dev, sdvo_priv->left_property);
+ if (sdvo_priv->right_property)
+ drm_property_destroy(dev, sdvo_priv->right_property);
+ if (sdvo_priv->top_property)
+ drm_property_destroy(dev, sdvo_priv->top_property);
+ if (sdvo_priv->bottom_property)
+ drm_property_destroy(dev, sdvo_priv->bottom_property);
+ if (sdvo_priv->hpos_property)
+ drm_property_destroy(dev, sdvo_priv->hpos_property);
+ if (sdvo_priv->vpos_property)
+ drm_property_destroy(dev, sdvo_priv->vpos_property);
+ if (sdvo_priv->saturation_property)
+ drm_property_destroy(dev,
+ sdvo_priv->saturation_property);
+ if (sdvo_priv->contrast_property)
+ drm_property_destroy(dev,
+ sdvo_priv->contrast_property);
+ if (sdvo_priv->hue_property)
+ drm_property_destroy(dev, sdvo_priv->hue_property);
+ }
+ if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
+ if (sdvo_priv->brightness_property)
+ drm_property_destroy(dev,
+ sdvo_priv->brightness_property);
+ }
+ return;
}
static void intel_sdvo_destroy(struct drm_connector *connector)
{
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
- if (intel_sdvo_connector->tv_format)
+ if (sdvo_connector->tv_format_property)
drm_property_destroy(connector->dev,
- intel_sdvo_connector->tv_format);
+ sdvo_connector->tv_format_property);
intel_sdvo_destroy_enhance_property(connector);
drm_sysfs_connector_remove(connector);
@@ -1775,118 +1870,132 @@ intel_sdvo_set_property(struct drm_connector *connector,
uint64_t val)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+ struct drm_crtc *crtc = encoder->crtc;
+ int ret = 0;
+ bool changed = false;
+ uint8_t cmd, status;
uint16_t temp_value;
- uint8_t cmd;
- int ret;
ret = drm_connector_property_set_value(connector, property, val);
- if (ret)
- return ret;
-
-#define CHECK_PROPERTY(name, NAME) \
- if (intel_sdvo_connector->name == property) { \
- if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
- if (intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
- cmd = SDVO_CMD_SET_##NAME; \
- intel_sdvo_connector->cur_##name = temp_value; \
- goto set_value; \
- }
+ if (ret < 0)
+ goto out;
- if (property == intel_sdvo_connector->tv_format) {
- if (val >= TV_FORMAT_NUM)
- return -EINVAL;
+ if (property == sdvo_connector->tv_format_property) {
+ if (val >= TV_FORMAT_NUM) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (sdvo_priv->tv_format_name ==
+ sdvo_connector->tv_format_supported[val])
+ goto out;
- if (intel_sdvo->tv_format_index ==
- intel_sdvo_connector->tv_format_supported[val])
- return 0;
+ sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val];
+ changed = true;
+ }
- intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[val];
- goto done;
- } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
+ if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) {
+ cmd = 0;
temp_value = val;
- if (intel_sdvo_connector->left == property) {
+ if (sdvo_connector->left_property == property) {
drm_connector_property_set_value(connector,
- intel_sdvo_connector->right, val);
- if (intel_sdvo_connector->left_margin == temp_value)
- return 0;
-
- intel_sdvo_connector->left_margin = temp_value;
- intel_sdvo_connector->right_margin = temp_value;
- temp_value = intel_sdvo_connector->max_hscan -
- intel_sdvo_connector->left_margin;
+ sdvo_connector->right_property, val);
+ if (sdvo_connector->left_margin == temp_value)
+ goto out;
+
+ sdvo_connector->left_margin = temp_value;
+ sdvo_connector->right_margin = temp_value;
+ temp_value = sdvo_connector->max_hscan -
+ sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
- goto set_value;
- } else if (intel_sdvo_connector->right == property) {
+ } else if (sdvo_connector->right_property == property) {
drm_connector_property_set_value(connector,
- intel_sdvo_connector->left, val);
- if (intel_sdvo_connector->right_margin == temp_value)
- return 0;
-
- intel_sdvo_connector->left_margin = temp_value;
- intel_sdvo_connector->right_margin = temp_value;
- temp_value = intel_sdvo_connector->max_hscan -
- intel_sdvo_connector->left_margin;
+ sdvo_connector->left_property, val);
+ if (sdvo_connector->right_margin == temp_value)
+ goto out;
+
+ sdvo_connector->left_margin = temp_value;
+ sdvo_connector->right_margin = temp_value;
+ temp_value = sdvo_connector->max_hscan -
+ sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
- goto set_value;
- } else if (intel_sdvo_connector->top == property) {
+ } else if (sdvo_connector->top_property == property) {
drm_connector_property_set_value(connector,
- intel_sdvo_connector->bottom, val);
- if (intel_sdvo_connector->top_margin == temp_value)
- return 0;
-
- intel_sdvo_connector->top_margin = temp_value;
- intel_sdvo_connector->bottom_margin = temp_value;
- temp_value = intel_sdvo_connector->max_vscan -
- intel_sdvo_connector->top_margin;
+ sdvo_connector->bottom_property, val);
+ if (sdvo_connector->top_margin == temp_value)
+ goto out;
+
+ sdvo_connector->top_margin = temp_value;
+ sdvo_connector->bottom_margin = temp_value;
+ temp_value = sdvo_connector->max_vscan -
+ sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
- goto set_value;
- } else if (intel_sdvo_connector->bottom == property) {
+ } else if (sdvo_connector->bottom_property == property) {
drm_connector_property_set_value(connector,
- intel_sdvo_connector->top, val);
- if (intel_sdvo_connector->bottom_margin == temp_value)
- return 0;
-
- intel_sdvo_connector->top_margin = temp_value;
- intel_sdvo_connector->bottom_margin = temp_value;
- temp_value = intel_sdvo_connector->max_vscan -
- intel_sdvo_connector->top_margin;
+ sdvo_connector->top_property, val);
+ if (sdvo_connector->bottom_margin == temp_value)
+ goto out;
+ sdvo_connector->top_margin = temp_value;
+ sdvo_connector->bottom_margin = temp_value;
+ temp_value = sdvo_connector->max_vscan -
+ sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
- goto set_value;
+ } else if (sdvo_connector->hpos_property == property) {
+ if (sdvo_connector->cur_hpos == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_POSITION_H;
+ sdvo_connector->cur_hpos = temp_value;
+ } else if (sdvo_connector->vpos_property == property) {
+ if (sdvo_connector->cur_vpos == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_POSITION_V;
+ sdvo_connector->cur_vpos = temp_value;
+ } else if (sdvo_connector->saturation_property == property) {
+ if (sdvo_connector->cur_saturation == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_SATURATION;
+ sdvo_connector->cur_saturation = temp_value;
+ } else if (sdvo_connector->contrast_property == property) {
+ if (sdvo_connector->cur_contrast == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_CONTRAST;
+ sdvo_connector->cur_contrast = temp_value;
+ } else if (sdvo_connector->hue_property == property) {
+ if (sdvo_connector->cur_hue == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_HUE;
+ sdvo_connector->cur_hue = temp_value;
+ } else if (sdvo_connector->brightness_property == property) {
+ if (sdvo_connector->cur_brightness == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_BRIGHTNESS;
+ sdvo_connector->cur_brightness = temp_value;
+ }
+ if (cmd) {
+ intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2);
+ status = intel_sdvo_read_response(intel_encoder,
+ NULL, 0);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO command \n");
+ return -EINVAL;
+ }
+ changed = true;
}
- CHECK_PROPERTY(hpos, HPOS)
- CHECK_PROPERTY(vpos, VPOS)
- CHECK_PROPERTY(saturation, SATURATION)
- CHECK_PROPERTY(contrast, CONTRAST)
- CHECK_PROPERTY(hue, HUE)
- CHECK_PROPERTY(brightness, BRIGHTNESS)
- CHECK_PROPERTY(sharpness, SHARPNESS)
- CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
- CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
- CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
- CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
- CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
- CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
}
-
- return -EINVAL; /* unknown property */
-
-set_value:
- if (!intel_sdvo_set_value(intel_sdvo, cmd, &temp_value, 2))
- return -EIO;
-
-
-done:
- if (encoder->crtc) {
- struct drm_crtc *crtc = encoder->crtc;
-
+ if (changed && crtc)
drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
- crtc->y, crtc->fb);
- }
-
- return 0;
-#undef CHECK_PROPERTY
+ crtc->y, crtc->fb);
+out:
+ return ret;
}
static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
@@ -1913,57 +2022,28 @@ static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- if (intel_sdvo->analog_ddc_bus)
- intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
+ if (sdvo_priv->analog_ddc_bus)
+ intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
- if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
+ if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
drm_mode_destroy(encoder->dev,
- intel_sdvo->sdvo_lvds_fixed_mode);
+ sdvo_priv->sdvo_lvds_fixed_mode);
- intel_encoder_destroy(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
.destroy = intel_sdvo_enc_destroy,
};
-static void
-intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
-{
- uint16_t mask = 0;
- unsigned int num_bits;
-
- /* Make a mask of outputs less than or equal to our own priority in the
- * list.
- */
- switch (sdvo->controlled_output) {
- case SDVO_OUTPUT_LVDS1:
- mask |= SDVO_OUTPUT_LVDS1;
- case SDVO_OUTPUT_LVDS0:
- mask |= SDVO_OUTPUT_LVDS0;
- case SDVO_OUTPUT_TMDS1:
- mask |= SDVO_OUTPUT_TMDS1;
- case SDVO_OUTPUT_TMDS0:
- mask |= SDVO_OUTPUT_TMDS0;
- case SDVO_OUTPUT_RGB1:
- mask |= SDVO_OUTPUT_RGB1;
- case SDVO_OUTPUT_RGB0:
- mask |= SDVO_OUTPUT_RGB0;
- break;
- }
-
- /* Count bits to find what number we are in the priority list. */
- mask &= sdvo->caps.output_flags;
- num_bits = hweight16(mask);
- /* If more than 3 outputs, default to DDC bus 3 for now. */
- if (num_bits > 3)
- num_bits = 3;
-
- /* Corresponds to SDVO_CONTROL_BUS_DDCx */
- sdvo->ddc_bus = 1 << num_bits;
-}
/**
* Choose the appropriate DDC bus for control bus switch command for this
@@ -1974,7 +2054,7 @@ intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
*/
static void
intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
- struct intel_sdvo *sdvo, u32 reg)
+ struct intel_sdvo_priv *sdvo, u32 reg)
{
struct sdvo_device_mapping *mapping;
@@ -1983,53 +2063,61 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
else
mapping = &(dev_priv->sdvo_mappings[1]);
- if (mapping->initialized)
- sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
- else
- intel_sdvo_guess_ddc_bus(sdvo);
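+ /* The upper nibble of ddc_pin gives the DDC bus number (SDVO_CONTROL_BUS_DDCx). */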
+ sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
}
static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device)
+intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device)
{
- return intel_sdvo_set_target_output(intel_sdvo,
- device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) &&
- intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
- &intel_sdvo->is_hdmi, 1);
+ struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ uint8_t status;
+
+ if (device == 0)
+ intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0);
+ else
+ intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1);
+
+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0);
+ status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+ return true;
}
-static struct intel_sdvo *
-intel_sdvo_chan_to_intel_sdvo(struct intel_i2c_chan *chan)
+static struct intel_encoder *
+intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan)
{
struct drm_device *dev = chan->drm_dev;
struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder = NULL;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- if (intel_sdvo->base.ddc_bus == &chan->adapter)
- return intel_sdvo;
+ intel_encoder = enc_to_intel_encoder(encoder);
+ if (intel_encoder->ddc_bus == &chan->adapter)
+ break;
}
-
- return NULL;
+ return intel_encoder;
}
static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg msgs[], int num)
{
- struct intel_sdvo *intel_sdvo;
+ struct intel_encoder *intel_encoder;
+ struct intel_sdvo_priv *sdvo_priv;
struct i2c_algo_bit_data *algo_data;
const struct i2c_algorithm *algo;
algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
- intel_sdvo =
- intel_sdvo_chan_to_intel_sdvo((struct intel_i2c_chan *)
- (algo_data->data));
- if (intel_sdvo == NULL)
+ intel_encoder =
+ intel_sdvo_chan_to_intel_encoder(
+ (struct intel_i2c_chan *)(algo_data->data));
+ if (intel_encoder == NULL)
return -EINVAL;
- algo = intel_sdvo->base.i2c_bus->algo;
+ sdvo_priv = intel_encoder->dev_priv;
+ algo = intel_encoder->i2c_bus->algo;
- intel_sdvo_set_control_bus_switch(intel_sdvo, intel_sdvo->ddc_bus);
+ intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus);
return algo->master_xfer(i2c_adap, msgs, num);
}
@@ -2074,9 +2162,27 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
return 0x72;
}
+static bool
+intel_sdvo_connector_alloc(struct intel_connector **ret)
+{
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ *ret = kzalloc(sizeof(*intel_connector) +
+ sizeof(*sdvo_connector), GFP_KERNEL);
+ if (!*ret)
+ return false;
+
+ intel_connector = *ret;
+ sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1);
+ intel_connector->dev_priv = sdvo_connector;
+
+ return true;
+}
+
static void
-intel_sdvo_connector_init(struct drm_encoder *encoder,
- struct drm_connector *connector)
+intel_sdvo_connector_create(struct drm_encoder *encoder,
+ struct drm_connector *connector)
{
drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
connector->connector_type);
@@ -2092,470 +2198,582 @@ intel_sdvo_connector_init(struct drm_encoder *encoder,
}
static bool
-intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
+intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device)
{
- struct drm_encoder *encoder = &intel_sdvo->base.enc;
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct drm_connector *connector;
struct intel_connector *intel_connector;
- struct intel_sdvo_connector *intel_sdvo_connector;
+ struct intel_sdvo_connector *sdvo_connector;
- intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
- if (!intel_sdvo_connector)
+ if (!intel_sdvo_connector_alloc(&intel_connector))
return false;
+ sdvo_connector = intel_connector->dev_priv;
+
if (device == 0) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0;
+ sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
} else if (device == 1) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1;
+ sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
}
- intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
- if (intel_sdvo_get_supp_encode(intel_sdvo, &intel_sdvo->encode)
- && intel_sdvo_get_digital_encoding_mode(intel_sdvo, device)
- && intel_sdvo->is_hdmi) {
+ if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode)
+ && intel_sdvo_get_digital_encoding_mode(intel_encoder, device)
+ && sdvo_priv->is_hdmi) {
/* enable hdmi encoding mode if supported */
- intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
- intel_sdvo_set_colorimetry(intel_sdvo,
+ intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
+ intel_sdvo_set_colorimetry(intel_encoder,
SDVO_COLORIMETRY_RGB256);
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
}
- intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT));
+ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT);
- intel_sdvo_connector_init(encoder, connector);
+ intel_sdvo_connector_create(encoder, connector);
return true;
}
static bool
-intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
+intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type)
{
- struct drm_encoder *encoder = &intel_sdvo->base.enc;
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct drm_connector *connector;
struct intel_connector *intel_connector;
- struct intel_sdvo_connector *intel_sdvo_connector;
+ struct intel_sdvo_connector *sdvo_connector;
- intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
- if (!intel_sdvo_connector)
- return false;
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
- intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+ sdvo_connector = intel_connector->dev_priv;
- intel_sdvo->controlled_output |= type;
- intel_sdvo_connector->output_flag = type;
+ sdvo_priv->controlled_output |= type;
+ sdvo_connector->output_flag = type;
- intel_sdvo->is_tv = true;
- intel_sdvo->base.needs_tv_clock = true;
- intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+ sdvo_priv->is_tv = true;
+ intel_encoder->needs_tv_clock = true;
+ intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
- intel_sdvo_connector_init(encoder, connector);
+ intel_sdvo_connector_create(encoder, connector);
- if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
- goto err;
+ intel_sdvo_tv_create_property(connector, type);
- if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
- goto err;
+ intel_sdvo_create_enhance_property(connector);
return true;
-
-err:
- intel_sdvo_destroy_enhance_property(connector);
- kfree(intel_sdvo_connector);
- return false;
}
static bool
-intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
+intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device)
{
- struct drm_encoder *encoder = &intel_sdvo->base.enc;
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct drm_connector *connector;
struct intel_connector *intel_connector;
- struct intel_sdvo_connector *intel_sdvo_connector;
+ struct intel_sdvo_connector *sdvo_connector;
- intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
- if (!intel_sdvo_connector)
- return false;
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
- intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
encoder->encoder_type = DRM_MODE_ENCODER_DAC;
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+ sdvo_connector = intel_connector->dev_priv;
if (device == 0) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0;
+ sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
} else if (device == 1) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB1;
+ sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
}
- intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT));
+ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT);
- intel_sdvo_connector_init(encoder, connector);
+ intel_sdvo_connector_create(encoder, connector);
return true;
}
static bool
-intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
+intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device)
{
- struct drm_encoder *encoder = &intel_sdvo->base.enc;
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct drm_connector *connector;
struct intel_connector *intel_connector;
- struct intel_sdvo_connector *intel_sdvo_connector;
+ struct intel_sdvo_connector *sdvo_connector;
- intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
- if (!intel_sdvo_connector)
- return false;
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
- intel_connector = &intel_sdvo_connector->base;
- connector = &intel_connector->base;
+ connector = &intel_connector->base;
encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+ sdvo_connector = intel_connector->dev_priv;
+
+ sdvo_priv->is_lvds = true;
if (device == 0) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0;
+ sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
} else if (device == 1) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1;
+ sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
}
- intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT));
-
- intel_sdvo_connector_init(encoder, connector);
- if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
- goto err;
+ intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
+ (1 << INTEL_SDVO_LVDS_CLONE_BIT);
- return true;
-
-err:
- intel_sdvo_destroy_enhance_property(connector);
- kfree(intel_sdvo_connector);
- return false;
+ intel_sdvo_connector_create(encoder, connector);
+ intel_sdvo_create_enhance_property(connector);
+ return true;
}
static bool
-intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
{
- intel_sdvo->is_tv = false;
- intel_sdvo->base.needs_tv_clock = false;
- intel_sdvo->is_lvds = false;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+
+ sdvo_priv->is_tv = false;
+ intel_encoder->needs_tv_clock = false;
+ sdvo_priv->is_lvds = false;
/* In SDVO, the XXX1 function may not exist unless the XXX0 function does. */
if (flags & SDVO_OUTPUT_TMDS0)
- if (!intel_sdvo_dvi_init(intel_sdvo, 0))
+ if (!intel_sdvo_dvi_init(intel_encoder, 0))
return false;
if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
- if (!intel_sdvo_dvi_init(intel_sdvo, 1))
+ if (!intel_sdvo_dvi_init(intel_encoder, 1))
return false;
/* TV has no XXX1 function block */
if (flags & SDVO_OUTPUT_SVID0)
- if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_SVID0))
+ if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0))
return false;
if (flags & SDVO_OUTPUT_CVBS0)
- if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
+ if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0))
return false;
if (flags & SDVO_OUTPUT_RGB0)
- if (!intel_sdvo_analog_init(intel_sdvo, 0))
+ if (!intel_sdvo_analog_init(intel_encoder, 0))
return false;
if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
- if (!intel_sdvo_analog_init(intel_sdvo, 1))
+ if (!intel_sdvo_analog_init(intel_encoder, 1))
return false;
if (flags & SDVO_OUTPUT_LVDS0)
- if (!intel_sdvo_lvds_init(intel_sdvo, 0))
+ if (!intel_sdvo_lvds_init(intel_encoder, 0))
return false;
if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
- if (!intel_sdvo_lvds_init(intel_sdvo, 1))
+ if (!intel_sdvo_lvds_init(intel_encoder, 1))
return false;
if ((flags & SDVO_OUTPUT_MASK) == 0) {
unsigned char bytes[2];
- intel_sdvo->controlled_output = 0;
- memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
+ sdvo_priv->controlled_output = 0;
+ memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
- SDVO_NAME(intel_sdvo),
+ SDVO_NAME(sdvo_priv),
bytes[0], bytes[1]);
return false;
}
- intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
return true;
}
-static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
- struct intel_sdvo_connector *intel_sdvo_connector,
- int type)
+static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type)
{
- struct drm_device *dev = intel_sdvo->base.enc.dev;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
struct intel_sdvo_tv_format format;
uint32_t format_map, i;
+ uint8_t status;
- if (!intel_sdvo_set_target_output(intel_sdvo, type))
- return false;
+ intel_sdvo_set_target_output(intel_encoder, type);
- if (!intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
- &format, sizeof(format)))
- return false;
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &format, sizeof(format));
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return;
- memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
+ memcpy(&format_map, &format, sizeof(format) > sizeof(format_map) ?
+ sizeof(format_map) : sizeof(format));
if (format_map == 0)
- return false;
+ return;
- intel_sdvo_connector->format_supported_num = 0;
+ sdvo_connector->format_supported_num = 0;
for (i = 0 ; i < TV_FORMAT_NUM; i++)
- if (format_map & (1 << i))
- intel_sdvo_connector->tv_format_supported[intel_sdvo_connector->format_supported_num++] = i;
+ if (format_map & (1 << i)) {
+ sdvo_connector->tv_format_supported
+ [sdvo_connector->format_supported_num++] =
+ tv_format_names[i];
+ }
- intel_sdvo_connector->tv_format =
- drm_property_create(dev, DRM_MODE_PROP_ENUM,
- "mode", intel_sdvo_connector->format_supported_num);
- if (!intel_sdvo_connector->tv_format)
- return false;
+ sdvo_connector->tv_format_property =
+ drm_property_create(
+ connector->dev, DRM_MODE_PROP_ENUM,
+ "mode", sdvo_connector->format_supported_num);
- for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
+ for (i = 0; i < sdvo_connector->format_supported_num; i++)
drm_property_add_enum(
- intel_sdvo_connector->tv_format, i,
- i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
+ sdvo_connector->tv_format_property, i,
+ i, sdvo_connector->tv_format_supported[i]);
- intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
- drm_connector_attach_property(&intel_sdvo_connector->base.base,
- intel_sdvo_connector->tv_format, 0);
- return true;
+ sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0];
+ drm_connector_attach_property(
+ connector, sdvo_connector->tv_format_property, 0);
}
-#define ENHANCEMENT(name, NAME) do { \
- if (enhancements.name) { \
- if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
- !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
- return false; \
- intel_sdvo_connector->max_##name = data_value[0]; \
- intel_sdvo_connector->cur_##name = response; \
- intel_sdvo_connector->name = \
- drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \
- if (!intel_sdvo_connector->name) return false; \
- intel_sdvo_connector->name->values[0] = 0; \
- intel_sdvo_connector->name->values[1] = data_value[0]; \
- drm_connector_attach_property(connector, \
- intel_sdvo_connector->name, \
- intel_sdvo_connector->cur_##name); \
- DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
- data_value[0], data_value[1], response); \
- } \
-} while(0)
-
-static bool
-intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
- struct intel_sdvo_connector *intel_sdvo_connector,
- struct intel_sdvo_enhancements_reply enhancements)
+static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
{
- struct drm_device *dev = intel_sdvo->base.enc.dev;
- struct drm_connector *connector = &intel_sdvo_connector->base.base;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
+ struct intel_sdvo_enhancements_reply sdvo_data;
+ struct drm_device *dev = connector->dev;
+ uint8_t status;
uint16_t response, data_value[2];
- /* when horizontal overscan is supported, Add the left/right property */
- if (enhancements.overscan_h) {
- if (!intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_MAX_OVERSCAN_H,
- &data_value, 4))
- return false;
-
- if (!intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_OVERSCAN_H,
- &response, 2))
- return false;
-
- intel_sdvo_connector->max_hscan = data_value[0];
- intel_sdvo_connector->left_margin = data_value[0] - response;
- intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
- intel_sdvo_connector->left =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "left_margin", 2);
- if (!intel_sdvo_connector->left)
- return false;
-
- intel_sdvo_connector->left->values[0] = 0;
- intel_sdvo_connector->left->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- intel_sdvo_connector->left,
- intel_sdvo_connector->left_margin);
-
- intel_sdvo_connector->right =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "right_margin", 2);
- if (!intel_sdvo_connector->right)
- return false;
-
- intel_sdvo_connector->right->values[0] = 0;
- intel_sdvo_connector->right->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- intel_sdvo_connector->right,
- intel_sdvo_connector->right_margin);
- DRM_DEBUG_KMS("h_overscan: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+ NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, &sdvo_data,
+ sizeof(sdvo_data));
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS(" incorrect response is returned\n");
+ return;
}
-
- if (enhancements.overscan_v) {
- if (!intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_MAX_OVERSCAN_V,
- &data_value, 4))
- return false;
-
- if (!intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_OVERSCAN_V,
- &response, 2))
- return false;
-
- intel_sdvo_connector->max_vscan = data_value[0];
- intel_sdvo_connector->top_margin = data_value[0] - response;
- intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
- intel_sdvo_connector->top =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "top_margin", 2);
- if (!intel_sdvo_connector->top)
- return false;
-
- intel_sdvo_connector->top->values[0] = 0;
- intel_sdvo_connector->top->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- intel_sdvo_connector->top,
- intel_sdvo_connector->top_margin);
-
- intel_sdvo_connector->bottom =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "bottom_margin", 2);
- if (!intel_sdvo_connector->bottom)
- return false;
-
- intel_sdvo_connector->bottom->values[0] = 0;
- intel_sdvo_connector->bottom->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- intel_sdvo_connector->bottom,
- intel_sdvo_connector->bottom_margin);
- DRM_DEBUG_KMS("v_overscan: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
+ response = *((uint16_t *)&sdvo_data);
+ if (!response) {
+ DRM_DEBUG_KMS("No enhancement is supported\n");
+ return;
}
-
- ENHANCEMENT(hpos, HPOS);
- ENHANCEMENT(vpos, VPOS);
- ENHANCEMENT(saturation, SATURATION);
- ENHANCEMENT(contrast, CONTRAST);
- ENHANCEMENT(hue, HUE);
- ENHANCEMENT(sharpness, SHARPNESS);
- ENHANCEMENT(brightness, BRIGHTNESS);
- ENHANCEMENT(flicker_filter, FLICKER_FILTER);
- ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
- ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
- ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
- ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
-
- if (enhancements.dot_crawl) {
- if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
- return false;
-
- intel_sdvo_connector->max_dot_crawl = 1;
- intel_sdvo_connector->cur_dot_crawl = response & 0x1;
- intel_sdvo_connector->dot_crawl =
- drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
- if (!intel_sdvo_connector->dot_crawl)
- return false;
-
- intel_sdvo_connector->dot_crawl->values[0] = 0;
- intel_sdvo_connector->dot_crawl->values[1] = 1;
- drm_connector_attach_property(connector,
- intel_sdvo_connector->dot_crawl,
- intel_sdvo_connector->cur_dot_crawl);
- DRM_DEBUG_KMS("dot crawl: current %d\n", response);
+ if (IS_TV(sdvo_priv)) {
+ /* When horizontal overscan is supported, add the left/right
+ * property
+ */
+ if (sdvo_data.overscan_h) {
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO max "
+ "h_overscan\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_OVERSCAN_H, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n");
+ return;
+ }
+ sdvo_priv->max_hscan = data_value[0];
+ sdvo_priv->left_margin = data_value[0] - response;
+ sdvo_priv->right_margin = sdvo_priv->left_margin;
+ sdvo_priv->left_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "left_margin", 2);
+ sdvo_priv->left_property->values[0] = 0;
+ sdvo_priv->left_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->left_property,
+ sdvo_priv->left_margin);
+ sdvo_priv->right_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "right_margin", 2);
+ sdvo_priv->right_property->values[0] = 0;
+ sdvo_priv->right_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->right_property,
+ sdvo_priv->right_margin);
+ DRM_DEBUG_KMS("h_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.overscan_v) {
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO max "
+ "v_overscan\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_OVERSCAN_V, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n");
+ return;
+ }
+ sdvo_priv->max_vscan = data_value[0];
+ sdvo_priv->top_margin = data_value[0] - response;
+ sdvo_priv->bottom_margin = sdvo_priv->top_margin;
+ sdvo_priv->top_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "top_margin", 2);
+ sdvo_priv->top_property->values[0] = 0;
+ sdvo_priv->top_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->top_property,
+ sdvo_priv->top_margin);
+ sdvo_priv->bottom_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "bottom_margin", 2);
+ sdvo_priv->bottom_property->values[0] = 0;
+ sdvo_priv->bottom_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->bottom_property,
+ sdvo_priv->bottom_margin);
+ DRM_DEBUG_KMS("v_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.position_h) {
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_MAX_POSITION_H, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_POSITION_H, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n");
+ return;
+ }
+ sdvo_priv->max_hpos = data_value[0];
+ sdvo_priv->cur_hpos = response;
+ sdvo_priv->hpos_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "hpos", 2);
+ sdvo_priv->hpos_property->values[0] = 0;
+ sdvo_priv->hpos_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->hpos_property,
+ sdvo_priv->cur_hpos);
+ DRM_DEBUG_KMS("h_position: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.position_v) {
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_MAX_POSITION_V, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_POSITION_V, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n");
+ return;
+ }
+ sdvo_priv->max_vpos = data_value[0];
+ sdvo_priv->cur_vpos = response;
+ sdvo_priv->vpos_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "vpos", 2);
+ sdvo_priv->vpos_property->values[0] = 0;
+ sdvo_priv->vpos_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->vpos_property,
+ sdvo_priv->cur_vpos);
+ DRM_DEBUG_KMS("v_position: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.saturation) {
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max sat\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_SATURATION, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO get sat\n");
+ return;
+ }
+ sdvo_priv->max_saturation = data_value[0];
+ sdvo_priv->cur_saturation = response;
+ sdvo_priv->saturation_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "saturation", 2);
+ sdvo_priv->saturation_property->values[0] = 0;
+ sdvo_priv->saturation_property->values[1] =
+ data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->saturation_property,
+ sdvo_priv->cur_saturation);
+ DRM_DEBUG_KMS("saturation: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.contrast) {
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_MAX_CONTRAST, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_CONTRAST, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO get contrast\n");
+ return;
+ }
+ sdvo_priv->max_contrast = data_value[0];
+ sdvo_priv->cur_contrast = response;
+ sdvo_priv->contrast_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "contrast", 2);
+ sdvo_priv->contrast_property->values[0] = 0;
+ sdvo_priv->contrast_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->contrast_property,
+ sdvo_priv->cur_contrast);
+ DRM_DEBUG_KMS("contrast: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.hue) {
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_MAX_HUE, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max hue\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_HUE, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO get hue\n");
+ return;
+ }
+ sdvo_priv->max_hue = data_value[0];
+ sdvo_priv->cur_hue = response;
+ sdvo_priv->hue_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "hue", 2);
+ sdvo_priv->hue_property->values[0] = 0;
+ sdvo_priv->hue_property->values[1] =
+ data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->hue_property,
+ sdvo_priv->cur_hue);
+ DRM_DEBUG_KMS("hue: max %d, default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
}
-
- return true;
-}
-
-static bool
-intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
- struct intel_sdvo_connector *intel_sdvo_connector,
- struct intel_sdvo_enhancements_reply enhancements)
-{
- struct drm_device *dev = intel_sdvo->base.enc.dev;
- struct drm_connector *connector = &intel_sdvo_connector->base.base;
- uint16_t response, data_value[2];
-
- ENHANCEMENT(brightness, BRIGHTNESS);
-
- return true;
-}
-#undef ENHANCEMENT
-
-static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
- struct intel_sdvo_connector *intel_sdvo_connector)
-{
- union {
- struct intel_sdvo_enhancements_reply reply;
- uint16_t response;
- } enhancements;
-
- if (!intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
- &enhancements, sizeof(enhancements)))
- return false;
-
- if (enhancements.response == 0) {
- DRM_DEBUG_KMS("No enhancement is supported\n");
- return true;
+ if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
+ if (sdvo_data.brightness) {
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max bright\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_BRIGHTNESS, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO get brigh\n");
+ return;
+ }
+ sdvo_priv->max_brightness = data_value[0];
+ sdvo_priv->cur_brightness = response;
+ sdvo_priv->brightness_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "brightness", 2);
+ sdvo_priv->brightness_property->values[0] = 0;
+ sdvo_priv->brightness_property->values[1] =
+ data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->brightness_property,
+ sdvo_priv->cur_brightness);
+ DRM_DEBUG_KMS("brightness: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
}
-
- if (IS_TV(intel_sdvo_connector))
- return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply);
- else if(IS_LVDS(intel_sdvo_connector))
- return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
- else
- return true;
-
+ return;
}
bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
- struct intel_sdvo *intel_sdvo;
+ struct intel_sdvo_priv *sdvo_priv;
u8 ch[0x40];
int i;
u32 i2c_reg, ddc_reg, analog_ddc_reg;
- intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
- if (!intel_sdvo)
+ intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
+ if (!intel_encoder) {
return false;
+ }
- intel_sdvo->sdvo_reg = sdvo_reg;
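+ /* sdvo_priv is carved out of the same allocation, immediately after the encoder. */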
+ sdvo_priv = (struct intel_sdvo_priv *)(intel_encoder + 1);
+ sdvo_priv->sdvo_reg = sdvo_reg;
- intel_encoder = &intel_sdvo->base;
+ intel_encoder->dev_priv = sdvo_priv;
intel_encoder->type = INTEL_OUTPUT_SDVO;
if (HAS_PCH_SPLIT(dev)) {
@@ -2577,14 +2795,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
if (!intel_encoder->i2c_bus)
goto err_inteloutput;
- intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
+ sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
/* Save the bit-banging i2c functionality for use by the DDC wrapper */
intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality;
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
- if (!intel_sdvo_read_byte(intel_sdvo, i, &ch[i])) {
+ if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) {
DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
IS_SDVOB(sdvo_reg) ? 'B' : 'C');
goto err_i2c;
@@ -2594,16 +2812,17 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
/* setup the DDC bus. */
if (IS_SDVOB(sdvo_reg)) {
intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
- intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
+ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
"SDVOB/VGA DDC BUS");
dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
} else {
intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
- intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
+ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
"SDVOC/VGA DDC BUS");
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
}
- if (intel_encoder->ddc_bus == NULL || intel_sdvo->analog_ddc_bus == NULL)
+
+ if (intel_encoder->ddc_bus == NULL)
goto err_i2c;
/* Wrap with our custom algo which switches to DDC mode */
@@ -2614,56 +2833,53 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
/* In the default case, SDVO LVDS is false */
- if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
- goto err_enc;
+ intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps);
- if (intel_sdvo_output_setup(intel_sdvo,
- intel_sdvo->caps.output_flags) != true) {
+ if (intel_sdvo_output_setup(intel_encoder,
+ sdvo_priv->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
IS_SDVOB(sdvo_reg) ? 'B' : 'C');
- goto err_enc;
+ goto err_i2c;
}
- intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
+ intel_sdvo_select_ddc_bus(dev_priv, sdvo_priv, sdvo_reg);
/* Set the input timing to the screen. Assume always input 0. */
- if (!intel_sdvo_set_target_input(intel_sdvo))
- goto err_enc;
+ intel_sdvo_set_target_input(intel_encoder, true, false);
+
+ intel_sdvo_get_input_pixel_clock_range(intel_encoder,
+ &sdvo_priv->pixel_clock_min,
+ &sdvo_priv->pixel_clock_max);
- if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
- &intel_sdvo->pixel_clock_min,
- &intel_sdvo->pixel_clock_max))
- goto err_enc;
DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
"input 1: %c, input 2: %c, "
"output 1: %c, output 2: %c\n",
- SDVO_NAME(intel_sdvo),
- intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id,
- intel_sdvo->caps.device_rev_id,
- intel_sdvo->pixel_clock_min / 1000,
- intel_sdvo->pixel_clock_max / 1000,
- (intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
- (intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+ SDVO_NAME(sdvo_priv),
+ sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
+ sdvo_priv->caps.device_rev_id,
+ sdvo_priv->pixel_clock_min / 1000,
+ sdvo_priv->pixel_clock_max / 1000,
+ (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+ (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
/* check currently supported outputs */
- intel_sdvo->caps.output_flags &
+ sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
- intel_sdvo->caps.output_flags &
+ sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+
return true;
-err_enc:
- drm_encoder_cleanup(&intel_encoder->enc);
err_i2c:
- if (intel_sdvo->analog_ddc_bus != NULL)
- intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
+ if (sdvo_priv->analog_ddc_bus != NULL)
+ intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
if (intel_encoder->ddc_bus != NULL)
intel_i2c_destroy(intel_encoder->ddc_bus);
if (intel_encoder->i2c_bus != NULL)
intel_i2c_destroy(intel_encoder->i2c_bus);
err_inteloutput:
- kfree(intel_sdvo);
+ kfree(intel_encoder);
return false;
}
diff --git a/trunk/drivers/gpu/drm/i915/intel_sdvo_regs.h b/trunk/drivers/gpu/drm/i915/intel_sdvo_regs.h
index a386b022e538..ba5cdf8ae40b 100644
--- a/trunk/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/trunk/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -312,7 +312,7 @@ struct intel_sdvo_set_target_input_args {
# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
-/** 6 bytes of bit flags for TV formats shared by all TV format functions */
+/** 5 bytes of bit flags for TV formats shared by all TV format functions */
struct intel_sdvo_tv_format {
unsigned int ntsc_m:1;
unsigned int ntsc_j:1;
@@ -596,32 +596,32 @@ struct intel_sdvo_enhancements_reply {
unsigned int overscan_h:1;
unsigned int overscan_v:1;
- unsigned int hpos:1;
- unsigned int vpos:1;
+ unsigned int position_h:1;
+ unsigned int position_v:1;
unsigned int sharpness:1;
unsigned int dot_crawl:1;
unsigned int dither:1;
- unsigned int tv_chroma_filter:1;
- unsigned int tv_luma_filter:1;
+ unsigned int max_tv_chroma_filter:1;
+ unsigned int max_tv_luma_filter:1;
} __attribute__((packed));
/* Picture enhancement limits below are dependent on the current TV format,
* and thus need to be queried and set after it.
*/
-#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4d
-#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE 0x7b
-#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D 0x52
+#define SDVO_CMD_GET_MAX_FLICKER_FITER 0x4d
+#define SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FITER 0x7b
+#define SDVO_CMD_GET_MAX_2D_FLICKER_FITER 0x52
#define SDVO_CMD_GET_MAX_SATURATION 0x55
#define SDVO_CMD_GET_MAX_HUE 0x58
#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b
#define SDVO_CMD_GET_MAX_CONTRAST 0x5e
#define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61
#define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64
-#define SDVO_CMD_GET_MAX_HPOS 0x67
-#define SDVO_CMD_GET_MAX_VPOS 0x6a
-#define SDVO_CMD_GET_MAX_SHARPNESS 0x6d
-#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
-#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
+#define SDVO_CMD_GET_MAX_POSITION_H 0x67
+#define SDVO_CMD_GET_MAX_POSITION_V 0x6a
+#define SDVO_CMD_GET_MAX_SHARPNESS_V 0x6d
+#define SDVO_CMD_GET_MAX_TV_CHROMA 0x74
+#define SDVO_CMD_GET_MAX_TV_LUMA 0x77
struct intel_sdvo_enhancement_limits_reply {
u16 max_value;
u16 default_value;
@@ -638,10 +638,10 @@ struct intel_sdvo_enhancement_limits_reply {
#define SDVO_CMD_GET_FLICKER_FILTER 0x4e
#define SDVO_CMD_SET_FLICKER_FILTER 0x4f
-#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE 0x50
-#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE 0x51
-#define SDVO_CMD_GET_FLICKER_FILTER_2D 0x53
-#define SDVO_CMD_SET_FLICKER_FILTER_2D 0x54
+#define SDVO_CMD_GET_ADAPTIVE_FLICKER_FITER 0x50
+#define SDVO_CMD_SET_ADAPTIVE_FLICKER_FITER 0x51
+#define SDVO_CMD_GET_2D_FLICKER_FITER 0x53
+#define SDVO_CMD_SET_2D_FLICKER_FITER 0x54
#define SDVO_CMD_GET_SATURATION 0x56
#define SDVO_CMD_SET_SATURATION 0x57
#define SDVO_CMD_GET_HUE 0x59
@@ -654,16 +654,16 @@ struct intel_sdvo_enhancement_limits_reply {
#define SDVO_CMD_SET_OVERSCAN_H 0x63
#define SDVO_CMD_GET_OVERSCAN_V 0x65
#define SDVO_CMD_SET_OVERSCAN_V 0x66
-#define SDVO_CMD_GET_HPOS 0x68
-#define SDVO_CMD_SET_HPOS 0x69
-#define SDVO_CMD_GET_VPOS 0x6b
-#define SDVO_CMD_SET_VPOS 0x6c
+#define SDVO_CMD_GET_POSITION_H 0x68
+#define SDVO_CMD_SET_POSITION_H 0x69
+#define SDVO_CMD_GET_POSITION_V 0x6b
+#define SDVO_CMD_SET_POSITION_V 0x6c
#define SDVO_CMD_GET_SHARPNESS 0x6e
#define SDVO_CMD_SET_SHARPNESS 0x6f
-#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75
-#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76
-#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
-#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
+#define SDVO_CMD_GET_TV_CHROMA 0x75
+#define SDVO_CMD_SET_TV_CHROMA 0x76
+#define SDVO_CMD_GET_TV_LUMA 0x78
+#define SDVO_CMD_SET_TV_LUMA 0x79
struct intel_sdvo_enhancements_arg {
u16 value;
}__attribute__((packed));
diff --git a/trunk/drivers/gpu/drm/i915/intel_tv.c b/trunk/drivers/gpu/drm/i915/intel_tv.c
index c671f60ce80b..cc3726a4a1cb 100644
--- a/trunk/drivers/gpu/drm/i915/intel_tv.c
+++ b/trunk/drivers/gpu/drm/i915/intel_tv.c
@@ -44,9 +44,7 @@ enum tv_margin {
};
/** Private structure for the integrated TV support */
-struct intel_tv {
- struct intel_encoder base;
-
+struct intel_tv_priv {
int type;
char *tv_format;
int margin[4];
@@ -898,11 +896,6 @@ static const struct tv_mode tv_modes[] = {
},
};
-static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
-{
- return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base);
-}
-
static void
intel_tv_dpms(struct drm_encoder *encoder, int mode)
{
@@ -936,17 +929,19 @@ intel_tv_mode_lookup (char *tv_format)
}
static const struct tv_mode *
-intel_tv_mode_find (struct intel_tv *intel_tv)
+intel_tv_mode_find(struct intel_encoder *intel_encoder)
{
- return intel_tv_mode_lookup(intel_tv->tv_format);
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
+
+ return intel_tv_mode_lookup(tv_priv->tv_format);
}
static enum drm_mode_status
intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
/* Ensure TV refresh is close to desired refresh */
if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
@@ -962,8 +957,8 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
{
struct drm_device *dev = encoder->dev;
struct drm_mode_config *drm_config = &dev->mode_config;
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find (intel_encoder);
struct drm_encoder *other_encoder;
if (!tv_mode)
@@ -988,8 +983,9 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
u32 tv_ctl;
u32 hctl1, hctl2, hctl3;
u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
@@ -1005,7 +1001,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
tv_ctl = I915_READ(TV_CTL);
tv_ctl &= TV_CTL_SAVE;
- switch (intel_tv->type) {
+ switch (tv_priv->type) {
default:
case DRM_MODE_CONNECTOR_Unknown:
case DRM_MODE_CONNECTOR_Composite:
@@ -1158,11 +1154,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
/* Wait for vblank for the disable to take effect */
if (!IS_I9XX(dev))
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev);
I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
/* Wait for vblank for the disable to take effect. */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev);
/* Filter ctl must be set before TV_WIN_SIZE */
I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
@@ -1172,12 +1168,12 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
else
ysize = 2*tv_mode->nbr_end + 1;
- xpos += intel_tv->margin[TV_MARGIN_LEFT];
- ypos += intel_tv->margin[TV_MARGIN_TOP];
- xsize -= (intel_tv->margin[TV_MARGIN_LEFT] +
- intel_tv->margin[TV_MARGIN_RIGHT]);
- ysize -= (intel_tv->margin[TV_MARGIN_TOP] +
- intel_tv->margin[TV_MARGIN_BOTTOM]);
+ xpos += tv_priv->margin[TV_MARGIN_LEFT];
+ ypos += tv_priv->margin[TV_MARGIN_TOP];
+ xsize -= (tv_priv->margin[TV_MARGIN_LEFT] +
+ tv_priv->margin[TV_MARGIN_RIGHT]);
+ ysize -= (tv_priv->margin[TV_MARGIN_TOP] +
+ tv_priv->margin[TV_MARGIN_BOTTOM]);
I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
@@ -1226,9 +1222,9 @@ static const struct drm_display_mode reported_modes[] = {
* \return false if TV is disconnected.
*/
static int
-intel_tv_detect_type (struct intel_tv *intel_tv)
+intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
{
- struct drm_encoder *encoder = &intel_tv->base.enc;
+ struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
@@ -1267,15 +1263,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
DAC_C_0_7_V);
I915_WRITE(TV_CTL, tv_ctl);
I915_WRITE(TV_DAC, tv_dac);
- POSTING_READ(TV_DAC);
- msleep(20);
-
+ intel_wait_for_vblank(dev);
tv_dac = I915_READ(TV_DAC);
I915_WRITE(TV_DAC, save_tv_dac);
I915_WRITE(TV_CTL, save_tv_ctl);
- POSTING_READ(TV_CTL);
- msleep(20);
-
+ intel_wait_for_vblank(dev);
/*
* A B C
* 0 1 1 Composite
@@ -1312,11 +1304,12 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
static void intel_tv_find_better_format(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
int i;
- if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
+ if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
tv_mode->component_only)
return;
@@ -1324,12 +1317,12 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
for (i = 0; i < sizeof(tv_modes) / sizeof(*tv_modes); i++) {
tv_mode = tv_modes + i;
- if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
+ if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
tv_mode->component_only)
break;
}
- intel_tv->tv_format = tv_mode->name;
+ tv_priv->tv_format = tv_mode->name;
drm_connector_property_set_value(connector,
connector->dev->mode_config.tv_mode_property, i);
}
@@ -1343,31 +1336,31 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
static enum drm_connector_status
intel_tv_detect(struct drm_connector *connector)
{
+ struct drm_crtc *crtc;
struct drm_display_mode mode;
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
- int type;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
+ int dpms_mode;
+ int type = tv_priv->type;
mode = reported_modes[0];
drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
if (encoder->crtc && encoder->crtc->enabled) {
- type = intel_tv_detect_type(intel_tv);
+ type = intel_tv_detect_type(encoder->crtc, intel_encoder);
} else {
- struct drm_crtc *crtc;
- int dpms_mode;
-
- crtc = intel_get_load_detect_pipe(&intel_tv->base, connector,
+ crtc = intel_get_load_detect_pipe(intel_encoder, connector,
&mode, &dpms_mode);
if (crtc) {
- type = intel_tv_detect_type(intel_tv);
- intel_release_load_detect_pipe(&intel_tv->base, connector,
+ type = intel_tv_detect_type(crtc, intel_encoder);
+ intel_release_load_detect_pipe(intel_encoder, connector,
dpms_mode);
} else
type = -1;
}
- intel_tv->type = type;
+ tv_priv->type = type;
if (type < 0)
return connector_status_disconnected;
@@ -1398,8 +1391,8 @@ intel_tv_chose_preferred_modes(struct drm_connector *connector,
struct drm_display_mode *mode_ptr)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
@@ -1424,8 +1417,8 @@ intel_tv_get_modes(struct drm_connector *connector)
{
struct drm_display_mode *mode_ptr;
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
int j, count = 0;
u64 tmp;
@@ -1490,7 +1483,8 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
{
struct drm_device *dev = connector->dev;
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
struct drm_crtc *crtc = encoder->crtc;
int ret = 0;
bool changed = false;
@@ -1500,30 +1494,30 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
goto out;
if (property == dev->mode_config.tv_left_margin_property &&
- intel_tv->margin[TV_MARGIN_LEFT] != val) {
- intel_tv->margin[TV_MARGIN_LEFT] = val;
+ tv_priv->margin[TV_MARGIN_LEFT] != val) {
+ tv_priv->margin[TV_MARGIN_LEFT] = val;
changed = true;
} else if (property == dev->mode_config.tv_right_margin_property &&
- intel_tv->margin[TV_MARGIN_RIGHT] != val) {
- intel_tv->margin[TV_MARGIN_RIGHT] = val;
+ tv_priv->margin[TV_MARGIN_RIGHT] != val) {
+ tv_priv->margin[TV_MARGIN_RIGHT] = val;
changed = true;
} else if (property == dev->mode_config.tv_top_margin_property &&
- intel_tv->margin[TV_MARGIN_TOP] != val) {
- intel_tv->margin[TV_MARGIN_TOP] = val;
+ tv_priv->margin[TV_MARGIN_TOP] != val) {
+ tv_priv->margin[TV_MARGIN_TOP] = val;
changed = true;
} else if (property == dev->mode_config.tv_bottom_margin_property &&
- intel_tv->margin[TV_MARGIN_BOTTOM] != val) {
- intel_tv->margin[TV_MARGIN_BOTTOM] = val;
+ tv_priv->margin[TV_MARGIN_BOTTOM] != val) {
+ tv_priv->margin[TV_MARGIN_BOTTOM] = val;
changed = true;
} else if (property == dev->mode_config.tv_mode_property) {
if (val >= ARRAY_SIZE(tv_modes)) {
ret = -EINVAL;
goto out;
}
- if (!strcmp(intel_tv->tv_format, tv_modes[val].name))
+ if (!strcmp(tv_priv->tv_format, tv_modes[val].name))
goto out;
- intel_tv->tv_format = tv_modes[val].name;
+ tv_priv->tv_format = tv_modes[val].name;
changed = true;
} else {
ret = -EINVAL;
@@ -1559,8 +1553,16 @@ static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs =
.best_encoder = intel_attached_encoder,
};
+static void intel_tv_enc_destroy(struct drm_encoder *encoder)
+{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
+}
+
static const struct drm_encoder_funcs intel_tv_enc_funcs = {
- .destroy = intel_encoder_destroy,
+ .destroy = intel_tv_enc_destroy,
};
/*
@@ -1604,9 +1606,9 @@ intel_tv_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
- struct intel_tv *intel_tv;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
+ struct intel_tv_priv *tv_priv;
u32 tv_dac_on, tv_dac_off, save_tv_dac;
char **tv_format_names;
int i, initial_mode = 0;
@@ -1645,18 +1647,18 @@ intel_tv_init(struct drm_device *dev)
(tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
return;
- intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL);
- if (!intel_tv) {
+ intel_encoder = kzalloc(sizeof(struct intel_encoder) +
+ sizeof(struct intel_tv_priv), GFP_KERNEL);
+ if (!intel_encoder) {
return;
}
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_tv);
+ kfree(intel_encoder);
return;
}
- intel_encoder = &intel_tv->base;
connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
@@ -1666,20 +1668,22 @@ intel_tv_init(struct drm_device *dev)
DRM_MODE_ENCODER_TVDAC);
drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
+ tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
- intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
+ intel_encoder->dev_priv = tv_priv;
+ tv_priv->type = DRM_MODE_CONNECTOR_Unknown;
/* BIOS margin values */
- intel_tv->margin[TV_MARGIN_LEFT] = 54;
- intel_tv->margin[TV_MARGIN_TOP] = 36;
- intel_tv->margin[TV_MARGIN_RIGHT] = 46;
- intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
+ tv_priv->margin[TV_MARGIN_LEFT] = 54;
+ tv_priv->margin[TV_MARGIN_TOP] = 36;
+ tv_priv->margin[TV_MARGIN_RIGHT] = 46;
+ tv_priv->margin[TV_MARGIN_BOTTOM] = 37;
- intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
+ tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
@@ -1699,16 +1703,16 @@ intel_tv_init(struct drm_device *dev)
initial_mode);
drm_connector_attach_property(connector,
dev->mode_config.tv_left_margin_property,
- intel_tv->margin[TV_MARGIN_LEFT]);
+ tv_priv->margin[TV_MARGIN_LEFT]);
drm_connector_attach_property(connector,
dev->mode_config.tv_top_margin_property,
- intel_tv->margin[TV_MARGIN_TOP]);
+ tv_priv->margin[TV_MARGIN_TOP]);
drm_connector_attach_property(connector,
dev->mode_config.tv_right_margin_property,
- intel_tv->margin[TV_MARGIN_RIGHT]);
+ tv_priv->margin[TV_MARGIN_RIGHT]);
drm_connector_attach_property(connector,
dev->mode_config.tv_bottom_margin_property,
- intel_tv->margin[TV_MARGIN_BOTTOM]);
+ tv_priv->margin[TV_MARGIN_BOTTOM]);
out:
drm_sysfs_connector_add(connector);
}
diff --git a/trunk/drivers/gpu/drm/mga/mga_state.c b/trunk/drivers/gpu/drm/mga/mga_state.c
index 9ce2827f8c00..fff82045c427 100644
--- a/trunk/drivers/gpu/drm/mga/mga_state.c
+++ b/trunk/drivers/gpu/drm/mga/mga_state.c
@@ -1085,19 +1085,19 @@ file_priv)
}
struct drm_ioctl_desc mga_ioctls[] = {
- DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_SWAP, mga_dma_swap, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_CLEAR, mga_dma_clear, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_INDICES, mga_dma_indices, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_ILOAD, mga_dma_iload, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_BLIT, mga_dma_blit, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_GETPARAM, mga_getparam, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};
int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_bios.c b/trunk/drivers/gpu/drm/nouveau/nouveau_bios.c
index 974b0f8ae048..0b69a9628c95 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -2166,7 +2166,7 @@ peek_fb(struct drm_device *dev, struct io_mapping *fb,
uint32_t val = 0;
if (off < pci_resource_len(dev->pdev, 1)) {
- uint8_t __iomem *p =
+ uint32_t __iomem *p =
io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
val = ioread32(p + (off & ~PAGE_MASK));
@@ -2182,7 +2182,7 @@ poke_fb(struct drm_device *dev, struct io_mapping *fb,
uint32_t off, uint32_t val)
{
if (off < pci_resource_len(dev->pdev, 1)) {
- uint8_t __iomem *p =
+ uint32_t __iomem *p =
io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
iowrite32(val, p + (off & ~PAGE_MASK));
@@ -3869,10 +3869,27 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
}
#ifdef __powerpc__
/* Powerbook specific quirks */
- if (script == LVDS_RESET &&
- (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
- dev->pci_device == 0x0329))
- nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
+ if ((dev->pci_device & 0xffff) == 0x0179 ||
+ (dev->pci_device & 0xffff) == 0x0189 ||
+ (dev->pci_device & 0xffff) == 0x0329) {
+ if (script == LVDS_RESET) {
+ nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
+
+ } else if (script == LVDS_PANEL_ON) {
+ bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
+ bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
+ | (1 << 31));
+ bios_wr32(bios, NV_PCRTC_GPIO_EXT,
+ bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
+
+ } else if (script == LVDS_PANEL_OFF) {
+ bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
+ bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
+ & ~(1 << 31));
+ bios_wr32(bios, NV_PCRTC_GPIO_EXT,
+ bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
+ }
+ }
#endif
return 0;
@@ -4364,8 +4381,11 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
*
* For the moment, a quirk will do :)
*/
- if (nv_match_device(dev, 0x01d7, 0x1028, 0x01c2))
+ if ((dev->pdev->device == 0x01d7) &&
+ (dev->pdev->subsystem_vendor == 0x1028) &&
+ (dev->pdev->subsystem_device == 0x01c2)) {
bios->fp.duallink_transition_clk = 80000;
+ }
/* set dual_link flag for EDID case */
if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
@@ -4567,7 +4587,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
return 1;
}
- NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script);
+ NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk == -1) {
@@ -4577,7 +4597,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
return 1;
}
- NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script);
+ NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk == -2) {
@@ -4590,7 +4610,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
return 1;
}
- NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script);
+ NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk > 0) {
@@ -4602,7 +4622,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
return 1;
}
- NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script);
+ NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk < 0) {
@@ -4614,7 +4634,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
return 1;
}
- NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script);
+ NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
nouveau_bios_run_init_table(dev, script, dcbent);
}
@@ -5337,17 +5357,19 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
}
tmdstableptr = ROM16(bios->data[bitentry->offset]);
- if (!tmdstableptr) {
+
+ if (tmdstableptr == 0x0) {
NV_ERROR(dev, "Pointer to TMDS table invalid\n");
return -EINVAL;
}
- NV_INFO(dev, "TMDS table version %d.%d\n",
- bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
-
/* nv50+ has v2.0, but we don't parse it atm */
- if (bios->data[tmdstableptr] != 0x11)
+ if (bios->data[tmdstableptr] != 0x11) {
+ NV_WARN(dev,
+ "TMDS table revision %d.%d not currently supported\n",
+ bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
return -ENOSYS;
+ }
/*
* These two scripts are odd: they don't seem to get run even when
@@ -5787,20 +5809,6 @@ parse_dcb_gpio_table(struct nvbios *bios)
gpio->line = tvdac_gpio[1] >> 4;
gpio->invert = tvdac_gpio[0] & 2;
}
- } else {
- /*
- * No systematic way to store GPIO info on pre-v2.2
- * DCBs, try to match the PCI device IDs.
- */
-
- /* Apple iMac G4 NV18 */
- if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
- struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
-
- gpio->tag = DCB_GPIO_TVDAC0;
- gpio->line = 4;
- }
-
}
if (!gpio_table_ptr)
@@ -5876,7 +5884,9 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx)
struct drm_device *dev = bios->dev;
/* Gigabyte NX85T */
- if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
+ if ((dev->pdev->device == 0x0421) &&
+ (dev->pdev->subsystem_vendor == 0x1458) &&
+ (dev->pdev->subsystem_device == 0x344c)) {
if (cte->type == DCB_CONNECTOR_HDMI_1)
cte->type = DCB_CONNECTOR_DVI_I;
}
@@ -6129,7 +6139,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4;
break;
- case OUTPUT_EOL:
+ case 0xe:
/* weird g80 mobile type that "nv" treats as a terminator */
dcb->entries--;
return false;
@@ -6166,15 +6176,23 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
entry->type = OUTPUT_TV;
break;
case 2:
- case 4:
- if (conn & 0x10)
- entry->type = OUTPUT_LVDS;
- else
- entry->type = OUTPUT_TMDS;
- break;
case 3:
entry->type = OUTPUT_LVDS;
break;
+ case 4:
+ switch ((conn & 0x000000f0) >> 4) {
+ case 0:
+ entry->type = OUTPUT_TMDS;
+ break;
+ case 1:
+ entry->type = OUTPUT_LVDS;
+ break;
+ default:
+ NV_ERROR(dev, "Unknown DCB subtype 4/%d\n",
+ (conn & 0x000000f0) >> 4);
+ return false;
+ }
+ break;
default:
NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
return false;
@@ -6289,7 +6307,9 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
* nasty problems until this is sorted (assuming it's not a
* VBIOS bug).
*/
- if (nv_match_device(dev, 0x040d, 0x1028, 0x019b)) {
+ if ((dev->pdev->device == 0x040d) &&
+ (dev->pdev->subsystem_vendor == 0x1028) &&
+ (dev->pdev->subsystem_device == 0x019b)) {
if (*conn == 0x02026312 && *conf == 0x00000020)
return false;
}
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_bios.h b/trunk/drivers/gpu/drm/nouveau/nouveau_bios.h
index c1de2f3fcb0e..fd14dfd3d780 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -95,7 +95,6 @@ enum dcb_type {
OUTPUT_TMDS = 2,
OUTPUT_LVDS = 3,
OUTPUT_DP = 6,
- OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */
OUTPUT_ANY = -1
};
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_bo.c b/trunk/drivers/gpu/drm/nouveau/nouveau_bo.c
index f6f44779d82f..84f85183d041 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -36,21 +36,6 @@
#include <linux/log2.h>
#include <linux/slab.h>
-int
-nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
-{
- struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
- int ret;
-
- if (!prev_fence || nouveau_fence_channel(prev_fence) == chan)
- return 0;
-
- spin_lock(&nvbo->bo.lock);
- ret = ttm_bo_wait(&nvbo->bo, false, false, false);
- spin_unlock(&nvbo->bo.lock);
- return ret;
-}
-
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_channel.c b/trunk/drivers/gpu/drm/nouveau/nouveau_channel.c
index 0480f064f2c1..90fdcda332be 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -426,18 +426,18 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
***********************************/
struct drm_ioctl_desc nouveau_ioctls[] = {
- DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
};
int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_connector.c b/trunk/drivers/gpu/drm/nouveau/nouveau_connector.c
index a1473fff06ac..b1b22baf1428 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -104,7 +104,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
int i;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- struct nouveau_i2c_chan *i2c = NULL;
+ struct nouveau_i2c_chan *i2c;
struct nouveau_encoder *nv_encoder;
struct drm_mode_object *obj;
int id;
@@ -117,9 +117,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
if (!obj)
continue;
nv_encoder = nouveau_encoder(obj_to_encoder(obj));
-
- if (nv_encoder->dcb->i2c_index < 0xf)
- i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+ i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
if (i2c && nouveau_probe_i2c_addr(i2c, 0x50)) {
*pnv_encoder = nv_encoder;
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_drv.h b/trunk/drivers/gpu/drm/nouveau/nouveau_drv.h
index b1be617373b6..e424bf74d706 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1165,7 +1165,6 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
-extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *);
/* nouveau_fence.c */
struct nouveau_fence;
@@ -1389,15 +1388,6 @@ nv_two_reg_pll(struct drm_device *dev)
return false;
}
-static inline bool
-nv_match_device(struct drm_device *dev, unsigned device,
- unsigned sub_vendor, unsigned sub_device)
-{
- return dev->pdev->device == device &&
- dev->pdev->subsystem_vendor == sub_vendor &&
- dev->pdev->subsystem_device == sub_device;
-}
-
#define NV_SW 0x0000506e
#define NV_SW_DMA_SEMAPHORE 0x00000060
#define NV_SW_SEMAPHORE_OFFSET 0x00000064
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_fence.c b/trunk/drivers/gpu/drm/nouveau/nouveau_fence.c
index 87ac21ec23d2..6b208ffafa8d 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -64,17 +64,16 @@ nouveau_fence_update(struct nouveau_channel *chan)
struct nouveau_fence *fence;
uint32_t sequence;
- spin_lock(&chan->fence.lock);
-
if (USE_REFCNT)
sequence = nvchan_rd32(chan, 0x48);
else
sequence = atomic_read(&chan->fence.last_sequence_irq);
if (chan->fence.sequence_ack == sequence)
- goto out;
+ return;
chan->fence.sequence_ack = sequence;
+ spin_lock(&chan->fence.lock);
list_for_each_safe(entry, tmp, &chan->fence.pending) {
fence = list_entry(entry, struct nouveau_fence, entry);
@@ -86,7 +85,6 @@ nouveau_fence_update(struct nouveau_channel *chan)
if (sequence == chan->fence.sequence_ack)
break;
}
-out:
spin_unlock(&chan->fence.lock);
}
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_gem.c b/trunk/drivers/gpu/drm/nouveau/nouveau_gem.c
index ead7b8fc53fc..0f417ac1b696 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -245,7 +245,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
ttm_bo_unreserve(&nvbo->bo);
- drm_gem_object_unreference_unlocked(nvbo->gem);
+ drm_gem_object_unreference(nvbo->gem);
}
}
@@ -300,7 +300,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
validate_fini(op, NULL);
if (ret == -EAGAIN)
ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_unreference(gem);
if (ret) {
NV_ERROR(dev, "fail reserve\n");
return ret;
@@ -337,9 +337,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
return -EINVAL;
}
- mutex_unlock(&drm_global_mutex);
ret = ttm_bo_wait_cpu(&nvbo->bo, false);
- mutex_lock(&drm_global_mutex);
if (ret) {
NV_ERROR(dev, "fail wait_cpu\n");
return ret;
@@ -363,11 +361,16 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
+ struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
- ret = nouveau_bo_sync_gpu(nvbo, chan);
- if (unlikely(ret)) {
- NV_ERROR(dev, "fail pre-validate sync\n");
- return ret;
+ if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
+ spin_lock(&nvbo->bo.lock);
+ ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+ spin_unlock(&nvbo->bo.lock);
+ if (unlikely(ret)) {
+ NV_ERROR(dev, "fail wait other chan\n");
+ return ret;
+ }
}
ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
@@ -378,7 +381,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
return ret;
}
- nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
+ nvbo->channel = chan;
ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
false, false, false);
nvbo->channel = NULL;
@@ -387,12 +390,6 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
return ret;
}
- ret = nouveau_bo_sync_gpu(nvbo, chan);
- if (unlikely(ret)) {
- NV_ERROR(dev, "fail post-validate sync\n");
- return ret;
- }
-
if (nvbo->bo.offset == b->presumed.offset &&
((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -616,20 +613,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
return PTR_ERR(bo);
}
- /* Mark push buffers as being used on PFIFO, the validation code
- * will then make sure that if the pushbuf bo moves, that they
- * happen on the kernel channel, which will in turn cause a sync
- * to happen before we try and submit the push buffer.
- */
- for (i = 0; i < req->nr_push; i++) {
- if (push[i].bo_index >= req->nr_buffers) {
- NV_ERROR(dev, "push %d buffer not in list\n", i);
- ret = -EINVAL;
- goto out;
- }
-
- bo[push[i].bo_index].read_domains |= (1 << 31);
- }
+ mutex_lock(&dev->struct_mutex);
/* Validate buffer list */
ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
@@ -663,7 +647,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
push[i].length);
}
} else
- if (dev_priv->chipset >= 0x25) {
+ if (dev_priv->card_type >= NV_20) {
ret = RING_SPACE(chan, req->nr_push * 2);
if (ret) {
NV_ERROR(dev, "cal_space: %d\n", ret);
@@ -729,6 +713,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
out:
validate_fini(&op, fence);
nouveau_fence_unref((void**)&fence);
+ mutex_unlock(&dev->struct_mutex);
kfree(bo);
kfree(push);
@@ -737,7 +722,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
req->suffix0 = 0x00000000;
req->suffix1 = 0x00000000;
} else
- if (dev_priv->chipset >= 0x25) {
+ if (dev_priv->card_type >= NV_20) {
req->suffix0 = 0x00020000;
req->suffix1 = 0x00000000;
} else {
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_i2c.c b/trunk/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 84614858728b..0bd407ca3d42 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -163,7 +163,7 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
if (entry->chan)
return -EEXIST;
- if (dev_priv->card_type >= NV_50 && entry->read >= NV50_I2C_PORTS) {
+ if (dev_priv->card_type == NV_C0 && entry->read >= NV50_I2C_PORTS) {
NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
return -EINVAL;
}
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/trunk/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 6b9187d7f67d..491767fe4fcf 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -214,7 +214,6 @@ int
nouveau_sgdma_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct pci_dev *pdev = dev->pdev;
struct nouveau_gpuobj *gpuobj = NULL;
uint32_t aper_size, obj_size;
int i, ret;
@@ -240,19 +239,10 @@ nouveau_sgdma_init(struct drm_device *dev)
dev_priv->gart_info.sg_dummy_page =
alloc_page(GFP_KERNEL|__GFP_DMA32);
- if (!dev_priv->gart_info.sg_dummy_page) {
- nouveau_gpuobj_del(dev, &gpuobj);
- return -ENOMEM;
- }
-
set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
dev_priv->gart_info.sg_dummy_bus =
- pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
+ pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
- nouveau_gpuobj_del(dev, &gpuobj);
- return -EFAULT;
- }
if (dev_priv->card_type < NV_50) {
/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
diff --git a/trunk/drivers/gpu/drm/nouveau/nv04_dfp.c b/trunk/drivers/gpu/drm/nouveau/nv04_dfp.c
index 0d3206a7046c..a5dcf7685800 100644
--- a/trunk/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/trunk/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -444,7 +444,6 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct dcb_entry *dcbe = nv_encoder->dcb;
int head = nouveau_crtc(encoder->crtc)->index;
- struct drm_encoder *slave_encoder;
if (dcbe->type == OUTPUT_TMDS)
run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
@@ -463,10 +462,9 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
/* Init external transmitters */
- slave_encoder = get_tmds_slave(encoder);
- if (slave_encoder)
- get_slave_funcs(slave_encoder)->mode_set(
- slave_encoder, &nv_encoder->mode, &nv_encoder->mode);
+ if (get_tmds_slave(encoder))
+ get_slave_funcs(get_tmds_slave(encoder))->mode_set(
+ encoder, &nv_encoder->mode, &nv_encoder->mode);
helper->dpms(encoder, DRM_MODE_DPMS_ON);
@@ -475,27 +473,6 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
-static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
-{
-#ifdef __powerpc__
- struct drm_device *dev = encoder->dev;
-
- /* BIOS scripts usually take care of the backlight, thanks
- * Apple for your consistency.
- */
- if (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
- dev->pci_device == 0x0329) {
- if (mode == DRM_MODE_DPMS_ON) {
- nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
- nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 1);
- } else {
- nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
- nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 0);
- }
- }
-#endif
-}
-
static inline bool is_powersaving_dpms(int mode)
{
return (mode != DRM_MODE_DPMS_ON);
@@ -543,7 +520,6 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
LVDS_PANEL_OFF, 0);
}
- nv04_dfp_update_backlight(encoder, mode);
nv04_dfp_update_fp_control(encoder, mode);
if (mode == DRM_MODE_DPMS_ON)
@@ -567,7 +543,6 @@ static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
mode, nv_encoder->dcb->index);
- nv04_dfp_update_backlight(encoder, mode);
nv04_dfp_update_fp_control(encoder, mode);
}
diff --git a/trunk/drivers/gpu/drm/nouveau/nv17_tv.c b/trunk/drivers/gpu/drm/nouveau/nv17_tv.c
index 13cdc05b7c2d..44fefb0c7083 100644
--- a/trunk/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/trunk/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -121,14 +121,10 @@ static bool
get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
{
/* Zotac FX5200 */
- if (nv_match_device(dev, 0x0322, 0x19da, 0x1035) ||
- nv_match_device(dev, 0x0322, 0x19da, 0x2035)) {
- *pin_mask = 0xc;
- return false;
- }
-
- /* MSI nForce2 IGP */
- if (nv_match_device(dev, 0x01f0, 0x1462, 0x5710)) {
+ if (dev->pdev->device == 0x0322 &&
+ dev->pdev->subsystem_vendor == 0x19da &&
+ (dev->pdev->subsystem_device == 0x1035 ||
+ dev->pdev->subsystem_device == 0x2035)) {
*pin_mask = 0xc;
return false;
}
diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_instmem.c b/trunk/drivers/gpu/drm/nouveau/nv50_instmem.c
index 91ef93cf1f35..37c7b48ab24a 100644
--- a/trunk/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/trunk/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -139,8 +139,6 @@ nv50_instmem_init(struct drm_device *dev)
chan->file_priv = (struct drm_file *)-2;
dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
- INIT_LIST_HEAD(&chan->ramht_refs);
-
/* Channel's PRAMIN object + heap */
ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
NULL, &chan->ramin);
@@ -280,7 +278,7 @@ nv50_instmem_init(struct drm_device *dev)
/*XXX: incorrect, but needed to make hash func "work" */
dev_priv->ramht_offset = 0x10000;
dev_priv->ramht_bits = 9;
- dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8;
+ dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
return 0;
}
diff --git a/trunk/drivers/gpu/drm/nouveau/nvc0_instmem.c b/trunk/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 6b451f864783..3ab3cdc42173 100644
--- a/trunk/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/trunk/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -142,16 +142,14 @@ int
nvc0_instmem_suspend(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 *buf;
int i;
dev_priv->susres.ramin_copy = vmalloc(65536);
if (!dev_priv->susres.ramin_copy)
return -ENOMEM;
- buf = dev_priv->susres.ramin_copy;
- for (i = 0; i < 65536; i += 4)
- buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i);
+ for (i = 0x700000; i < 0x710000; i += 4)
+ dev_priv->susres.ramin_copy[i/4] = nv_rd32(dev, i);
return 0;
}
@@ -159,15 +157,14 @@ void
nvc0_instmem_resume(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 *buf = dev_priv->susres.ramin_copy;
u64 chan;
int i;
chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
nv_wr32(dev, 0x001700, chan >> 16);
- for (i = 0; i < 65536; i += 4)
- nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]);
+ for (i = 0x700000; i < 0x710000; i += 4)
+ nv_wr32(dev, i, dev_priv->susres.ramin_copy[i/4]);
vfree(dev_priv->susres.ramin_copy);
dev_priv->susres.ramin_copy = NULL;
@@ -224,7 +221,7 @@ nvc0_instmem_init(struct drm_device *dev)
/*XXX: incorrect, but needed to make hash func "work" */
dev_priv->ramht_offset = 0x10000;
dev_priv->ramht_bits = 9;
- dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8;
+ dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
return 0;
}
diff --git a/trunk/drivers/gpu/drm/r128/r128_state.c b/trunk/drivers/gpu/drm/r128/r128_state.c
index a9e33ce65918..077af1f2f9b4 100644
--- a/trunk/drivers/gpu/drm/r128/r128_state.c
+++ b/trunk/drivers/gpu/drm/r128/r128_state.c
@@ -1639,29 +1639,30 @@ void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
r128_do_cleanup_pageflip(dev);
}
}
+
void r128_driver_lastclose(struct drm_device *dev)
{
r128_do_cleanup_cce(dev);
}
struct drm_ioctl_desc r128_ioctls[] = {
- DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_RESET, r128_engine_reset, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_SWAP, r128_cce_swap, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_FLIP, r128_cce_flip, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_CLEAR, r128_cce_clear, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_VERTEX, r128_cce_vertex, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_INDICES, r128_cce_indices, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_BLIT, r128_cce_blit, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_DEPTH, r128_cce_depth, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH),
};
int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
diff --git a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c
index 464a81a1990f..12ad512bd3d3 100644
--- a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -332,11 +332,6 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
args.usV_SyncWidth =
cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
- args.ucOverscanRight = radeon_crtc->h_border;
- args.ucOverscanLeft = radeon_crtc->h_border;
- args.ucOverscanBottom = radeon_crtc->v_border;
- args.ucOverscanTop = radeon_crtc->v_border;
-
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
misc |= ATOM_VSYNC_POLARITY;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -476,8 +471,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
struct radeon_encoder *radeon_encoder = NULL;
u32 adjusted_clock = mode->clock;
int encoder_mode = 0;
- u32 dp_clock = mode->clock;
- int bpc = 8;
/* reset the pll flags */
pll->flags = 0;
@@ -520,17 +513,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
encoder_mode = atombios_get_encoder_mode(encoder);
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) {
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- if (connector) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector_atom_dig *dig_connector =
- radeon_connector->con_priv;
-
- dp_clock = dig_connector->dp_clock;
- }
- }
-
if (ASIC_IS_AVIVO(rdev)) {
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
@@ -539,20 +521,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
pll->algo = PLL_ALGO_LEGACY;
pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
}
- /* There is some evidence (often anecdotal) that RV515 LVDS
- * (on some boards at least) prefers the legacy algo. I'm not
- * sure whether this should handled generically or on a
- * case-by-case quirk basis. Both algos should work fine in the
- * majority of cases.
- */
- if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) &&
- (rdev->family == CHIP_RV515)) {
- /* allow the user to overrride just in case */
- if (radeon_new_pll == 1)
- pll->algo = PLL_ALGO_NEW;
- else
- pll->algo = PLL_ALGO_LEGACY;
- }
} else {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -587,14 +555,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
args.v1.ucEncodeMode = encoder_mode;
- if (encoder_mode == ATOM_ENCODER_MODE_DP) {
- /* may want to enable SS on DP eventually */
- /* args.v1.ucConfig |=
- ADJUST_DISPLAY_CONFIG_SS_ENABLE;*/
- } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
- args.v1.ucConfig |=
- ADJUST_DISPLAY_CONFIG_SS_ENABLE;
- }
atom_execute_table(rdev->mode_info.atom_context,
index, (uint32_t *)&args);
@@ -608,20 +568,10 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- if (encoder_mode == ATOM_ENCODER_MODE_DP) {
- /* may want to enable SS on DP/eDP eventually */
- /*args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_SS_ENABLE;*/
+ if (encoder_mode == ATOM_ENCODER_MODE_DP)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
- /* 16200 or 27000 */
- args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
- } else {
- if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
- /* deep color support */
- args.v3.sInput.usPixelClock =
- cpu_to_le16((mode->clock * bpc / 8) / 10);
- }
+ else {
if (dig->coherent_mode)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
@@ -630,19 +580,13 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
DISPPLL_CONFIG_DUAL_LINK;
}
} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (encoder_mode == ATOM_ENCODER_MODE_DP) {
- /* may want to enable SS on DP/eDP eventually */
- /*args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_SS_ENABLE;*/
+ /* may want to enable SS on DP/eDP eventually */
+ /*args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;*/
+ if (encoder_mode == ATOM_ENCODER_MODE_DP)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
- /* 16200 or 27000 */
- args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
- } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
- /* want to enable SS on LVDS eventually */
- /*args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_SS_ENABLE;*/
- } else {
+ else {
if (mode->clock > 165000)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_DUAL_LINK;
@@ -1075,11 +1019,11 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
if (rdev->family >= CHIP_RV770) {
if (radeon_crtc->crtc_id) {
- WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
- WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+ WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
+ WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
} else {
- WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
- WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+ WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
+ WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
}
}
WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
@@ -1216,18 +1160,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- struct drm_encoder *encoder;
- bool is_tvcv = false;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- /* find tv std */
- if (encoder->crtc == crtc) {
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- if (radeon_encoder->active_device &
- (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
- is_tvcv = true;
- }
- }
+ /* TODO color tiling */
atombios_disable_ss(crtc);
/* always set DCPLL */
@@ -1236,14 +1170,9 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
atombios_crtc_set_pll(crtc, adjusted_mode);
atombios_enable_ss(crtc);
- if (ASIC_IS_DCE4(rdev))
+ if (ASIC_IS_AVIVO(rdev))
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
- else if (ASIC_IS_AVIVO(rdev)) {
- if (is_tvcv)
- atombios_crtc_set_timing(crtc, adjusted_mode);
- else
- atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
- } else {
+ else {
atombios_crtc_set_timing(crtc, adjusted_mode);
if (radeon_crtc->crtc_id == 0)
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
diff --git a/trunk/drivers/gpu/drm/radeon/atombios_dp.c b/trunk/drivers/gpu/drm/radeon/atombios_dp.c
index 4e7778d44b8d..36e0d4b545e6 100644
--- a/trunk/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/trunk/drivers/gpu/drm/radeon/atombios_dp.c
@@ -610,7 +610,7 @@ void dp_link_train(struct drm_encoder *encoder,
enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
else
enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
- if (dig->linkb)
+ if (dig_connector->linkb)
enc_id |= ATOM_DP_CONFIG_LINK_B;
else
enc_id |= ATOM_DP_CONFIG_LINK_A;
diff --git a/trunk/drivers/gpu/drm/radeon/evergreen.c b/trunk/drivers/gpu/drm/radeon/evergreen.c
index b8b7f010b25f..957d5067ad9c 100644
--- a/trunk/drivers/gpu/drm/radeon/evergreen.c
+++ b/trunk/drivers/gpu/drm/radeon/evergreen.c
@@ -675,43 +675,6 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
return 0;
}
-static int evergreen_cp_start(struct radeon_device *rdev)
-{
- int r;
- uint32_t cp_me;
-
- r = radeon_ring_lock(rdev, 7);
- if (r) {
- DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
- return r;
- }
- radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
- radeon_ring_write(rdev, 0x1);
- radeon_ring_write(rdev, 0x0);
- radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
- radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_unlock_commit(rdev);
-
- cp_me = 0xff;
- WREG32(CP_ME_CNTL, cp_me);
-
- r = radeon_ring_lock(rdev, 4);
- if (r) {
- DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
- return r;
- }
- /* init some VGT regs */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(rdev, 0xe);
- radeon_ring_write(rdev, 0x10);
- radeon_ring_unlock_commit(rdev);
-
- return 0;
-}
-
int evergreen_cp_resume(struct radeon_device *rdev)
{
u32 tmp;
@@ -756,7 +719,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
rdev->cp.rptr = RREG32(CP_RB_RPTR);
rdev->cp.wptr = RREG32(CP_RB_WPTR);
- evergreen_cp_start(rdev);
+ r600_cp_start(rdev);
rdev->cp.ready = true;
r = radeon_ring_test(rdev);
if (r) {
@@ -2091,6 +2054,11 @@ int evergreen_resume(struct radeon_device *rdev)
*/
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ /* Initialize clocks */
+ r = radeon_clocks_init(rdev);
+ if (r) {
+ return r;
+ }
r = evergreen_startup(rdev);
if (r) {
@@ -2196,6 +2164,9 @@ int evergreen_init(struct radeon_device *rdev)
radeon_surface_init(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
+ r = radeon_clocks_init(rdev);
+ if (r)
+ return r;
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -2265,6 +2236,7 @@ void evergreen_fini(struct radeon_device *rdev)
evergreen_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
+ radeon_clocks_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
diff --git a/trunk/drivers/gpu/drm/radeon/r600.c b/trunk/drivers/gpu/drm/radeon/r600.c
index afc18d87fdca..d0ebae9dde25 100644
--- a/trunk/drivers/gpu/drm/radeon/r600.c
+++ b/trunk/drivers/gpu/drm/radeon/r600.c
@@ -2119,7 +2119,10 @@ int r600_cp_start(struct radeon_device *rdev)
}
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(rdev, 0x1);
- if (rdev->family >= CHIP_RV770) {
+ if (rdev->family >= CHIP_CEDAR) {
+ radeon_ring_write(rdev, 0x0);
+ radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
+ } else if (rdev->family >= CHIP_RV770) {
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
} else {
@@ -2486,6 +2489,11 @@ int r600_resume(struct radeon_device *rdev)
*/
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ /* Initialize clocks */
+ r = radeon_clocks_init(rdev);
+ if (r) {
+ return r;
+ }
r = r600_startup(rdev);
if (r) {
@@ -2578,6 +2586,9 @@ int r600_init(struct radeon_device *rdev)
radeon_surface_init(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
+ r = radeon_clocks_init(rdev);
+ if (r)
+ return r;
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -2652,6 +2663,7 @@ void r600_fini(struct radeon_device *rdev)
radeon_agp_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
+ radeon_clocks_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
@@ -3529,7 +3541,7 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
* rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
*/
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
u32 tmp;
WREG32(HDP_DEBUG1, 0);
diff --git a/trunk/drivers/gpu/drm/radeon/radeon.h b/trunk/drivers/gpu/drm/radeon/radeon.h
index a168d644bf9e..3dfcfa3ca425 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon.h
+++ b/trunk/drivers/gpu/drm/radeon/radeon.h
@@ -1013,11 +1013,6 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
-/* VRAM scratch page for HDP bug */
-struct r700_vram_scratch {
- struct radeon_bo *robj;
- volatile uint32_t *ptr;
-};
/*
* Core structure, functions and helpers.
@@ -1084,7 +1079,6 @@ struct radeon_device {
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
const struct firmware *rlc_fw; /* r6/700 RLC firmware */
struct r600_blit r600_blit;
- struct r700_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
struct r600_ih ih; /* r6/700 interrupt ring */
struct workqueue_struct *wq;
@@ -1339,6 +1333,8 @@ extern bool radeon_card_posted(struct radeon_device *rdev);
extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
extern void radeon_update_display_priority(struct radeon_device *rdev);
extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
+extern int radeon_clocks_init(struct radeon_device *rdev);
+extern void radeon_clocks_fini(struct radeon_device *rdev);
extern void radeon_scratch_init(struct radeon_device *rdev);
extern void radeon_surface_init(struct radeon_device *rdev);
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_agp.c b/trunk/drivers/gpu/drm/radeon/radeon_agp.c
index bd2f33e5c91a..f40dfb77f9b1 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_agp.c
@@ -156,13 +156,7 @@ int radeon_agp_init(struct radeon_device *rdev)
}
mode.mode = info.mode;
- /* chips with the agp to pcie bridge don't have the AGP_STATUS register
- * Just use the whatever mode the host sets up.
- */
- if (rdev->family <= CHIP_RV350)
- agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
- else
- agp_status = mode.mode;
+ agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
if (is_v3) {
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_asic.c b/trunk/drivers/gpu/drm/radeon/radeon_asic.c
index 25e1dd197791..646f96f97c77 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_asic.c
@@ -733,7 +733,6 @@ static struct radeon_asic evergreen_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
@@ -858,3 +857,21 @@ int radeon_asic_init(struct radeon_device *rdev)
return 0;
}
+/*
+ * Wrapper around modesetting bits. Move to radeon_clocks.c?
+ */
+int radeon_clocks_init(struct radeon_device *rdev)
+{
+ int r;
+
+ r = radeon_static_clocks_init(rdev->ddev);
+ if (r) {
+ return r;
+ }
+ DRM_INFO("Clocks initialized !\n");
+ return 0;
+}
+
+void radeon_clocks_fini(struct radeon_device *rdev)
+{
+}
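
The empty radeon_clocks_fini() above gives callers a symmetric teardown hook. A hedged sketch of the paired init/unwind pattern such hooks slot into (the stub functions here are illustrative, not taken from the driver):

#include <stdio.h>

static int clocks_init(void)  { puts("clocks up");   return 0; }
static void clocks_fini(void) { puts("clocks down"); }
static int fence_init(void)   { puts("fences up");   return -1; /* simulate failure */ }

static int device_init(void)
{
        int r = clocks_init();
        if (r)
                return r;
        r = fence_init();
        if (r) {
                clocks_fini();  /* unwind what already succeeded, in reverse */
                return r;
        }
        return 0;
}

int main(void)
{
        return device_init() ? 1 : 0;
}
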
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_atombios.c b/trunk/drivers/gpu/drm/radeon/radeon_atombios.c
index ebae14c4b768..6d30868744ee 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -32,11 +32,11 @@
/* from radeon_encoder.c */
extern uint32_t
-radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
- uint8_t dac);
+radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
+ uint8_t dac);
extern void radeon_link_encoder_connector(struct drm_device *dev);
extern void
-radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id,
uint32_t supported_device);
/* from radeon_connector.c */
@@ -46,14 +46,14 @@ radeon_add_atom_connector(struct drm_device *dev,
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
- uint32_t igp_lane_info,
+ bool linkb, uint32_t igp_lane_info,
uint16_t connector_object_id,
struct radeon_hpd *hpd,
struct radeon_router *router);
/* from radeon_legacy_encoder.c */
extern void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
uint32_t supported_device);
union atom_supported_devices {
@@ -85,19 +85,6 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
- /* some evergreen boards have bad data for this entry */
- if (ASIC_IS_DCE4(rdev)) {
- if ((i == 7) &&
- (gpio->usClkMaskRegisterIndex == 0x1936) &&
- (gpio->sucI2cId.ucAccess == 0)) {
- gpio->sucI2cId.ucAccess = 0x97;
- gpio->ucDataMaskShift = 8;
- gpio->ucDataEnShift = 8;
- gpio->ucDataY_Shift = 8;
- gpio->ucDataA_Shift = 8;
- }
- }
-
if (gpio->sucI2cId.ucAccess == id) {
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
@@ -160,20 +147,6 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
i2c.valid = false;
-
- /* some evergreen boards have bad data for this entry */
- if (ASIC_IS_DCE4(rdev)) {
- if ((i == 7) &&
- (gpio->usClkMaskRegisterIndex == 0x1936) &&
- (gpio->sucI2cId.ucAccess == 0)) {
- gpio->sucI2cId.ucAccess = 0x97;
- gpio->ucDataMaskShift = 8;
- gpio->ucDataEnShift = 8;
- gpio->ucDataY_Shift = 8;
- gpio->ucDataA_Shift = 8;
- }
- }
-
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
@@ -253,8 +226,6 @@ static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device
struct radeon_hpd hpd;
u32 reg;
- memset(&hpd, 0, sizeof(struct radeon_hpd));
-
if (ASIC_IS_DCE4(rdev))
reg = EVERGREEN_DC_GPIO_HPD_A;
else
@@ -506,6 +477,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
int i, j, k, path_size, device_support;
int connector_type;
u16 igp_lane_info, conn_id, connector_object_id;
+ bool linkb;
struct radeon_i2c_bus_rec ddc_bus;
struct radeon_router router;
struct radeon_gpio_rec gpio;
@@ -538,7 +510,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
addr += path_size;
path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
path_size += le16_to_cpu(path->usSize);
-
+ linkb = false;
if (device_support & le16_to_cpu(path->usDeviceTag)) {
uint8_t con_obj_id, con_obj_num, con_obj_type;
@@ -629,10 +601,13 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
- u16 encoder_obj = le16_to_cpu(path->usGraphicObjIds[j]);
+ if (grph_obj_num == 2)
+ linkb = true;
+ else
+ linkb = false;
radeon_add_atom_encoder(dev,
- encoder_obj,
+ grph_obj_id,
le16_to_cpu
(path->
usDeviceTag));
@@ -769,7 +744,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
le16_to_cpu(path->
usDeviceTag),
connector_type, &ddc_bus,
- igp_lane_info,
+ linkb, igp_lane_info,
connector_object_id,
&hpd,
&router);
@@ -958,13 +933,13 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)
radeon_add_atom_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
(1 << i),
dac),
(1 << i));
else
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
(1 << i),
dac),
(1 << i));
@@ -1021,7 +996,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
bios_connectors[i].
connector_type,
&bios_connectors[i].ddc_bus,
- 0,
+ false, 0,
connector_object_id,
&bios_connectors[i].hpd,
&router);
@@ -1208,7 +1183,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
return true;
break;
case 2:
- if (igp_info->info_2.ulBootUpSidePortClock)
+ if (igp_info->info_2.ucMemoryType & 0x0f)
return true;
break;
default:
@@ -1330,7 +1305,6 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
union lvds_info *lvds_info;
uint8_t frev, crev;
struct radeon_encoder_atom_dig *lvds = NULL;
- int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
@@ -1394,12 +1368,6 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
}
encoder->native_mode = lvds->native_mode;
-
- if (encoder_enum == 2)
- lvds->linkb = true;
- else
- lvds->linkb = false;
-
}
return lvds;
}
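
Several of the lines above decode packed ATOM object ids with mask-and-shift, and set linkb when the object instance number is 2. A small sketch of that decode, using placeholder masks rather than the real AtomBIOS layout:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Placeholder layout for a packed object id; the real masks and shifts
 * come from the AtomBIOS headers and may differ. */
#define OBJ_ID_MASK    0x00ffu
#define OBJ_ID_SHIFT   0
#define OBJ_NUM_MASK   0x0700u
#define OBJ_NUM_SHIFT  8
#define OBJ_TYPE_MASK  0xf000u
#define OBJ_TYPE_SHIFT 12

int main(void)
{
        uint16_t obj  = 0x2215;                       /* type 2, num 2, id 0x15 */
        unsigned id   = (obj & OBJ_ID_MASK)   >> OBJ_ID_SHIFT;
        unsigned num  = (obj & OBJ_NUM_MASK)  >> OBJ_NUM_SHIFT;
        unsigned type = (obj & OBJ_TYPE_MASK) >> OBJ_TYPE_SHIFT;
        bool linkb    = (num == 2);                   /* second instance drives link B */

        printf("type=%u num=%u id=0x%02x linkb=%d\n", type, num, id, linkb);
        return 0;
}
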
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_clocks.c b/trunk/drivers/gpu/drm/radeon/radeon_clocks.c
index 5249af8931e6..14448a740ba6 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -327,14 +327,6 @@ void radeon_get_clock_info(struct drm_device *dev)
mpll->max_feedback_div = 0xff;
mpll->best_vco = 0;
- if (!rdev->clock.default_sclk)
- rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
- if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock)
- rdev->clock.default_mclk = radeon_get_memory_clock(rdev);
-
- rdev->pm.current_sclk = rdev->clock.default_sclk;
- rdev->pm.current_mclk = rdev->clock.default_mclk;
-
}
/* 10 khz */
@@ -905,3 +897,53 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
}
}
+static void radeon_apply_clock_quirks(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ /* XXX make sure engine is idle */
+
+ if (rdev->family < CHIP_RS600) {
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ if (ASIC_IS_R300(rdev) || ASIC_IS_RV100(rdev))
+ tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP;
+ if ((rdev->family == CHIP_RV250)
+ || (rdev->family == CHIP_RV280))
+ tmp |=
+ RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_DISP2;
+ if ((rdev->family == CHIP_RV350)
+ || (rdev->family == CHIP_RV380))
+ tmp |= R300_SCLK_FORCE_VAP;
+ if (rdev->family == CHIP_R420)
+ tmp |= R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX;
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+ } else if (rdev->family < CHIP_R600) {
+ tmp = RREG32_PLL(AVIVO_CP_DYN_CNTL);
+ tmp |= AVIVO_CP_FORCEON;
+ WREG32_PLL(AVIVO_CP_DYN_CNTL, tmp);
+
+ tmp = RREG32_PLL(AVIVO_E2_DYN_CNTL);
+ tmp |= AVIVO_E2_FORCEON;
+ WREG32_PLL(AVIVO_E2_DYN_CNTL, tmp);
+
+ tmp = RREG32_PLL(AVIVO_IDCT_DYN_CNTL);
+ tmp |= AVIVO_IDCT_FORCEON;
+ WREG32_PLL(AVIVO_IDCT_DYN_CNTL, tmp);
+ }
+}
+
+int radeon_static_clocks_init(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* XXX make sure engine is idle */
+
+ if (radeon_dynclks != -1) {
+ if (radeon_dynclks) {
+ if (rdev->asic->set_clock_gating)
+ radeon_set_clock_gating(rdev, 1);
+ }
+ }
+ radeon_apply_clock_quirks(rdev);
+ return 0;
+}
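
radeon_apply_clock_quirks() above is a series of read-modify-write accesses that OR force-on bits into PLL registers for specific families. A userspace sketch of the RMW idiom (register indices and bit values are placeholders, simulated with an array):

#include <stdint.h>
#include <stdio.h>

static uint32_t pll_regs[4];                     /* simulated PLL register block */

#define SCLK_CNTL        0                       /* placeholder index */
#define SCLK_FORCE_CP    (1u << 16)              /* placeholder force bits */
#define SCLK_FORCE_VIP   (1u << 17)

static uint32_t rreg32_pll(unsigned reg)             { return pll_regs[reg]; }
static void     wreg32_pll(unsigned reg, uint32_t v) { pll_regs[reg] = v; }

int main(void)
{
        /* read, OR in the force bits, write back; other fields are preserved */
        uint32_t tmp = rreg32_pll(SCLK_CNTL);
        tmp |= SCLK_FORCE_CP | SCLK_FORCE_VIP;
        wreg32_pll(SCLK_CNTL, tmp);

        printf("SCLK_CNTL = 0x%08x\n", (unsigned)rreg32_pll(SCLK_CNTL));
        return 0;
}
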
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_combios.c b/trunk/drivers/gpu/drm/radeon/radeon_combios.c
index bd74e428bd14..885dcfac1838 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_combios.c
@@ -39,8 +39,8 @@
/* from radeon_encoder.c */
extern uint32_t
-radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
- uint8_t dac);
+radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
+ uint8_t dac);
extern void radeon_link_encoder_connector(struct drm_device *dev);
/* from radeon_connector.c */
@@ -55,7 +55,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
/* from radeon_legacy_encoder.c */
extern void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
uint32_t supported_device);
/* old legacy ATI BIOS routines */
@@ -1505,7 +1505,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1520,7 +1520,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -1535,7 +1535,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1550,12 +1550,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_1;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1571,7 +1571,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1588,7 +1588,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1607,7 +1607,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -1619,7 +1619,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1631,7 +1631,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1648,7 +1648,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -1660,12 +1660,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_2; /* ??? */
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP2_SUPPORT,
0),
ATOM_DEVICE_DFP2_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1680,7 +1680,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1697,7 +1697,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -1709,12 +1709,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1728,7 +1728,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1745,7 +1745,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -1757,7 +1757,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1769,7 +1769,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1786,12 +1786,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_2; /* ??? */
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP2_SUPPORT,
0),
ATOM_DEVICE_DFP2_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1806,7 +1806,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1823,12 +1823,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1842,7 +1842,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1859,7 +1859,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
@@ -1871,7 +1871,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1883,7 +1883,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1900,7 +1900,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1912,7 +1912,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1924,7 +1924,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1941,7 +1941,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1952,7 +1952,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -2109,7 +2109,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
else
devices = ATOM_DEVICE_DFP1_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum
+ radeon_get_encoder_id
(dev, devices, 0),
devices);
radeon_add_legacy_connector(dev, i, devices,
@@ -2123,7 +2123,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
if (tmp & 0x1) {
devices = ATOM_DEVICE_CRT2_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum
+ radeon_get_encoder_id
(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
@@ -2131,7 +2131,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
} else {
devices = ATOM_DEVICE_CRT1_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum
+ radeon_get_encoder_id
(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
@@ -2151,7 +2151,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
if (tmp & 0x1) {
devices |= ATOM_DEVICE_CRT2_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum
+ radeon_get_encoder_id
(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
@@ -2159,7 +2159,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
} else {
devices |= ATOM_DEVICE_CRT1_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum
+ radeon_get_encoder_id
(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
@@ -2168,7 +2168,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
if ((tmp >> 4) & 0x1) {
devices |= ATOM_DEVICE_DFP2_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum
+ radeon_get_encoder_id
(dev,
ATOM_DEVICE_DFP2_SUPPORT,
0),
@@ -2177,7 +2177,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
} else {
devices |= ATOM_DEVICE_DFP1_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum
+ radeon_get_encoder_id
(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
@@ -2202,7 +2202,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
}
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum
+ radeon_get_encoder_id
(dev, devices, 0),
devices);
radeon_add_legacy_connector(dev, i, devices,
@@ -2215,7 +2215,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
case CONNECTOR_CTV_LEGACY:
case CONNECTOR_STV_LEGACY:
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum
+ radeon_get_encoder_id
(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
@@ -2242,12 +2242,12 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n");
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
@@ -2268,7 +2268,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n");
if (crt_info) {
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -2297,7 +2297,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
COMBIOS_LCD_DDC_INFO_TABLE);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
+ radeon_get_encoder_id(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -2351,7 +2351,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
hpd.hpd = RADEON_HPD_NONE;
ddc_i2c.valid = false;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum
+ radeon_get_encoder_id
(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_connectors.c b/trunk/drivers/gpu/drm/radeon/radeon_connectors.c
index a9dd7847d96e..47c4b276d30c 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -977,29 +977,27 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
enum drm_connector_status ret = connector_status_disconnected;
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+ u8 sink_type;
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
- /* eDP is always DP */
- radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
- if (radeon_dp_getdpcd(radeon_connector))
+ sink_type = radeon_dp_getsinktype(radeon_connector);
+ if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (sink_type == CONNECTOR_OBJECT_ID_eDP)) {
+ if (radeon_dp_getdpcd(radeon_connector)) {
+ radeon_dig_connector->dp_sink_type = sink_type;
ret = connector_status_connected;
+ }
} else {
- radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
- if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
- if (radeon_dp_getdpcd(radeon_connector))
- ret = connector_status_connected;
- } else {
- if (radeon_ddc_probe(radeon_connector))
- ret = connector_status_connected;
+ if (radeon_ddc_probe(radeon_connector)) {
+ radeon_dig_connector->dp_sink_type = sink_type;
+ ret = connector_status_connected;
}
}
- radeon_connector_update_scratch_regs(connector, ret);
return ret;
}
@@ -1039,6 +1037,7 @@ radeon_add_atom_connector(struct drm_device *dev,
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
+ bool linkb,
uint32_t igp_lane_info,
uint16_t connector_object_id,
struct radeon_hpd *hpd,
@@ -1051,16 +1050,10 @@ radeon_add_atom_connector(struct drm_device *dev,
uint32_t subpixel_order = SubPixelNone;
bool shared_ddc = false;
+ /* fixme - tv/cv/din */
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return;
- /* if the user selected tv=0 don't try and add the connector */
- if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
- (connector_type == DRM_MODE_CONNECTOR_Composite) ||
- (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
- (radeon_tv == 0))
- return;
-
/* see if we already added it */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
@@ -1135,6 +1128,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
+ radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
@@ -1164,6 +1158,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
+ radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
@@ -1187,6 +1182,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
+ radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
@@ -1215,22 +1211,25 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_9PinDIN:
- drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
- radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
- rdev->mode_info.load_detect_property,
- 1);
- drm_connector_attach_property(&radeon_connector->base,
- rdev->mode_info.tv_std_property,
- radeon_atombios_get_tv_info(rdev));
- /* no HPD on analog connectors */
- radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ if (radeon_tv == 1) {
+ drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+ radeon_connector->dac_load_detect = true;
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.load_detect_property,
+ 1);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.tv_std_property,
+ radeon_atombios_get_tv_info(rdev));
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ }
break;
case DRM_MODE_CONNECTOR_LVDS:
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
+ radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
@@ -1276,16 +1275,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
struct radeon_connector *radeon_connector;
uint32_t subpixel_order = SubPixelNone;
+ /* fixme - tv/cv/din */
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return;
- /* if the user selected tv=0 don't try and add the connector */
- if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
- (connector_type == DRM_MODE_CONNECTOR_Composite) ||
- (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
- (radeon_tv == 0))
- return;
-
/* see if we already added it */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
@@ -1357,24 +1350,26 @@ radeon_add_legacy_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_9PinDIN:
- drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
- radeon_connector->dac_load_detect = true;
- /* RS400,RC410,RS480 chipset seems to report a lot
- * of false positive on load detect, we haven't yet
- * found a way to make load detect reliable on those
- * chipset, thus just disable it for TV.
- */
- if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
- radeon_connector->dac_load_detect = false;
- drm_connector_attach_property(&radeon_connector->base,
- rdev->mode_info.load_detect_property,
- radeon_connector->dac_load_detect);
- drm_connector_attach_property(&radeon_connector->base,
- rdev->mode_info.tv_std_property,
- radeon_combios_get_tv_info(rdev));
- /* no HPD on analog connectors */
- radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ if (radeon_tv == 1) {
+ drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+ radeon_connector->dac_load_detect = true;
+ /* RS400,RC410,RS480 chipset seems to report a lot
+ * of false positive on load detect, we haven't yet
+ * found a way to make load detect reliable on those
+ * chipset, thus just disable it for TV.
+ */
+ if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
+ radeon_connector->dac_load_detect = false;
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.load_detect_property,
+ radeon_connector->dac_load_detect);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.tv_std_property,
+ radeon_combios_get_tv_info(rdev));
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ }
break;
case DRM_MODE_CONNECTOR_LVDS:
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_device.c b/trunk/drivers/gpu/drm/radeon/radeon_device.c
index 256d204a6d24..4f7a170d1566 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_device.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_device.c
@@ -199,7 +199,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
mc->mc_vram_size = mc->aper_size;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
- if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
+ if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) {
dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
mc->real_vram_size = mc->aper_size;
mc->mc_vram_size = mc->aper_size;
@@ -293,20 +293,30 @@ bool radeon_card_posted(struct radeon_device *rdev)
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
fixed20_12 a;
- u32 sclk = rdev->pm.current_sclk;
- u32 mclk = rdev->pm.current_mclk;
-
- /* sclk/mclk in Mhz */
- a.full = dfixed_const(100);
- rdev->pm.sclk.full = dfixed_const(sclk);
- rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
- rdev->pm.mclk.full = dfixed_const(mclk);
- rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
+ u32 sclk, mclk;
if (rdev->flags & RADEON_IS_IGP) {
+ sclk = radeon_get_engine_clock(rdev);
+ mclk = rdev->clock.default_mclk;
+
+ a.full = dfixed_const(100);
+ rdev->pm.sclk.full = dfixed_const(sclk);
+ rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+ rdev->pm.mclk.full = dfixed_const(mclk);
+ rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
+
a.full = dfixed_const(16);
/* core_bandwidth = sclk(Mhz) * 16 */
rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
+ } else {
+ sclk = radeon_get_engine_clock(rdev);
+ mclk = radeon_get_memory_clock(rdev);
+
+ a.full = dfixed_const(100);
+ rdev->pm.sclk.full = dfixed_const(sclk);
+ rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+ rdev->pm.mclk.full = dfixed_const(mclk);
+ rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
}
}
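
radeon_update_bandwidth_info() above converts clocks reported in 10 kHz units to MHz using the driver's 20.12 fixed-point type (divide by 100). A runnable sketch of that arithmetic; the helpers below are shaped after, but are not, the kernel's dfixed_const()/dfixed_div():

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t full; } fixed20_12;

static fixed20_12 fx_const(uint32_t a)
{
        fixed20_12 f = { .full = a << 12 };
        return f;
}

static fixed20_12 fx_div(fixed20_12 a, fixed20_12 b)
{
        fixed20_12 f = { .full = (uint32_t)(((uint64_t)a.full << 12) / b.full) };
        return f;
}

static double fx_to_double(fixed20_12 a)
{
        return a.full / 4096.0;
}

int main(void)
{
        /* engine clock as reported by the hardware, in 10 kHz units */
        uint32_t sclk_10khz = 68000;                 /* 680 MHz */
        fixed20_12 hundred = fx_const(100);
        fixed20_12 sclk = fx_div(fx_const(sclk_10khz), hundred);

        printf("sclk = %.2f MHz\n", fx_to_double(sclk)); /* prints 680.00 */
        return 0;
}
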
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_display.c b/trunk/drivers/gpu/drm/radeon/radeon_display.c
index 6dd434ad2429..5764f4d3b4f1 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_display.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_display.c
@@ -1094,18 +1094,6 @@ void radeon_modeset_fini(struct radeon_device *rdev)
radeon_i2c_fini(rdev);
}
-static bool is_hdtv_mode(struct drm_display_mode *mode)
-{
- /* try and guess if this is a tv or a monitor */
- if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
- (mode->vdisplay == 576) || /* 576p */
- (mode->vdisplay == 720) || /* 720p */
- (mode->vdisplay == 1080)) /* 1080p */
- return true;
- else
- return false;
-}
-
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -1153,8 +1141,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
if (ASIC_IS_AVIVO(rdev) &&
((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
- drm_detect_hdmi_monitor(radeon_connector->edid) &&
- is_hdtv_mode(mode)))) {
+ drm_detect_hdmi_monitor(radeon_connector->edid)))) {
radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
radeon_crtc->rmx_type = RMX_FULL;
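
The underscan path above derives the border as display_size / 32 + 16. A worked example for a 1920x1080 mode (whether the border applies to each edge is an assumption of this sketch):

#include <stdio.h>

int main(void)
{
        int hdisplay = 1920, vdisplay = 1080;
        int h_border = (hdisplay >> 5) + 16;         /* 1920/32 + 16 = 76 */
        int v_border = (vdisplay >> 5) + 16;         /* 1080/32 + 16 = 49 */

        printf("underscan borders: %d x %d\n", h_border, v_border);
        printf("underscanned area: %d x %d\n",
               hdisplay - 2 * h_border, vdisplay - 2 * v_border);
        return 0;
}
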
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_encoders.c b/trunk/drivers/gpu/drm/radeon/radeon_encoders.c
index 2c293e8304d6..263c8098d7dd 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -81,7 +81,7 @@ void radeon_setup_encoder_clones(struct drm_device *dev)
}
uint32_t
-radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
+radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
{
struct radeon_device *rdev = dev->dev_private;
uint32_t ret = 0;
@@ -97,59 +97,59 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8
if ((rdev->family == CHIP_RS300) ||
(rdev->family == CHIP_RS400) ||
(rdev->family == CHIP_RS480))
- ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
+ ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
else if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1;
+ ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
else
- ret = ENCODER_INTERNAL_DAC1_ENUM_ID1;
+ ret = ENCODER_OBJECT_ID_INTERNAL_DAC1;
break;
case 2: /* dac b */
if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1;
+ ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
else {
/*if (rdev->family == CHIP_R200)
- ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+ ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
else*/
- ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
+ ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
}
break;
case 3: /* external dac */
if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
+ ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
else
- ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+ ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
break;
}
break;
case ATOM_DEVICE_LCD1_SUPPORT:
if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
+ ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
else
- ret = ENCODER_INTERNAL_LVDS_ENUM_ID1;
+ ret = ENCODER_OBJECT_ID_INTERNAL_LVDS;
break;
case ATOM_DEVICE_DFP1_SUPPORT:
if ((rdev->family == CHIP_RS300) ||
(rdev->family == CHIP_RS400) ||
(rdev->family == CHIP_RS480))
- ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+ ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
else if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1;
+ ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
else
- ret = ENCODER_INTERNAL_TMDS1_ENUM_ID1;
+ ret = ENCODER_OBJECT_ID_INTERNAL_TMDS1;
break;
case ATOM_DEVICE_LCD2_SUPPORT:
case ATOM_DEVICE_DFP2_SUPPORT:
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
(rdev->family == CHIP_RS740))
- ret = ENCODER_INTERNAL_DDI_ENUM_ID1;
+ ret = ENCODER_OBJECT_ID_INTERNAL_DDI;
else if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
+ ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
else
- ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+ ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
break;
case ATOM_DEVICE_DFP3_SUPPORT:
- ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
+ ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
break;
}
@@ -228,6 +228,32 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
return NULL;
}
+static struct radeon_connector_atom_dig *
+radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+
+ if (!rdev->is_atom_bios)
+ return NULL;
+
+ connector = radeon_get_connector_for_encoder(encoder);
+ if (!connector)
+ return NULL;
+
+ radeon_connector = to_radeon_connector(connector);
+
+ if (!radeon_connector->con_priv)
+ return NULL;
+
+ dig_connector = radeon_connector->con_priv;
+
+ return dig_connector;
+}
+
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
@@ -486,12 +512,14 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_get_atom_connector_priv_from_encoder(encoder);
union lvds_encoder_control args;
int index = 0;
int hdmi_detected = 0;
uint8_t frev, crev;
- if (!dig)
+ if (!dig || !dig_connector)
return;
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
@@ -534,7 +562,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
args.v1.ucMisc |= (1 << 1);
} else {
- if (dig->linkb)
+ if (dig_connector->linkb)
args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
if (radeon_encoder->pixel_clock > 165000)
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
@@ -573,7 +601,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
}
} else {
- if (dig->linkb)
+ if (dig_connector->linkb)
args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
if (radeon_encoder->pixel_clock > 165000)
args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
@@ -595,8 +623,6 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
@@ -610,13 +636,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
- if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
- /* fix me */
- if (ASIC_IS_DCE4(rdev))
- return ATOM_ENCODER_MODE_DVI;
- else
- return ATOM_ENCODER_MODE_HDMI;
- } else if (radeon_connector->use_digital)
+ if (drm_detect_hdmi_monitor(radeon_connector->edid))
+ return ATOM_ENCODER_MODE_HDMI;
+ else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
else
return ATOM_ENCODER_MODE_CRT;
@@ -624,13 +646,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
- if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
- /* fix me */
- if (ASIC_IS_DCE4(rdev))
- return ATOM_ENCODER_MODE_DVI;
- else
- return ATOM_ENCODER_MODE_HDMI;
- } else
+ if (drm_detect_hdmi_monitor(radeon_connector->edid))
+ return ATOM_ENCODER_MODE_HDMI;
+ else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_LVDS:
@@ -642,13 +660,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
- /* fix me */
- if (ASIC_IS_DCE4(rdev))
- return ATOM_ENCODER_MODE_DVI;
- else
- return ATOM_ENCODER_MODE_HDMI;
- } else
+ else if (drm_detect_hdmi_monitor(radeon_connector->edid))
+ return ATOM_ENCODER_MODE_HDMI;
+ else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_DVIA:
@@ -715,24 +729,13 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_get_atom_connector_priv_from_encoder(encoder);
union dig_encoder_control args;
int index = 0;
uint8_t frev, crev;
- int dp_clock = 0;
- int dp_lane_count = 0;
-
- if (connector) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector_atom_dig *dig_connector =
- radeon_connector->con_priv;
- dp_clock = dig_connector->dp_clock;
- dp_lane_count = dig_connector->dp_lane_count;
- }
-
- /* no dig encoder assigned */
- if (dig->dig_encoder == -1)
+ if (!dig || !dig_connector)
return;
memset(&args, 0, sizeof(args));
@@ -754,9 +757,9 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
- if (dp_clock == 270000)
+ if (dig_connector->dp_clock == 270000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
- args.v1.ucLaneNum = dp_lane_count;
+ args.v1.ucLaneNum = dig_connector->dp_lane_count;
} else if (radeon_encoder->pixel_clock > 165000)
args.v1.ucLaneNum = 8;
else
@@ -778,7 +781,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
break;
}
- if (dig->linkb)
+ if (dig_connector->linkb)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
@@ -801,47 +804,38 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_get_atom_connector_priv_from_encoder(encoder);
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
union dig_transmitter_control args;
int index = 0;
uint8_t frev, crev;
bool is_dp = false;
int pll_id = 0;
- int dp_clock = 0;
- int dp_lane_count = 0;
- int connector_object_id = 0;
- int igp_lane_info = 0;
-
- if (connector) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector_atom_dig *dig_connector =
- radeon_connector->con_priv;
-
- dp_clock = dig_connector->dp_clock;
- dp_lane_count = dig_connector->dp_lane_count;
- connector_object_id =
- (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- igp_lane_info = dig_connector->igp_lane_info;
- }
- /* no dig encoder assigned */
- if (dig->dig_encoder == -1)
+ if (!dig || !dig_connector)
return;
+ connector = radeon_get_connector_for_encoder(encoder);
+ radeon_connector = to_radeon_connector(connector);
+
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
is_dp = true;
memset(&args, 0, sizeof(args));
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev))
index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
- break;
+ else {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ index = GetIndexIntoMasterTable(COMMAND, DIG1TransmitterControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ index = GetIndexIntoMasterTable(COMMAND, DIG2TransmitterControl);
+ break;
+ }
}
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
@@ -849,14 +843,14 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v1.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
- args.v1.usInitInfo = connector_object_id;
+ args.v1.usInitInfo = radeon_connector->connector_object_id;
} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
args.v1.asMode.ucLaneSel = lane_num;
args.v1.asMode.ucLaneSet = lane_set;
} else {
if (is_dp)
args.v1.usPixelClock =
- cpu_to_le16(dp_clock / 10);
+ cpu_to_le16(dig_connector->dp_clock / 10);
else if (radeon_encoder->pixel_clock > 165000)
args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
@@ -864,13 +858,13 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
}
if (ASIC_IS_DCE4(rdev)) {
if (is_dp)
- args.v3.ucLaneNum = dp_lane_count;
+ args.v3.ucLaneNum = dig_connector->dp_lane_count;
else if (radeon_encoder->pixel_clock > 165000)
args.v3.ucLaneNum = 8;
else
args.v3.ucLaneNum = 4;
- if (dig->linkb) {
+ if (dig_connector->linkb) {
args.v3.acConfig.ucLinkSel = 1;
args.v3.acConfig.ucEncoderSel = 1;
}
@@ -910,7 +904,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
}
} else if (ASIC_IS_DCE32(rdev)) {
args.v2.acConfig.ucEncoderSel = dig->dig_encoder;
- if (dig->linkb)
+ if (dig_connector->linkb)
args.v2.acConfig.ucLinkSel = 1;
switch (radeon_encoder->encoder_id) {
@@ -944,23 +938,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
if ((rdev->flags & RADEON_IS_IGP) &&
(radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
- if (igp_lane_info & 0x1)
+ if (dig_connector->igp_lane_info & 0x1)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- else if (igp_lane_info & 0x2)
+ else if (dig_connector->igp_lane_info & 0x2)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
- else if (igp_lane_info & 0x4)
+ else if (dig_connector->igp_lane_info & 0x4)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
- else if (igp_lane_info & 0x8)
+ else if (dig_connector->igp_lane_info & 0x8)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
} else {
- if (igp_lane_info & 0x3)
+ if (dig_connector->igp_lane_info & 0x3)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
- else if (igp_lane_info & 0xc)
+ else if (dig_connector->igp_lane_info & 0xc)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
}
}
- if (dig->linkb)
+ if (dig_connector->linkb)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
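
The igp_lane_info tests above select which group of lanes the IGP routes to the transmitter: single bits pick a group of four, adjacent pairs of bits pick a group of eight for dual-link. A sketch of that mapping with descriptive strings in place of the ATOM_TRANSMITTER_CONFIG_* values:

#include <stdio.h>
#include <stdbool.h>

static const char *pick_lanes(unsigned igp_lane_info, bool dual_link)
{
        if (!dual_link) {
                if (igp_lane_info & 0x1) return "lanes 0-3";
                if (igp_lane_info & 0x2) return "lanes 4-7";
                if (igp_lane_info & 0x4) return "lanes 8-11";
                if (igp_lane_info & 0x8) return "lanes 12-15";
        } else {
                if (igp_lane_info & 0x3) return "lanes 0-7";
                if (igp_lane_info & 0xc) return "lanes 8-15";
        }
        return "no lanes";
}

int main(void)
{
        printf("0x2, single link -> %s\n", pick_lanes(0x2, false));
        printf("0xc, dual link   -> %s\n", pick_lanes(0xc, true));
        return 0;
}
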
@@ -1078,7 +1072,8 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
if (is_dig) {
switch (mode) {
case DRM_MODE_DPMS_ON:
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ if (!ASIC_IS_DCE4(rdev))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
@@ -1090,7 +1085,8 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+ if (!ASIC_IS_DCE4(rdev))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF);
@@ -1294,22 +1290,24 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
uint32_t dig_enc_in_use = 0;
if (ASIC_IS_DCE4(rdev)) {
- dig = radeon_encoder->enc_priv;
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_get_atom_connector_priv_from_encoder(encoder);
+
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- if (dig->linkb)
+ if (dig_connector->linkb)
return 1;
else
return 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- if (dig->linkb)
+ if (dig_connector->linkb)
return 3;
else
return 2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (dig->linkb)
+ if (dig_connector->linkb)
return 5;
else
return 4;
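
The DCE4 branch above assigns each UNIPHY block two DIG encoder slots, link A on the even slot and link B on the odd one, i.e. 2 * block + linkb. A small sketch of that mapping (numbering the first block as 0 is this sketch's convention):

#include <stdio.h>

static int pick_dig_encoder(int uniphy_block, int linkb)
{
        return 2 * uniphy_block + (linkb ? 1 : 0);
}

int main(void)
{
        for (int block = 0; block < 3; block++)
                for (int linkb = 0; linkb < 2; linkb++)
                        printf("UNIPHY block %d, link %c -> DIG%d\n", block,
                               linkb ? 'B' : 'A', pick_dig_encoder(block, linkb));
        return 0;
}
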
@@ -1643,7 +1641,6 @@ radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
struct radeon_encoder_atom_dig *
radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
{
- int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
if (!dig)
@@ -1653,16 +1650,11 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
dig->coherent_mode = true;
dig->dig_encoder = -1;
- if (encoder_enum == 2)
- dig->linkb = true;
- else
- dig->linkb = false;
-
return dig;
}
void
-radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
@@ -1671,7 +1663,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
/* see if we already added it */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
- if (radeon_encoder->encoder_enum == encoder_enum) {
+ if (radeon_encoder->encoder_id == encoder_id) {
radeon_encoder->devices |= supported_device;
return;
}
@@ -1699,8 +1691,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
radeon_encoder->enc_priv = NULL;
- radeon_encoder->encoder_enum = encoder_enum;
- radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ radeon_encoder->encoder_id = encoder_id;
radeon_encoder->devices = supported_device;
radeon_encoder->rmx_type = RMX_OFF;
radeon_encoder->underscan_type = UNDERSCAN_OFF;
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_fb.c b/trunk/drivers/gpu/drm/radeon/radeon_fb.c
index c74a8b20d941..dbf86962bdd1 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_fb.c
@@ -118,7 +118,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
- false, true,
+ false, ttm_bo_type_kernel,
&gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_i2c.c b/trunk/drivers/gpu/drm/radeon/radeon_i2c.c
index 6a13ee38a5b9..bfd2ce5f5372 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -99,13 +99,6 @@ static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
}
}
- /* switch the pads to ddc mode */
- if (ASIC_IS_DCE3(rdev) && rec->hw_capable) {
- temp = RREG32(rec->mask_clk_reg);
- temp &= ~(1 << 16);
- WREG32(rec->mask_clk_reg, temp);
- }
-
/* clear the output pin values */
temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
WREG32(rec->a_clk_reg, temp);
@@ -213,7 +206,7 @@ static void post_xfer(struct i2c_adapter *i2c_adap)
static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
{
- u32 sclk = rdev->pm.current_sclk;
+ u32 sclk = radeon_get_engine_clock(rdev);
u32 prescale = 0;
u32 nm;
u8 n, m, loop;
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_irq_kms.c b/trunk/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a108c7ed14f5..059bfa4098d7 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -121,12 +121,11 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
* chips. Disable MSI on them for now.
*/
if ((rdev->family >= CHIP_RV380) &&
- (!(rdev->flags & RADEON_IS_IGP)) &&
- (!(rdev->flags & RADEON_IS_AGP))) {
+ (!(rdev->flags & RADEON_IS_IGP))) {
int ret = pci_enable_msi(rdev->pdev);
if (!ret) {
rdev->msi_enabled = 1;
- dev_info(rdev->dev, "radeon: using MSI.\n");
+ DRM_INFO("radeon: using MSI.\n");
}
}
rdev->irq.installed = true;
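
After this hunk, MSI is attempted only on RV380-and-newer non-IGP parts. A pure-logic restatement of that condition (family values and flag bits below are placeholders):

#include <stdbool.h>
#include <stdio.h>

enum { CHIP_R300 = 10, CHIP_RV380 = 15, CHIP_RV770 = 40 };   /* illustrative ordering */
#define RADEON_IS_IGP (1u << 0)                              /* placeholder flag bit */

static bool want_msi(int family, unsigned flags)
{
        return family >= CHIP_RV380 && !(flags & RADEON_IS_IGP);
}

int main(void)
{
        printf("RV770 dGPU: %d\n", want_msi(CHIP_RV770, 0));
        printf("RV380 IGP : %d\n", want_msi(CHIP_RV380, RADEON_IS_IGP));
        printf("R300  dGPU: %d\n", want_msi(CHIP_R300, 0));
        return 0;
}
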
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_kms.c b/trunk/drivers/gpu/drm/radeon/radeon_kms.c
index 5eee3c41d124..b1c8ace5f080 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_kms.c
@@ -161,7 +161,6 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
return -EINVAL;
}
- break;
case RADEON_INFO_WANT_HYPERZ:
/* The "value" here is both an input and output parameter.
* If the input value is 1, filp requests hyper-z access.
@@ -324,45 +323,45 @@ KMS_INVALID_IOCTL(radeon_surface_free_kms)
struct drm_ioctl_desc radeon_ioctls_kms[] = {
- DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
/* KMS */
- DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 305049afde15..989df519a1e4 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -272,7 +272,7 @@ static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
if (!ref_div)
return 1;
- vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
+ vcoFreq = ((unsigned)ref_freq & fb_div) / ref_div;
/*
* This is horribly crude: the VCO frequency range is divided into
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/trunk/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 0b8397000f4c..b8149cbc0c70 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -1345,7 +1345,7 @@ static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct ra
}
void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
@@ -1354,7 +1354,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_
/* see if we already added it */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
- if (radeon_encoder->encoder_enum == encoder_enum) {
+ if (radeon_encoder->encoder_id == encoder_id) {
radeon_encoder->devices |= supported_device;
return;
}
@@ -1374,8 +1374,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_
radeon_encoder->enc_priv = NULL;
- radeon_encoder->encoder_enum = encoder_enum;
- radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ radeon_encoder->encoder_id = encoder_id;
radeon_encoder->devices = supported_device;
radeon_encoder->rmx_type = RMX_OFF;
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_mode.h b/trunk/drivers/gpu/drm/radeon/radeon_mode.h
index efbe975312dc..5bbc086b9267 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/trunk/drivers/gpu/drm/radeon/radeon_mode.h
@@ -342,7 +342,6 @@ struct radeon_atom_ss {
};
struct radeon_encoder_atom_dig {
- bool linkb;
/* atom dig */
bool coherent_mode;
int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
@@ -361,7 +360,6 @@ struct radeon_encoder_atom_dac {
struct radeon_encoder {
struct drm_encoder base;
- uint32_t encoder_enum;
uint32_t encoder_id;
uint32_t devices;
uint32_t active_device;
@@ -380,6 +378,7 @@ struct radeon_encoder {
struct radeon_connector_atom_dig {
uint32_t igp_lane_info;
+ bool linkb;
/* displayport */
struct radeon_i2c_chan *dp_i2c_bus;
u8 dpcd[8];
@@ -600,6 +599,7 @@ extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct d
void radeon_enc_destroy(struct drm_encoder *encoder);
void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
void radeon_combios_asic_init(struct drm_device *dev);
+extern int radeon_static_clocks_init(struct drm_device *dev);
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_pm.c b/trunk/drivers/gpu/drm/radeon/radeon_pm.c
index f87efec76236..58038f5cab38 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_pm.c
@@ -226,11 +226,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
int i;
- /* no need to take locks, etc. if nothing's going to change */
- if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
- (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
- return;
-
mutex_lock(&rdev->ddev->struct_mutex);
mutex_lock(&rdev->vram_mutex);
mutex_lock(&rdev->cp.mutex);
@@ -637,6 +632,8 @@ void radeon_pm_fini(struct radeon_device *rdev)
}
radeon_hwmon_fini(rdev);
+ if (rdev->pm.i2c_bus)
+ radeon_i2c_destroy(rdev->pm.i2c_bus);
}
void radeon_pm_compute_clocks(struct radeon_device *rdev)
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_state.c b/trunk/drivers/gpu/drm/radeon/radeon_state.c
index 4ae5a3d1074e..b3ba44c0a818 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_state.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_state.c
@@ -3228,34 +3228,34 @@ void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
}
struct drm_ioctl_desc radeon_ioctls[] = {
- DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
+ DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
};
int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
diff --git a/trunk/drivers/gpu/drm/radeon/rv770.c b/trunk/drivers/gpu/drm/radeon/rv770.c
index bfa59db374d2..f1c796810117 100644
--- a/trunk/drivers/gpu/drm/radeon/rv770.c
+++ b/trunk/drivers/gpu/drm/radeon/rv770.c
@@ -905,54 +905,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
}
-static int rv770_vram_scratch_init(struct radeon_device *rdev)
-{
- int r;
- u64 gpu_addr;
-
- if (rdev->vram_scratch.robj == NULL) {
- r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
- true, RADEON_GEM_DOMAIN_VRAM,
- &rdev->vram_scratch.robj);
- if (r) {
- return r;
- }
- }
-
- r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->vram_scratch.robj,
- RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
- if (r) {
- radeon_bo_unreserve(rdev->vram_scratch.robj);
- return r;
- }
- r = radeon_bo_kmap(rdev->vram_scratch.robj,
- (void **)&rdev->vram_scratch.ptr);
- if (r)
- radeon_bo_unpin(rdev->vram_scratch.robj);
- radeon_bo_unreserve(rdev->vram_scratch.robj);
-
- return r;
-}
-
-static void rv770_vram_scratch_fini(struct radeon_device *rdev)
-{
- int r;
-
- if (rdev->vram_scratch.robj == NULL) {
- return;
- }
- r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
- if (likely(r == 0)) {
- radeon_bo_kunmap(rdev->vram_scratch.robj);
- radeon_bo_unpin(rdev->vram_scratch.robj);
- radeon_bo_unreserve(rdev->vram_scratch.robj);
- }
- radeon_bo_unref(&rdev->vram_scratch.robj);
-}
-
int rv770_mc_init(struct radeon_device *rdev)
{
u32 tmp;
@@ -1018,9 +970,6 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
}
- r = rv770_vram_scratch_init(rdev);
- if (r)
- return r;
rv770_gpu_init(rdev);
r = r600_blit_init(rdev);
if (r) {
@@ -1074,6 +1023,11 @@ int rv770_resume(struct radeon_device *rdev)
*/
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ /* Initialize clocks */
+ r = radeon_clocks_init(rdev);
+ if (r) {
+ return r;
+ }
r = rv770_startup(rdev);
if (r) {
@@ -1164,6 +1118,9 @@ int rv770_init(struct radeon_device *rdev)
radeon_surface_init(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
+ r = radeon_clocks_init(rdev);
+ if (r)
+ return r;
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -1238,9 +1195,9 @@ void rv770_fini(struct radeon_device *rdev)
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
- rv770_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
+ radeon_clocks_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
diff --git a/trunk/drivers/gpu/drm/savage/savage_bci.c b/trunk/drivers/gpu/drm/savage/savage_bci.c
index bf5f83ea14fe..976dc8d25280 100644
--- a/trunk/drivers/gpu/drm/savage/savage_bci.c
+++ b/trunk/drivers/gpu/drm/savage/savage_bci.c
@@ -1082,10 +1082,10 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
}
struct drm_ioctl_desc savage_ioctls[] = {
- DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
};
int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
diff --git a/trunk/drivers/gpu/drm/sis/sis_mm.c b/trunk/drivers/gpu/drm/sis/sis_mm.c
index 7fe2b63412ce..07d0f2979cac 100644
--- a/trunk/drivers/gpu/drm/sis/sis_mm.c
+++ b/trunk/drivers/gpu/drm/sis/sis_mm.c
@@ -320,12 +320,12 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
}
struct drm_ioctl_desc sis_ioctls[] = {
- DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
};
int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
diff --git a/trunk/drivers/gpu/drm/via/via_dma.c b/trunk/drivers/gpu/drm/via/via_dma.c
index cc0ffa9abd00..68dda74a50ae 100644
--- a/trunk/drivers/gpu/drm/via/via_dma.c
+++ b/trunk/drivers/gpu/drm/via/via_dma.c
@@ -722,20 +722,20 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
}
struct drm_ioctl_desc via_ioctls[] = {
- DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
+ DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
};
int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 72ec2e2b6e97..9dd395b90216 100644
--- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -99,47 +99,47 @@
*/
#define VMW_IOCTL_DEF(ioctl, func, flags) \
- [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
+ [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
/**
* Ioctl definitions.
*/
static struct drm_ioctl_desc vmw_ioctls[] = {
- VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS,
vmw_kms_cursor_bypass_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
};
diff --git a/trunk/drivers/hid/hid-core.c b/trunk/drivers/hid/hid-core.c
index 0c52899be964..e635199a0cd2 100644
--- a/trunk/drivers/hid/hid-core.c
+++ b/trunk/drivers/hid/hid-core.c
@@ -1299,7 +1299,6 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
diff --git a/trunk/drivers/hid/hid-egalax.c b/trunk/drivers/hid/hid-egalax.c
index 8ca7f65cf2f8..f44bdc084cb2 100644
--- a/trunk/drivers/hid/hid-egalax.c
+++ b/trunk/drivers/hid/hid-egalax.c
@@ -159,13 +159,6 @@ static int egalax_event(struct hid_device *hid, struct hid_field *field,
{
struct egalax_data *td = hid_get_drvdata(hid);
- /* Note, eGalax has two product lines: the first is resistive and
- * uses a standard parallel multitouch protocol (product ID ==
- * 48xx). The second is capacitive and uses an unusual "serial"
- * protocol with a different message for each multitouch finger
- * (product ID == 72xx). We do not yet generate a correct event
- * sequence for the capacitive/serial protocol.
- */
if (hid->claimed & HID_CLAIMED_INPUT) {
struct input_dev *input = field->hidinput->input;
@@ -253,8 +246,6 @@ static void egalax_remove(struct hid_device *hdev)
static const struct hid_device_id egalax_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
{ }
};
MODULE_DEVICE_TABLE(hid, egalax_devices);
diff --git a/trunk/drivers/hid/hid-ids.h b/trunk/drivers/hid/hid-ids.h
index 85c6d13c9ffa..d3fc13ae094d 100644
--- a/trunk/drivers/hid/hid-ids.h
+++ b/trunk/drivers/hid/hid-ids.h
@@ -188,7 +188,6 @@
#define USB_VENDOR_ID_DWAV 0x0eef
#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1 0x720c
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
diff --git a/trunk/drivers/hid/hid-picolcd.c b/trunk/drivers/hid/hid-picolcd.c
index bc2e07740628..346f0e34987e 100644
--- a/trunk/drivers/hid/hid-picolcd.c
+++ b/trunk/drivers/hid/hid-picolcd.c
@@ -547,11 +547,11 @@ static void picolcd_fb_destroy(struct fb_info *info)
ref_cnt--;
mutex_lock(&info->lock);
(*ref_cnt)--;
- may_release = !*ref_cnt;
+ may_release = !ref_cnt;
mutex_unlock(&info->lock);
if (may_release) {
- vfree((u8 *)info->fix.smem_start);
framebuffer_release(info);
+ vfree((u8 *)info->fix.smem_start);
}
}
diff --git a/trunk/drivers/hid/usbhid/hiddev.c b/trunk/drivers/hid/usbhid/hiddev.c
index 0a29c51114aa..254a003af048 100644
--- a/trunk/drivers/hid/usbhid/hiddev.c
+++ b/trunk/drivers/hid/usbhid/hiddev.c
@@ -266,15 +266,13 @@ static int hiddev_open(struct inode *inode, struct file *file)
{
struct hiddev_list *list;
struct usb_interface *intf;
- struct hid_device *hid;
struct hiddev *hiddev;
int res;
intf = usb_find_interface(&hiddev_driver, iminor(inode));
if (!intf)
return -ENODEV;
- hid = usb_get_intfdata(intf);
- hiddev = hid->hiddev;
+ hiddev = usb_get_intfdata(intf);
if (!(list = kzalloc(sizeof(struct hiddev_list), GFP_KERNEL)))
return -ENOMEM;
@@ -589,7 +587,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct hiddev_list *list = file->private_data;
struct hiddev *hiddev = list->hiddev;
struct hid_device *hid = hiddev->hid;
- struct usb_device *dev;
+ struct usb_device *dev = hid_to_usb_dev(hid);
struct hiddev_collection_info cinfo;
struct hiddev_report_info rinfo;
struct hiddev_field_info finfo;
@@ -603,11 +601,9 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
/* Called without BKL by compat methods so no BKL taken */
/* FIXME: Who or what stop this racing with a disconnect ?? */
- if (!hiddev->exist || !hid)
+ if (!hiddev->exist)
return -EIO;
- dev = hid_to_usb_dev(hid);
-
switch (cmd) {
case HIDIOCGVERSION:
@@ -892,6 +888,7 @@ int hiddev_connect(struct hid_device *hid, unsigned int force)
hid->hiddev = hiddev;
hiddev->hid = hid;
hiddev->exist = 1;
+ usb_set_intfdata(usbhid->intf, usbhid);
retval = usb_register_dev(usbhid->intf, &hiddev_class);
if (retval) {
err_hid("Not able to get a minor for this device.");
diff --git a/trunk/drivers/hwmon/Kconfig b/trunk/drivers/hwmon/Kconfig
index 4d4d09bdec0a..0fba82943125 100644
--- a/trunk/drivers/hwmon/Kconfig
+++ b/trunk/drivers/hwmon/Kconfig
@@ -332,11 +332,11 @@ config SENSORS_F71805F
will be called f71805f.
config SENSORS_F71882FG
- tristate "Fintek F71858FG, F71862FG, F71882FG, F71889FG and F8000"
+ tristate "Fintek F71808E, F71858FG, F71862FG, F71882FG, F71889FG and F8000"
depends on EXPERIMENTAL
help
- If you say yes here you get support for hardware monitoring
- features of the Fintek F71858FG, F71862FG/71863FG, F71882FG/F71883FG,
+ If you say yes here you get support for hardware monitoring features
+ of the Fintek F71808E, F71858FG, F71862FG/71863FG, F71882FG/F71883FG,
F71889FG and F8000 Super-I/O chips.
This driver can also be built as a module. If so, the module
diff --git a/trunk/drivers/hwmon/ads7871.c b/trunk/drivers/hwmon/ads7871.c
index 52319340e182..b300a2048af1 100644
--- a/trunk/drivers/hwmon/ads7871.c
+++ b/trunk/drivers/hwmon/ads7871.c
@@ -160,12 +160,30 @@ static const struct attribute_group ads7871_group = {
static int __devinit ads7871_probe(struct spi_device *spi)
{
- int ret, err;
+ int status, ret, err = 0;
uint8_t val;
struct ads7871_data *pdata;
dev_dbg(&spi->dev, "probe\n");
+ pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
+ if (!pdata) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ status = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
+ if (status < 0)
+ goto error_free;
+
+ pdata->hwmon_dev = hwmon_device_register(&spi->dev);
+ if (IS_ERR(pdata->hwmon_dev)) {
+ err = PTR_ERR(pdata->hwmon_dev);
+ goto error_remove;
+ }
+
+ spi_set_drvdata(spi, pdata);
+
/* Configure the SPI bus */
spi->mode = (SPI_MODE_0);
spi->bits_per_word = 8;
@@ -183,24 +201,6 @@ static int __devinit ads7871_probe(struct spi_device *spi)
we need to make sure we really have a chip*/
if (val != ret) {
err = -ENODEV;
- goto exit;
- }
-
- pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
- if (!pdata) {
- err = -ENOMEM;
- goto exit;
- }
-
- err = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
- if (err < 0)
- goto error_free;
-
- spi_set_drvdata(spi, pdata);
-
- pdata->hwmon_dev = hwmon_device_register(&spi->dev);
- if (IS_ERR(pdata->hwmon_dev)) {
- err = PTR_ERR(pdata->hwmon_dev);
goto error_remove;
}
diff --git a/trunk/drivers/hwmon/coretemp.c b/trunk/drivers/hwmon/coretemp.c
index de8111114f46..c070c9714cbe 100644
--- a/trunk/drivers/hwmon/coretemp.c
+++ b/trunk/drivers/hwmon/coretemp.c
@@ -518,6 +518,7 @@ static struct notifier_block coretemp_cpu_notifier __refdata = {
static int __init coretemp_init(void)
{
int i, err = -ENODEV;
+ struct pdev_entry *p, *n;
/* quick check if we run Intel */
if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL)
diff --git a/trunk/drivers/hwmon/f71882fg.c b/trunk/drivers/hwmon/f71882fg.c
index 537841ef44b9..6207120dcd4d 100644
--- a/trunk/drivers/hwmon/f71882fg.c
+++ b/trunk/drivers/hwmon/f71882fg.c
@@ -45,6 +45,7 @@
#define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */
#define SIO_FINTEK_ID 0x1934 /* Manufacturers ID */
+#define SIO_F71808_ID 0x0901 /* Chipset ID */
#define SIO_F71858_ID 0x0507 /* Chipset ID */
#define SIO_F71862_ID 0x0601 /* Chipset ID */
#define SIO_F71882_ID 0x0541 /* Chipset ID */
@@ -96,9 +97,10 @@ static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
-enum chips { f71858fg, f71862fg, f71882fg, f71889fg, f8000 };
+enum chips { f71808fg, f71858fg, f71862fg, f71882fg, f71889fg, f8000 };
static const char *f71882fg_names[] = {
+ "f71808fg",
"f71858fg",
"f71862fg",
"f71882fg",
@@ -306,8 +308,8 @@ static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = {
SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
};
-/* Temp and in attr common to the f71862fg, f71882fg and f71889fg */
-static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = {
+/* In attr common to the f71862fg, f71882fg and f71889fg */
+static struct sensor_device_attribute_2 fxxxx_in_attr[] = {
SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
@@ -317,6 +319,22 @@ static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = {
SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6),
SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7),
SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8),
+};
+
+/* In attr for the f71808fg */
+static struct sensor_device_attribute_2 f71808_in_attr[] = {
+ SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
+ SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
+ SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
+ SENSOR_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 0, 3),
+ SENSOR_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 0, 4),
+ SENSOR_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 0, 5),
+ SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 7),
+ SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 8),
+};
+
+/* Temp attr common to the f71808fg, f71862fg, f71882fg and f71889fg */
+static struct sensor_device_attribute_2 fxxxx_temp_attr[] = {
SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 1),
SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max,
store_temp_max, 0, 1),
@@ -355,6 +373,10 @@ static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = {
store_temp_beep, 0, 6),
SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 2),
SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
+};
+
+/* Temp and in attr common to the f71862fg, f71882fg and f71889fg */
+static struct sensor_device_attribute_2 f71862_temp_attr[] = {
SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 3),
SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max,
store_temp_max, 0, 3),
@@ -989,6 +1011,11 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
data->temp_type[1] = 6;
break;
}
+ } else if (data->type == f71808fg) {
+ reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE);
+ data->temp_type[1] = (reg & 0x02) ? 2 : 4;
+ data->temp_type[2] = (reg & 0x04) ? 2 : 4;
+
} else {
reg2 = f71882fg_read8(data, F71882FG_REG_PECI);
if ((reg2 & 0x03) == 0x01)
@@ -1871,7 +1898,8 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
val /= 1000;
- if (data->type == f71889fg)
+ if (data->type == f71889fg
+ || data->type == f71808fg)
val = SENSORS_LIMIT(val, -128, 127);
else
val = SENSORS_LIMIT(val, 0, 127);
@@ -1974,8 +2002,28 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
/* fall through! */
case f71862fg:
err = f71882fg_create_sysfs_files(pdev,
- fxxxx_in_temp_attr,
- ARRAY_SIZE(fxxxx_in_temp_attr));
+ f71862_temp_attr,
+ ARRAY_SIZE(f71862_temp_attr));
+ if (err)
+ goto exit_unregister_sysfs;
+ err = f71882fg_create_sysfs_files(pdev,
+ fxxxx_in_attr,
+ ARRAY_SIZE(fxxxx_in_attr));
+ if (err)
+ goto exit_unregister_sysfs;
+ err = f71882fg_create_sysfs_files(pdev,
+ fxxxx_temp_attr,
+ ARRAY_SIZE(fxxxx_temp_attr));
+ break;
+ case f71808fg:
+ err = f71882fg_create_sysfs_files(pdev,
+ f71808_in_attr,
+ ARRAY_SIZE(f71808_in_attr));
+ if (err)
+ goto exit_unregister_sysfs;
+ err = f71882fg_create_sysfs_files(pdev,
+ fxxxx_temp_attr,
+ ARRAY_SIZE(fxxxx_temp_attr));
break;
case f8000:
err = f71882fg_create_sysfs_files(pdev,
@@ -2002,6 +2050,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
case f71862fg:
err = (data->pwm_enable & 0x15) != 0x15;
break;
+ case f71808fg:
case f71882fg:
case f71889fg:
err = 0;
@@ -2047,6 +2096,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
f8000_auto_pwm_attr,
ARRAY_SIZE(f8000_auto_pwm_attr));
break;
+ case f71808fg:
case f71889fg:
for (i = 0; i < nr_fans; i++) {
data->pwm_auto_point_mapping[i] =
@@ -2126,8 +2176,22 @@ static int f71882fg_remove(struct platform_device *pdev)
/* fall through! */
case f71862fg:
f71882fg_remove_sysfs_files(pdev,
- fxxxx_in_temp_attr,
- ARRAY_SIZE(fxxxx_in_temp_attr));
+ f71862_temp_attr,
+ ARRAY_SIZE(f71862_temp_attr));
+ f71882fg_remove_sysfs_files(pdev,
+ fxxxx_in_attr,
+ ARRAY_SIZE(fxxxx_in_attr));
+ f71882fg_remove_sysfs_files(pdev,
+ fxxxx_temp_attr,
+ ARRAY_SIZE(fxxxx_temp_attr));
+ break;
+ case f71808fg:
+ f71882fg_remove_sysfs_files(pdev,
+ f71808_in_attr,
+ ARRAY_SIZE(f71808_in_attr));
+ f71882fg_remove_sysfs_files(pdev,
+ fxxxx_temp_attr,
+ ARRAY_SIZE(fxxxx_temp_attr));
break;
case f8000:
f71882fg_remove_sysfs_files(pdev,
@@ -2195,6 +2259,9 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
devid = force_id ? force_id : superio_inw(sioaddr, SIO_REG_DEVID);
switch (devid) {
+ case SIO_F71808_ID:
+ sio_data->type = f71808fg;
+ break;
case SIO_F71858_ID:
sio_data->type = f71858fg;
break;
diff --git a/trunk/drivers/hwmon/hp_accel.c b/trunk/drivers/hwmon/hp_accel.c
index 36e957532230..7580f55e67e3 100644
--- a/trunk/drivers/hwmon/hp_accel.c
+++ b/trunk/drivers/hwmon/hp_accel.c
@@ -221,8 +221,6 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
- AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted),
- AXIS_DMI_MATCH("Mini5102", "HP Mini 5102", xy_rotated_left_usd),
{ NULL, }
/* Laptop models without axis info (yet):
* "NC6910" "HP Compaq 6910"
diff --git a/trunk/drivers/hwmon/k8temp.c b/trunk/drivers/hwmon/k8temp.c
index 39ead2a4d3c5..b9bb3e0ca530 100644
--- a/trunk/drivers/hwmon/k8temp.c
+++ b/trunk/drivers/hwmon/k8temp.c
@@ -143,37 +143,6 @@ static const struct pci_device_id k8temp_ids[] = {
MODULE_DEVICE_TABLE(pci, k8temp_ids);
-static int __devinit is_rev_g_desktop(u8 model)
-{
- u32 brandidx;
-
- if (model < 0x69)
- return 0;
-
- if (model == 0xc1 || model == 0x6c || model == 0x7c)
- return 0;
-
- /*
- * Differentiate between AM2 and ASB1.
- * See "Constructing the processor Name String" in "Revision
- * Guide for AMD NPT Family 0Fh Processors" (33610).
- */
- brandidx = cpuid_ebx(0x80000001);
- brandidx = (brandidx >> 9) & 0x1f;
-
- /* Single core */
- if ((model == 0x6f || model == 0x7f) &&
- (brandidx == 0x7 || brandidx == 0x9 || brandidx == 0xc))
- return 0;
-
- /* Dual core */
- if (model == 0x6b &&
- (brandidx == 0xb || brandidx == 0xc))
- return 0;
-
- return 1;
-}
-
static int __devinit k8temp_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -210,7 +179,9 @@ static int __devinit k8temp_probe(struct pci_dev *pdev,
"wrong - check erratum #141\n");
}
- if (is_rev_g_desktop(model)) {
+ if ((model >= 0x69) &&
+ !(model == 0xc1 || model == 0x6c || model == 0x7c ||
+ model == 0x6b || model == 0x6f || model == 0x7f)) {
/*
* RevG desktop CPUs (i.e. no socket S1G1 or
* ASB1 parts) need additional offset,
diff --git a/trunk/drivers/ieee1394/ohci1394.c b/trunk/drivers/ieee1394/ohci1394.c
index 50815022cff1..d0dc1db80b29 100644
--- a/trunk/drivers/ieee1394/ohci1394.c
+++ b/trunk/drivers/ieee1394/ohci1394.c
@@ -1106,7 +1106,7 @@ static int ohci_iso_recv_init(struct hpsb_iso *iso)
if (recv->block_irq_interval * 4 > iso->buf_packets)
recv->block_irq_interval = iso->buf_packets / 4;
if (recv->block_irq_interval < 1)
- recv->block_irq_interval = 1;
+ recv->block_irq_interval = 1;
/* choose a buffer stride */
/* must be a power of 2, and <= PAGE_SIZE */
diff --git a/trunk/drivers/infiniband/hw/cxgb3/cxio_hal.h b/trunk/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 78fbe9ffe7f0..8f0caf7d4482 100644
--- a/trunk/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/trunk/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -53,7 +53,7 @@
#define T3_MAX_PBL_SIZE 256
#define T3_MAX_RQ_SIZE 1024
#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
-#define T3_MAX_CQ_DEPTH 65536
+#define T3_MAX_CQ_DEPTH 262144
#define T3_MAX_NUM_STAG (1<<15)
#define T3_MAX_MR_SIZE 0x100000000ULL
#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
diff --git a/trunk/drivers/infiniband/hw/nes/nes_cm.c b/trunk/drivers/infiniband/hw/nes/nes_cm.c
index 61e0efd4ccfb..443cea55daac 100644
--- a/trunk/drivers/infiniband/hw/nes/nes_cm.c
+++ b/trunk/drivers/infiniband/hw/nes/nes_cm.c
@@ -502,9 +502,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
static void nes_retrans_expired(struct nes_cm_node *cm_node)
{
struct iw_cm_id *cm_id = cm_node->cm_id;
- enum nes_cm_node_state state = cm_node->state;
- cm_node->state = NES_CM_STATE_CLOSED;
- switch (state) {
+ switch (cm_node->state) {
case NES_CM_STATE_SYN_RCVD:
case NES_CM_STATE_CLOSING:
rem_ref_cm_node(cm_node->cm_core, cm_node);
@@ -513,6 +511,7 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node)
case NES_CM_STATE_FIN_WAIT1:
if (cm_node->cm_id)
cm_id->rem_ref(cm_id);
+ cm_node->state = NES_CM_STATE_CLOSED;
send_reset(cm_node, NULL);
break;
default:
@@ -1440,6 +1439,9 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
break;
case NES_CM_STATE_MPAREQ_RCVD:
passive_state = atomic_add_return(1, &cm_node->passive_state);
+ if (passive_state == NES_SEND_RESET_EVENT)
+ create_event(cm_node, NES_CM_EVENT_RESET);
+ cm_node->state = NES_CM_STATE_CLOSED;
dev_kfree_skb_any(skb);
break;
case NES_CM_STATE_ESTABLISHED:
@@ -1454,7 +1456,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
case NES_CM_STATE_CLOSED:
drop_packet(skb);
break;
- case NES_CM_STATE_FIN_WAIT2:
case NES_CM_STATE_FIN_WAIT1:
case NES_CM_STATE_LAST_ACK:
cm_node->cm_id->rem_ref(cm_node->cm_id);
@@ -2776,12 +2777,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
return -EINVAL;
}
- passive_state = atomic_add_return(1, &cm_node->passive_state);
- if (passive_state == NES_SEND_RESET_EVENT) {
- rem_ref_cm_node(cm_node->cm_core, cm_node);
- return -ECONNRESET;
- }
-
/* associate the node with the QP */
nesqp->cm_node = (void *)cm_node;
cm_node->nesqp = nesqp;
@@ -2984,6 +2979,9 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
"ret=%d\n", __func__, __LINE__, ret);
+ passive_state = atomic_add_return(1, &cm_node->passive_state);
+ if (passive_state == NES_SEND_RESET_EVENT)
+ create_event(cm_node, NES_CM_EVENT_RESET);
return 0;
}
diff --git a/trunk/drivers/infiniband/hw/nes/nes_hw.c b/trunk/drivers/infiniband/hw/nes/nes_hw.c
index 1980a461c499..f8233c851c69 100644
--- a/trunk/drivers/infiniband/hw/nes/nes_hw.c
+++ b/trunk/drivers/infiniband/hw/nes/nes_hw.c
@@ -3468,19 +3468,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
return; /* Ignore it, wait for close complete */
if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
- if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) &&
- (nesqp->ibqp_state == IB_QPS_RTS) &&
- ((nesadapter->eeprom_version >> 16) != NES_A0)) {
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
- nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
- nes_cm_disconn(nesqp);
- }
nesqp->cm_id->add_ref(nesqp->cm_id);
schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
NES_TIMER_TYPE_CLOSE, 1, 0);
@@ -3490,6 +3477,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
async_event_id, nesqp->last_aeq, tcp_state);
}
+
break;
case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
if (nesqp->term_flags) {
diff --git a/trunk/drivers/infiniband/hw/nes/nes_hw.h b/trunk/drivers/infiniband/hw/nes/nes_hw.h
index 1204c3432b63..aa9183db32b1 100644
--- a/trunk/drivers/infiniband/hw/nes/nes_hw.h
+++ b/trunk/drivers/infiniband/hw/nes/nes_hw.h
@@ -45,7 +45,6 @@
#define NES_PHY_TYPE_KR 9
#define NES_MULTICAST_PF_MAX 8
-#define NES_A0 3
enum pci_regs {
NES_INT_STAT = 0x0000,
diff --git a/trunk/drivers/infiniband/hw/nes/nes_nic.c b/trunk/drivers/infiniband/hw/nes/nes_nic.c
index 10560c796fd6..6dfdd49cdbcf 100644
--- a/trunk/drivers/infiniband/hw/nes/nes_nic.c
+++ b/trunk/drivers/infiniband/hw/nes/nes_nic.c
@@ -1446,14 +1446,14 @@ static int nes_netdev_set_pauseparam(struct net_device *netdev,
NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
nes_write_indexed(nesdev,
- NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
+ NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp);
nesdev->disable_tx_flow_control = 0;
} else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
nes_write_indexed(nesdev,
- NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
+ NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp);
nesdev->disable_tx_flow_control = 1;
}
if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
diff --git a/trunk/drivers/input/input.c b/trunk/drivers/input/input.c
index ab6982056518..a9b025f4147a 100644
--- a/trunk/drivers/input/input.c
+++ b/trunk/drivers/input/input.c
@@ -1599,14 +1599,11 @@ EXPORT_SYMBOL(input_free_device);
* @dev: input device supporting MT events and finger tracking
* @num_slots: number of slots used by the device
*
- * This function allocates all necessary memory for MT slot handling in the
- * input device, and adds ABS_MT_SLOT to the device capabilities. All slots
- * are initially marked as unused iby setting ABS_MT_TRACKING_ID to -1.
+ * This function allocates all necessary memory for MT slot handling
+ * in the input device, and adds ABS_MT_SLOT to the device capabilities.
*/
int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
{
- int i;
-
if (!num_slots)
return 0;
@@ -1617,10 +1614,6 @@ int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
dev->mtsize = num_slots;
input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0);
- /* Mark slots as 'unused' */
- for (i = 0; i < num_slots; i++)
- dev->mt[i].abs[ABS_MT_TRACKING_ID - ABS_MT_FIRST] = -1;
-
return 0;
}
EXPORT_SYMBOL(input_mt_create_slots);
diff --git a/trunk/drivers/input/keyboard/hil_kbd.c b/trunk/drivers/input/keyboard/hil_kbd.c
index 19fa94af207a..dcc86b97a153 100644
--- a/trunk/drivers/input/keyboard/hil_kbd.c
+++ b/trunk/drivers/input/keyboard/hil_kbd.c
@@ -232,13 +232,13 @@ static void hil_dev_handle_ptr_events(struct hil_dev *ptr)
if (absdev) {
val = lo + (hi << 8);
#ifdef TABLET_AUTOADJUST
- if (val < input_abs_get_min(dev, ABS_X + i))
+ if (val < input_abs_min(dev, ABS_X + i))
input_abs_set_min(dev, ABS_X + i, val);
- if (val > input_abs_get_max(dev, ABS_X + i))
+ if (val > input_abs_max(dev, ABS_X + i))
input_abs_set_max(dev, ABS_X + i, val);
#endif
if (i % 3)
- val = input_abs_get_max(dev, ABS_X + i) - val;
+ val = input_abs_max(dev, ABS_X + i) - val;
input_report_abs(dev, ABS_X + i, val);
} else {
val = (int) (((int8_t) lo) | ((int8_t) hi << 8));
@@ -388,11 +388,11 @@ static void hil_dev_pointer_setup(struct hil_dev *ptr)
#ifdef TABLET_AUTOADJUST
for (i = 0; i < ABS_MAX; i++) {
- int diff = input_abs_get_max(input_dev, ABS_X + i) / 10;
+ int diff = input_abs_max(input_dev, ABS_X + i) / 10;
input_abs_set_min(input_dev, ABS_X + i,
- input_abs_get_min(input_dev, ABS_X + i) + diff);
+ input_abs_min(input_dev, ABS_X + i) + diff)
input_abs_set_max(input_dev, ABS_X + i,
- input_abs_get_max(input_dev, ABS_X + i) - diff);
+ input_abs_max(input_dev, ABS_X + i) - diff)
}
#endif
diff --git a/trunk/drivers/input/keyboard/pxa27x_keypad.c b/trunk/drivers/input/keyboard/pxa27x_keypad.c
index f32404f99189..0e53b3bc39af 100644
--- a/trunk/drivers/input/keyboard/pxa27x_keypad.c
+++ b/trunk/drivers/input/keyboard/pxa27x_keypad.c
@@ -567,6 +567,8 @@ static int __devexit pxa27x_keypad_remove(struct platform_device *pdev)
clk_put(keypad->clk);
input_unregister_device(keypad->input_dev);
+ input_free_device(keypad->input_dev);
+
iounmap(keypad->mmio_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/trunk/drivers/input/misc/uinput.c b/trunk/drivers/input/misc/uinput.c
index 0d4266a533a5..bb53fd33cd1c 100644
--- a/trunk/drivers/input/misc/uinput.c
+++ b/trunk/drivers/input/misc/uinput.c
@@ -811,8 +811,6 @@ static struct miscdevice uinput_misc = {
.minor = UINPUT_MINOR,
.name = UINPUT_NAME,
};
-MODULE_ALIAS_MISCDEV(UINPUT_MINOR);
-MODULE_ALIAS("devname:" UINPUT_NAME);
static int __init uinput_init(void)
{
diff --git a/trunk/drivers/input/mouse/bcm5974.c b/trunk/drivers/input/mouse/bcm5974.c
index b95231763911..ea67c49146a3 100644
--- a/trunk/drivers/input/mouse/bcm5974.c
+++ b/trunk/drivers/input/mouse/bcm5974.c
@@ -337,14 +337,10 @@ static void report_finger_data(struct input_dev *input,
const struct bcm5974_config *cfg,
const struct tp_finger *f)
{
- input_report_abs(input, ABS_MT_TOUCH_MAJOR,
- raw2int(f->force_major) << 1);
- input_report_abs(input, ABS_MT_TOUCH_MINOR,
- raw2int(f->force_minor) << 1);
- input_report_abs(input, ABS_MT_WIDTH_MAJOR,
- raw2int(f->size_major) << 1);
- input_report_abs(input, ABS_MT_WIDTH_MINOR,
- raw2int(f->size_minor) << 1);
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major));
+ input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor));
+ input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major));
+ input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor));
input_report_abs(input, ABS_MT_ORIENTATION,
MAX_FINGER_ORIENTATION - raw2int(f->orientation));
input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x));
diff --git a/trunk/drivers/input/mousedev.c b/trunk/drivers/input/mousedev.c
index d528a2dba064..83c24cca234a 100644
--- a/trunk/drivers/input/mousedev.c
+++ b/trunk/drivers/input/mousedev.c
@@ -138,8 +138,8 @@ static void mousedev_touchpad_event(struct input_dev *dev,
fx(0) = value;
if (mousedev->touch && mousedev->pkt_count >= 2) {
- size = input_abs_get_max(dev, ABS_X) -
- input_abs_get_min(dev, ABS_X);
+ size = input_abs_get_min(dev, ABS_X) -
+ input_abs_get_max(dev, ABS_X);
if (size == 0)
size = 256 * 2;
@@ -155,8 +155,8 @@ static void mousedev_touchpad_event(struct input_dev *dev,
fy(0) = value;
if (mousedev->touch && mousedev->pkt_count >= 2) {
/* use X size for ABS_Y to keep the same scale */
- size = input_abs_get_max(dev, ABS_X) -
- input_abs_get_min(dev, ABS_X);
+ size = input_abs_get_min(dev, ABS_X) -
+ input_abs_get_max(dev, ABS_X);
if (size == 0)
size = 256 * 2;
diff --git a/trunk/drivers/input/serio/i8042.c b/trunk/drivers/input/serio/i8042.c
index f58513160480..46e4ba0b9246 100644
--- a/trunk/drivers/input/serio/i8042.c
+++ b/trunk/drivers/input/serio/i8042.c
@@ -1485,8 +1485,8 @@ static int __init i8042_init(void)
static void __exit i8042_exit(void)
{
- platform_device_unregister(i8042_platform_device);
platform_driver_unregister(&i8042_driver);
+ platform_device_unregister(i8042_platform_device);
i8042_platform_exit();
panic_blink = NULL;
diff --git a/trunk/drivers/input/tablet/wacom_wac.c b/trunk/drivers/input/tablet/wacom_wac.c
index 6e29badb969e..40d77ba8fdc1 100644
--- a/trunk/drivers/input/tablet/wacom_wac.c
+++ b/trunk/drivers/input/tablet/wacom_wac.c
@@ -243,10 +243,10 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
if (features->type == WACOM_G4 ||
features->type == WACOM_MO) {
input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f);
- rw = (data[7] & 0x04) - (data[7] & 0x03);
+ rw = (signed)(data[7] & 0x04) - (data[7] & 0x03);
} else {
input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f);
- rw = -(signed char)data[6];
+ rw = -(signed)data[6];
}
input_report_rel(input, REL_WHEEL, rw);
}
diff --git a/trunk/drivers/isdn/hardware/avm/Kconfig b/trunk/drivers/isdn/hardware/avm/Kconfig
index b99b906ea9b1..5dbcbe3a54a6 100644
--- a/trunk/drivers/isdn/hardware/avm/Kconfig
+++ b/trunk/drivers/isdn/hardware/avm/Kconfig
@@ -36,13 +36,12 @@ config ISDN_DRV_AVMB1_T1ISA
config ISDN_DRV_AVMB1_B1PCMCIA
tristate "AVM B1/M1/M2 PCMCIA support"
- depends on PCMCIA
help
Enable support for the PCMCIA version of the AVM B1 card.
config ISDN_DRV_AVMB1_AVM_CS
tristate "AVM B1/M1/M2 PCMCIA cs module"
- depends on ISDN_DRV_AVMB1_B1PCMCIA
+ depends on ISDN_DRV_AVMB1_B1PCMCIA && PCMCIA
help
Enable the PCMCIA client driver for the AVM B1/M1/M2
PCMCIA cards.
diff --git a/trunk/drivers/macintosh/via-pmu.c b/trunk/drivers/macintosh/via-pmu.c
index 2d17e76066bd..35bc2737412f 100644
--- a/trunk/drivers/macintosh/via-pmu.c
+++ b/trunk/drivers/macintosh/via-pmu.c
@@ -45,7 +45,6 @@
 #include <linux/syscalls.h>
 #include <linux/suspend.h>
 #include <linux/cpu.h>
-#include <linux/compat.h>
 #include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
@@ -2350,52 +2349,11 @@ static long pmu_unlocked_ioctl(struct file *filp,
return ret;
}
-#ifdef CONFIG_COMPAT
-#define PMU_IOC_GET_BACKLIGHT32 _IOR('B', 1, compat_size_t)
-#define PMU_IOC_SET_BACKLIGHT32 _IOW('B', 2, compat_size_t)
-#define PMU_IOC_GET_MODEL32 _IOR('B', 3, compat_size_t)
-#define PMU_IOC_HAS_ADB32 _IOR('B', 4, compat_size_t)
-#define PMU_IOC_CAN_SLEEP32 _IOR('B', 5, compat_size_t)
-#define PMU_IOC_GRAB_BACKLIGHT32 _IOR('B', 6, compat_size_t)
-
-static long compat_pmu_ioctl (struct file *filp, u_int cmd, u_long arg)
-{
- switch (cmd) {
- case PMU_IOC_SLEEP:
- break;
- case PMU_IOC_GET_BACKLIGHT32:
- cmd = PMU_IOC_GET_BACKLIGHT;
- break;
- case PMU_IOC_SET_BACKLIGHT32:
- cmd = PMU_IOC_SET_BACKLIGHT;
- break;
- case PMU_IOC_GET_MODEL32:
- cmd = PMU_IOC_GET_MODEL;
- break;
- case PMU_IOC_HAS_ADB32:
- cmd = PMU_IOC_HAS_ADB;
- break;
- case PMU_IOC_CAN_SLEEP32:
- cmd = PMU_IOC_CAN_SLEEP;
- break;
- case PMU_IOC_GRAB_BACKLIGHT32:
- cmd = PMU_IOC_GRAB_BACKLIGHT;
- break;
- default:
- return -ENOIOCTLCMD;
- }
- return pmu_unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
static const struct file_operations pmu_device_fops = {
.read = pmu_read,
.write = pmu_write,
.poll = pmu_fpoll,
.unlocked_ioctl = pmu_unlocked_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = compat_pmu_ioctl,
-#endif
.open = pmu_open,
.release = pmu_release,
};
diff --git a/trunk/drivers/md/.gitignore b/trunk/drivers/md/.gitignore
new file mode 100644
index 000000000000..a7afec6b19c6
--- /dev/null
+++ b/trunk/drivers/md/.gitignore
@@ -0,0 +1,4 @@
+mktables
+raid6altivec*.c
+raid6int*.c
+raid6tables.c
diff --git a/trunk/drivers/md/bitmap.c b/trunk/drivers/md/bitmap.c
index ed4900ade93a..1ba1e122e948 100644
--- a/trunk/drivers/md/bitmap.c
+++ b/trunk/drivers/md/bitmap.c
@@ -1542,7 +1542,8 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
atomic_read(&bitmap->mddev->recovery_active) == 0);
bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync;
- set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
+ if (bitmap->mddev->persistent)
+ set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1);
s = 0;
while (s < sector && s < bitmap->mddev->resync_max_sectors) {
diff --git a/trunk/drivers/md/md.c b/trunk/drivers/md/md.c
index 43cf9cc9c1df..11567c7999a2 100644
--- a/trunk/drivers/md/md.c
+++ b/trunk/drivers/md/md.c
@@ -2136,6 +2136,16 @@ static void sync_sbs(mddev_t * mddev, int nospares)
* with the rest of the array)
*/
mdk_rdev_t *rdev;
+
+ /* First make sure individual recovery_offsets are correct */
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ if (rdev->raid_disk >= 0 &&
+ mddev->delta_disks >= 0 &&
+ !test_bit(In_sync, &rdev->flags) &&
+ mddev->curr_resync_completed > rdev->recovery_offset)
+ rdev->recovery_offset = mddev->curr_resync_completed;
+
+ }
list_for_each_entry(rdev, &mddev->disks, same_set) {
if (rdev->sb_events == mddev->events ||
(nospares &&
@@ -2157,27 +2167,13 @@ static void md_update_sb(mddev_t * mddev, int force_change)
int sync_req;
int nospares = 0;
-repeat:
- /* First make sure individual recovery_offsets are correct */
- list_for_each_entry(rdev, &mddev->disks, same_set) {
- if (rdev->raid_disk >= 0 &&
- mddev->delta_disks >= 0 &&
- !test_bit(In_sync, &rdev->flags) &&
- mddev->curr_resync_completed > rdev->recovery_offset)
- rdev->recovery_offset = mddev->curr_resync_completed;
-
- }
- if (!mddev->persistent) {
- clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
- clear_bit(MD_CHANGE_DEVS, &mddev->flags);
- wake_up(&mddev->sb_wait);
+ mddev->utime = get_seconds();
+ if (mddev->external)
return;
- }
-
+repeat:
spin_lock_irq(&mddev->write_lock);
- mddev->utime = get_seconds();
-
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
force_change = 1;
if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
@@ -2225,6 +2221,19 @@ static void md_update_sb(mddev_t * mddev, int force_change)
MD_BUG();
mddev->events --;
}
+
+ /*
+ * do not write anything to disk if using
+ * nonpersistent superblocks
+ */
+ if (!mddev->persistent) {
+ if (!mddev->external)
+ clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+
+ spin_unlock_irq(&mddev->write_lock);
+ wake_up(&mddev->sb_wait);
+ return;
+ }
sync_sbs(mddev, nospares);
spin_unlock_irq(&mddev->write_lock);
@@ -3370,7 +3379,7 @@ array_state_show(mddev_t *mddev, char *page)
case 0:
if (mddev->in_sync)
st = clean;
- else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
+ else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
st = write_pending;
else if (mddev->safemode)
st = active_idle;
@@ -3451,7 +3460,9 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
mddev->in_sync = 1;
if (mddev->safemode == 1)
mddev->safemode = 0;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ if (mddev->persistent)
+ set_bit(MD_CHANGE_CLEAN,
+ &mddev->flags);
}
err = 0;
} else
@@ -3463,7 +3474,8 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
case active:
if (mddev->pers) {
restart_array(mddev);
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ if (mddev->external)
+ clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
wake_up(&mddev->sb_wait);
err = 0;
} else {
@@ -6568,7 +6580,6 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
- set_bit(MD_CHANGE_PENDING, &mddev->flags);
md_wakeup_thread(mddev->thread);
did_change = 1;
}
@@ -6577,6 +6588,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
wait_event(mddev->sb_wait,
+ !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
}
@@ -6612,7 +6624,6 @@ int md_allow_write(mddev_t *mddev)
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
- set_bit(MD_CHANGE_PENDING, &mddev->flags);
if (mddev->safemode_delay &&
mddev->safemode == 0)
mddev->safemode = 1;
@@ -6622,7 +6633,7 @@ int md_allow_write(mddev_t *mddev)
} else
spin_unlock_irq(&mddev->write_lock);
- if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
+ if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
return -EAGAIN;
else
return 0;
@@ -6820,7 +6831,8 @@ void md_do_sync(mddev_t *mddev)
atomic_read(&mddev->recovery_active) == 0);
mddev->curr_resync_completed =
mddev->curr_resync;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ if (mddev->persistent)
+ set_bit(MD_CHANGE_CLEAN, &mddev->flags);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
@@ -7099,7 +7111,8 @@ void md_check_recovery(mddev_t *mddev)
mddev->recovery_cp == MaxSector) {
mddev->in_sync = 1;
did_change = 1;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ if (mddev->persistent)
+ set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
if (mddev->safemode == 1)
mddev->safemode = 0;
diff --git a/trunk/drivers/md/md.h b/trunk/drivers/md/md.h
index 3931299788dc..a953fe2808ae 100644
--- a/trunk/drivers/md/md.h
+++ b/trunk/drivers/md/md.h
@@ -140,7 +140,7 @@ struct mddev_s
unsigned long flags;
#define MD_CHANGE_DEVS 0 /* Some device status has changed */
#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
-#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
+#define MD_CHANGE_PENDING 2 /* superblock update in progress */
int suspended;
atomic_t active_io;
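
The MD_CHANGE_* values redefined above are bit numbers inside the plain unsigned long flags word of mddev, and the md.c hunks earlier in this patch drive them with the kernel's atomic bitops plus the sb_wait queue: set a bit when metadata gets dirty, write the superblock, clear the bit, wake waiters. A minimal stand-alone sketch of that set/clear/wait pattern, using hypothetical demo_* names rather than anything from md itself:

#include <linux/bitops.h>
#include <linux/wait.h>

#define DEMO_CHANGE_CLEAN 1 /* bit number, analogous to MD_CHANGE_CLEAN */

static unsigned long demo_flags;
static DECLARE_WAIT_QUEUE_HEAD(demo_wait);

/* writer: note that an on-disk metadata update is needed */
static void demo_mark_dirty(void)
{
	set_bit(DEMO_CHANGE_CLEAN, &demo_flags);
}

/* updater: once the metadata hits disk, clear the bit and release waiters */
static void demo_update_done(void)
{
	clear_bit(DEMO_CHANGE_CLEAN, &demo_flags);
	wake_up(&demo_wait);
}

/* anyone who must not proceed while an update is still outstanding */
static void demo_wait_for_clean(void)
{
	wait_event(demo_wait, !test_bit(DEMO_CHANGE_CLEAN, &demo_flags));
}
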
diff --git a/trunk/drivers/md/raid1.c b/trunk/drivers/md/raid1.c
index ad83a4dcadc3..73cc74ffc26b 100644
--- a/trunk/drivers/md/raid1.c
+++ b/trunk/drivers/md/raid1.c
@@ -787,8 +787,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
struct bio_list bl;
struct page **behind_pages = NULL;
const int rw = bio_data_dir(bio);
- const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
- unsigned long do_barriers;
+ const bool do_sync = (bio->bi_rw & REQ_SYNC);
+ bool do_barriers;
mdk_rdev_t *blocked_rdev;
/*
@@ -1120,8 +1120,6 @@ static int raid1_spare_active(mddev_t *mddev)
{
int i;
conf_t *conf = mddev->private;
- int count = 0;
- unsigned long flags;
/*
* Find all failed disks within the RAID1 configuration
@@ -1133,16 +1131,15 @@ static int raid1_spare_active(mddev_t *mddev)
if (rdev
&& !test_bit(Faulty, &rdev->flags)
&& !test_and_set_bit(In_sync, &rdev->flags)) {
- count++;
- sysfs_notify_dirent(rdev->sysfs_state);
+ unsigned long flags;
+ spin_lock_irqsave(&conf->device_lock, flags);
+ mddev->degraded--;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
}
}
- spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded -= count;
- spin_unlock_irqrestore(&conf->device_lock, flags);
print_conf(conf);
- return count;
+ return 0;
}
@@ -1643,7 +1640,7 @@ static void raid1d(mddev_t *mddev)
* We already have a nr_pending reference on these rdevs.
*/
int i;
- const unsigned long do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
+ const bool do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
clear_bit(R1BIO_Barrier, &r1_bio->state);
for (i=0; i < conf->raid_disks; i++)
@@ -1699,7 +1696,7 @@ static void raid1d(mddev_t *mddev)
(unsigned long long)r1_bio->sector);
raid_end_bio_io(r1_bio);
} else {
- const unsigned long do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
+ const bool do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
r1_bio->bios[r1_bio->read_disk] =
mddev->ro ? IO_BLOCKED : NULL;
r1_bio->read_disk = disk;
diff --git a/trunk/drivers/md/raid10.c b/trunk/drivers/md/raid10.c
index 84718383124d..a88aeb5198c7 100644
--- a/trunk/drivers/md/raid10.c
+++ b/trunk/drivers/md/raid10.c
@@ -799,7 +799,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
int i;
int chunk_sects = conf->chunk_mask + 1;
const int rw = bio_data_dir(bio);
- const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
+ const bool do_sync = (bio->bi_rw & REQ_SYNC);
struct bio_list bl;
unsigned long flags;
mdk_rdev_t *blocked_rdev;
@@ -1116,8 +1116,6 @@ static int raid10_spare_active(mddev_t *mddev)
int i;
conf_t *conf = mddev->private;
mirror_info_t *tmp;
- int count = 0;
- unsigned long flags;
/*
* Find all non-in_sync disks within the RAID10 configuration
@@ -1128,16 +1126,15 @@ static int raid10_spare_active(mddev_t *mddev)
if (tmp->rdev
&& !test_bit(Faulty, &tmp->rdev->flags)
&& !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
- count++;
- sysfs_notify_dirent(tmp->rdev->sysfs_state);
+ unsigned long flags;
+ spin_lock_irqsave(&conf->device_lock, flags);
+ mddev->degraded--;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
}
}
- spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded -= count;
- spin_unlock_irqrestore(&conf->device_lock, flags);
print_conf(conf);
- return count;
+ return 0;
}
@@ -1737,7 +1734,7 @@ static void raid10d(mddev_t *mddev)
raid_end_bio_io(r10_bio);
bio_put(bio);
} else {
- const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
+ const bool do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
bio_put(bio);
rdev = conf->mirrors[mirror].rdev;
if (printk_ratelimit())
diff --git a/trunk/drivers/md/raid5.c b/trunk/drivers/md/raid5.c
index 69b0a169e43d..866d4b5a144c 100644
--- a/trunk/drivers/md/raid5.c
+++ b/trunk/drivers/md/raid5.c
@@ -5330,8 +5330,6 @@ static int raid5_spare_active(mddev_t *mddev)
int i;
raid5_conf_t *conf = mddev->private;
struct disk_info *tmp;
- int count = 0;
- unsigned long flags;
for (i = 0; i < conf->raid_disks; i++) {
tmp = conf->disks + i;
@@ -5339,15 +5337,14 @@ static int raid5_spare_active(mddev_t *mddev)
&& tmp->rdev->recovery_offset == MaxSector
&& !test_bit(Faulty, &tmp->rdev->flags)
&& !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
- count++;
- sysfs_notify_dirent(tmp->rdev->sysfs_state);
+ unsigned long flags;
+ spin_lock_irqsave(&conf->device_lock, flags);
+ mddev->degraded--;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
}
}
- spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded -= count;
- spin_unlock_irqrestore(&conf->device_lock, flags);
print_raid5_conf(conf);
- return count;
+ return 0;
}
static int raid5_remove_disk(mddev_t *mddev, int number)
diff --git a/trunk/drivers/media/dvb/mantis/Kconfig b/trunk/drivers/media/dvb/mantis/Kconfig
index fd0830ed10d8..decdeda840d0 100644
--- a/trunk/drivers/media/dvb/mantis/Kconfig
+++ b/trunk/drivers/media/dvb/mantis/Kconfig
@@ -1,6 +1,6 @@
config MANTIS_CORE
tristate "Mantis/Hopper PCI bridge based devices"
- depends on PCI && I2C && INPUT && IR_CORE
+ depends on PCI && I2C && INPUT
help
Support for PCI cards based on the Mantis and Hopper PCi bridge.
diff --git a/trunk/drivers/mmc/core/host.c b/trunk/drivers/mmc/core/host.c
index d80cfdc8edd2..0efe631e50ca 100644
--- a/trunk/drivers/mmc/core/host.c
+++ b/trunk/drivers/mmc/core/host.c
@@ -86,9 +86,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
-#ifdef CONFIG_PM
host->pm_notify.notifier_call = mmc_pm_notify;
-#endif
/*
* By default, hosts do not support SGIO or large requests.
diff --git a/trunk/drivers/mmc/core/sdio.c b/trunk/drivers/mmc/core/sdio.c
index f332c52968b7..bd2755e8d9a3 100644
--- a/trunk/drivers/mmc/core/sdio.c
+++ b/trunk/drivers/mmc/core/sdio.c
@@ -362,8 +362,9 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
goto err;
}
- if (ocr & R4_MEMORY_PRESENT
- && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) {
+ err = mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid);
+
+ if (!err) {
card->type = MMC_TYPE_SD_COMBO;
if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
diff --git a/trunk/drivers/mmc/host/Kconfig b/trunk/drivers/mmc/host/Kconfig
index 68d12794cfd9..283190bc2a40 100644
--- a/trunk/drivers/mmc/host/Kconfig
+++ b/trunk/drivers/mmc/host/Kconfig
@@ -132,7 +132,7 @@ config MMC_SDHCI_CNS3XXX
config MMC_SDHCI_S3C
tristate "SDHCI support on Samsung S3C SoC"
- depends on MMC_SDHCI && PLAT_SAMSUNG
+ depends on MMC_SDHCI && (PLAT_S3C24XX || PLAT_S3C64XX)
help
This selects the Secure Digital Host Controller Interface (SDHCI)
often referrered to as the HSMMC block in some of the Samsung S3C
diff --git a/trunk/drivers/mmc/host/at91_mci.c b/trunk/drivers/mmc/host/at91_mci.c
index 87226cd202a5..5f3a599ead07 100644
--- a/trunk/drivers/mmc/host/at91_mci.c
+++ b/trunk/drivers/mmc/host/at91_mci.c
@@ -66,7 +66,6 @@
#include
#include
#include
-#include
#include
diff --git a/trunk/drivers/mmc/host/imxmmc.c b/trunk/drivers/mmc/host/imxmmc.c
index 5a950b16d9e6..9a68ff4353a2 100644
--- a/trunk/drivers/mmc/host/imxmmc.c
+++ b/trunk/drivers/mmc/host/imxmmc.c
@@ -148,12 +148,11 @@ static int imxmci_start_clock(struct imxmci_host *host)
while (delay--) {
reg = readw(host->base + MMC_REG_STATUS);
- if (reg & STATUS_CARD_BUS_CLK_RUN) {
+ if (reg & STATUS_CARD_BUS_CLK_RUN)
/* Check twice before cut */
reg = readw(host->base + MMC_REG_STATUS);
if (reg & STATUS_CARD_BUS_CLK_RUN)
return 0;
- }
if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
return 0;
diff --git a/trunk/drivers/mmc/host/omap_hsmmc.c b/trunk/drivers/mmc/host/omap_hsmmc.c
index 4526d2791f29..4a8776f8afdd 100644
--- a/trunk/drivers/mmc/host/omap_hsmmc.c
+++ b/trunk/drivers/mmc/host/omap_hsmmc.c
@@ -2305,6 +2305,7 @@ static int omap_hsmmc_suspend(struct device *dev)
int ret = 0;
struct platform_device *pdev = to_platform_device(dev);
struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
+ pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
if (host && host->suspended)
return 0;
@@ -2323,8 +2324,8 @@ static int omap_hsmmc_suspend(struct device *dev)
}
}
cancel_work_sync(&host->mmc_carddetect_work);
- ret = mmc_suspend_host(host->mmc);
mmc_host_enable(host->mmc);
+ ret = mmc_suspend_host(host->mmc);
if (ret == 0) {
omap_hsmmc_disable_irq(host);
OMAP_HSMMC_WRITE(host->base, HCTL,
diff --git a/trunk/drivers/mmc/host/s3cmci.c b/trunk/drivers/mmc/host/s3cmci.c
index 976330de379e..2e16e0a90a5e 100644
--- a/trunk/drivers/mmc/host/s3cmci.c
+++ b/trunk/drivers/mmc/host/s3cmci.c
@@ -1600,7 +1600,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev)
host->pio_active = XFER_NONE;
#ifdef CONFIG_MMC_S3C_PIODMA
- host->dodma = host->pdata->use_dma;
+ host->dodma = host->pdata->dma;
#endif
host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/trunk/drivers/mmc/host/sdhci-s3c.c b/trunk/drivers/mmc/host/sdhci-s3c.c
index 71ad4163b95e..0a7f2614c6f0 100644
--- a/trunk/drivers/mmc/host/sdhci-s3c.c
+++ b/trunk/drivers/mmc/host/sdhci-s3c.c
@@ -242,7 +242,7 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
{
struct sdhci_host *host = platform_get_drvdata(dev);
if (host) {
- spin_lock(&host->lock);
+ mutex_lock(&host->lock);
if (state) {
dev_dbg(&dev->dev, "card inserted.\n");
host->flags &= ~SDHCI_DEVICE_DEAD;
@@ -252,8 +252,8 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
host->flags |= SDHCI_DEVICE_DEAD;
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
}
- tasklet_schedule(&host->card_tasklet);
- spin_unlock(&host->lock);
+ sdhci_card_detect(host);
+ mutex_unlock(&host->lock);
}
}
diff --git a/trunk/drivers/mmc/host/sdhci.c b/trunk/drivers/mmc/host/sdhci.c
index 401527d273b5..785512133b50 100644
--- a/trunk/drivers/mmc/host/sdhci.c
+++ b/trunk/drivers/mmc/host/sdhci.c
@@ -1180,8 +1180,7 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
else
ctrl &= ~SDHCI_CTRL_4BITBUS;
- if (ios->timing == MMC_TIMING_SD_HS &&
- !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
+ if (ios->timing == MMC_TIMING_SD_HS)
ctrl |= SDHCI_CTRL_HISPD;
else
ctrl &= ~SDHCI_CTRL_HISPD;
diff --git a/trunk/drivers/mmc/host/sdhci.h b/trunk/drivers/mmc/host/sdhci.h
index d316bc79b636..036cfae76368 100644
--- a/trunk/drivers/mmc/host/sdhci.h
+++ b/trunk/drivers/mmc/host/sdhci.h
@@ -245,8 +245,6 @@ struct sdhci_host {
#define SDHCI_QUIRK_MISSING_CAPS (1<<27)
/* Controller uses Auto CMD12 command to stop the transfer */
#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28)
-/* Controller doesn't have HISPD bit field in HI-SPEED SD card */
-#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29)
int irq; /* Device IRQ */
void __iomem * ioaddr; /* Mapped address */
diff --git a/trunk/drivers/mmc/host/tmio_mmc.c b/trunk/drivers/mmc/host/tmio_mmc.c
index 69d98e3bf6ab..ee7d0a5a51c4 100644
--- a/trunk/drivers/mmc/host/tmio_mmc.c
+++ b/trunk/drivers/mmc/host/tmio_mmc.c
@@ -164,7 +164,6 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
struct mmc_data *data = host->data;
- void *sg_virt;
unsigned short *buf;
unsigned int count;
unsigned long flags;
@@ -174,8 +173,8 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
return;
}
- sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
- buf = (unsigned short *)(sg_virt + host->sg_off);
+ buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) +
+ host->sg_off);
count = host->sg_ptr->length - host->sg_off;
if (count > data->blksz)
@@ -192,7 +191,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
host->sg_off += count;
- tmio_mmc_kunmap_atomic(sg_virt, &flags);
+ tmio_mmc_kunmap_atomic(host, &flags);
if (host->sg_off == host->sg_ptr->length)
tmio_mmc_next_sg(host);
diff --git a/trunk/drivers/mmc/host/tmio_mmc.h b/trunk/drivers/mmc/host/tmio_mmc.h
index 0fedc78e3ea5..64f7d5dfc106 100644
--- a/trunk/drivers/mmc/host/tmio_mmc.h
+++ b/trunk/drivers/mmc/host/tmio_mmc.h
@@ -82,7 +82,10 @@
#define ack_mmc_irqs(host, i) \
do { \
- sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
+ u32 mask;\
+ mask = sd_ctrl_read32((host), CTL_STATUS); \
+ mask &= ~((i) & TMIO_MASK_IRQ); \
+ sd_ctrl_write32((host), CTL_STATUS, mask); \
} while (0)
@@ -174,17 +177,19 @@ static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
return --host->sg_len;
}
-static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
+static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host,
unsigned long *flags)
{
+ struct scatterlist *sg = host->sg_ptr;
+
local_irq_save(*flags);
return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
}
-static inline void tmio_mmc_kunmap_atomic(void *virt,
+static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host,
unsigned long *flags)
{
- kunmap_atomic(virt, KM_BIO_SRC_IRQ);
+ kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ);
local_irq_restore(*flags);
}
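
The tmio helpers touched above follow the usual atomic-kmap recipe of that kernel generation: disable local interrupts, map the page backing the current scatterlist entry with the two-argument kmap_atomic(), and add the in-page offset. A small self-contained sketch of the same recipe, with hypothetical demo_* names and the KM_BIO_SRC_IRQ slot assumed as in the code above:

#include <linux/highmem.h>
#include <linux/irqflags.h>
#include <linux/scatterlist.h>

/* Map one scatterlist entry for short, IRQ-safe access (sketch only). */
static void *demo_sg_map(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
}

static void demo_sg_unmap(void *virt, unsigned long *flags)
{
	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
	local_irq_restore(*flags);
}
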
diff --git a/trunk/drivers/mtd/maps/physmap_of.c b/trunk/drivers/mtd/maps/physmap_of.c
index fe63f6bd663c..00af55d7afba 100644
--- a/trunk/drivers/mtd/maps/physmap_of.c
+++ b/trunk/drivers/mtd/maps/physmap_of.c
@@ -22,7 +22,6 @@
#include
#include
#include
-#include
#include
#include
diff --git a/trunk/drivers/mtd/nand/nand_base.c b/trunk/drivers/mtd/nand/nand_base.c
index d551ddd9537a..a3c7473dd409 100644
--- a/trunk/drivers/mtd/nand/nand_base.c
+++ b/trunk/drivers/mtd/nand/nand_base.c
@@ -2866,7 +2866,6 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
*/
if (id_data[0] == id_data[6] && id_data[1] == id_data[7] &&
id_data[0] == NAND_MFR_SAMSUNG &&
- (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
id_data[5] != 0x00) {
/* Calc pagesize */
mtd->writesize = 2048 << (extid & 0x03);
@@ -2935,10 +2934,14 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1;
/* Set the bad block position */
- if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
- chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
- else
+ if (!(busw & NAND_BUSWIDTH_16) && (*maf_id == NAND_MFR_STMICRO ||
+ (*maf_id == NAND_MFR_SAMSUNG &&
+ mtd->writesize == 512) ||
+ *maf_id == NAND_MFR_AMD))
chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
+ else
+ chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
+
/* Get chip options, preserve non chip based options */
chip->options &= ~NAND_CHIPOPTIONS_MSK;
diff --git a/trunk/drivers/mtd/nand/pxa3xx_nand.c b/trunk/drivers/mtd/nand/pxa3xx_nand.c
index 4d89f3780207..e02fa4f0e3c9 100644
--- a/trunk/drivers/mtd/nand/pxa3xx_nand.c
+++ b/trunk/drivers/mtd/nand/pxa3xx_nand.c
@@ -363,7 +363,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = {
#define tAR_NDTR1(r) (((r) >> 0) & 0xf)
/* convert nano-seconds to nand flash controller clock cycles */
-#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
+#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) - 1)
/* convert nand flash controller clock cycles to nano-seconds */
#define cycle2ns(c, clk) ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000))
diff --git a/trunk/drivers/mtd/ubi/Kconfig.debug b/trunk/drivers/mtd/ubi/Kconfig.debug
index 61f6e5e40458..2246f154e2f7 100644
--- a/trunk/drivers/mtd/ubi/Kconfig.debug
+++ b/trunk/drivers/mtd/ubi/Kconfig.debug
@@ -6,7 +6,7 @@ config MTD_UBI_DEBUG
depends on SYSFS
depends on MTD_UBI
select DEBUG_FS
- select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL
+ select KALLSYMS_ALL
help
This option enables UBI debugging.
diff --git a/trunk/drivers/mtd/ubi/cdev.c b/trunk/drivers/mtd/ubi/cdev.c
index 3d2d1a69e9a0..4dfa6b90c21c 100644
--- a/trunk/drivers/mtd/ubi/cdev.c
+++ b/trunk/drivers/mtd/ubi/cdev.c
@@ -798,18 +798,18 @@ static int rename_volumes(struct ubi_device *ubi,
goto out_free;
}
- re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
- if (!re1) {
+ re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
+ if (!re) {
err = -ENOMEM;
ubi_close_volume(desc);
goto out_free;
}
- re1->remove = 1;
- re1->desc = desc;
- list_add(&re1->list, &rename_list);
+ re->remove = 1;
+ re->desc = desc;
+ list_add(&re->list, &rename_list);
dbg_msg("will remove volume %d, name \"%s\"",
- re1->desc->vol->vol_id, re1->desc->vol->name);
+ re->desc->vol->vol_id, re->desc->vol->name);
}
mutex_lock(&ubi->device_mutex);
diff --git a/trunk/drivers/mtd/ubi/scan.c b/trunk/drivers/mtd/ubi/scan.c
index 69b52e9c9489..372a15ac9995 100644
--- a/trunk/drivers/mtd/ubi/scan.c
+++ b/trunk/drivers/mtd/ubi/scan.c
@@ -843,7 +843,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
case UBI_COMPAT_DELETE:
ubi_msg("\"delete\" compatible internal volume %d:%d"
" found, will remove it", vol_id, lnum);
- err = add_to_list(si, pnum, ec, &si->erase);
+ err = add_to_list(si, pnum, ec, &si->corr);
if (err)
return err;
return 0;
diff --git a/trunk/drivers/mtd/ubi/wl.c b/trunk/drivers/mtd/ubi/wl.c
index 97a435672eaf..ee7b1d8fbb92 100644
--- a/trunk/drivers/mtd/ubi/wl.c
+++ b/trunk/drivers/mtd/ubi/wl.c
@@ -1212,8 +1212,7 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
retry:
spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
- if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
- in_wl_tree(e, &ubi->erroneous)) {
+ if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
spin_unlock(&ubi->wl_lock);
return 0;
}
diff --git a/trunk/drivers/net/3c59x.c b/trunk/drivers/net/3c59x.c
index a045559c81cf..c754d88e5ec9 100644
--- a/trunk/drivers/net/3c59x.c
+++ b/trunk/drivers/net/3c59x.c
@@ -633,8 +633,7 @@ struct vortex_private {
open:1,
medialock:1,
must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */
- large_frames:1, /* accept large frames */
- handling_irq:1; /* private in_irq indicator */
+ large_frames:1; /* accept large frames */
int drv_flags;
u16 status_enable;
u16 intr_enable;
@@ -647,7 +646,7 @@ struct vortex_private {
u16 io_size; /* Size of PCI region (for release_region) */
/* Serialises access to hardware other than MII and variables below.
- * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */
+ * The lock hierarchy is rtnl_lock > lock > mii_lock > window_lock. */
spinlock_t lock;
spinlock_t mii_lock; /* Serialises access to MII */
@@ -2134,15 +2133,6 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->name, vp->cur_tx);
}
- /*
- * We can't allow a recursion from our interrupt handler back into the
- * tx routine, as they take the same spin lock, and that causes
- * deadlock. Just return NETDEV_TX_BUSY and let the stack try again in
- * a bit
- */
- if (vp->handling_irq)
- return NETDEV_TX_BUSY;
-
if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
if (vortex_debug > 0)
pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
@@ -2345,13 +2335,11 @@ boomerang_interrupt(int irq, void *dev_id)
ioaddr = vp->ioaddr;
-
/*
* It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
* and boomerang_start_xmit
*/
spin_lock(&vp->lock);
- vp->handling_irq = 1;
status = ioread16(ioaddr + EL3_STATUS);
@@ -2459,7 +2447,6 @@ boomerang_interrupt(int irq, void *dev_id)
pr_debug("%s: exiting interrupt, status %4.4x.\n",
dev->name, status);
handler_exit:
- vp->handling_irq = 0;
spin_unlock(&vp->lock);
return IRQ_HANDLED;
}
@@ -2984,6 +2971,7 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
int err;
struct vortex_private *vp = netdev_priv(dev);
+ unsigned long flags;
pci_power_t state = 0;
if(VORTEX_PCI(vp))
@@ -2993,7 +2981,9 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if(state != 0)
pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
+ spin_lock_irqsave(&vp->lock, flags);
err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
+ spin_unlock_irqrestore(&vp->lock, flags);
if(state != 0)
pci_set_power_state(VORTEX_PCI(vp), state);
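
The boomerang_start_xmit hunk above removes a reentrancy guard whose comment spells out the hazard: the interrupt handler and the transmit path take the same vp->lock, so the transmit path refuses work with NETDEV_TX_BUSY while the handler is running instead of deadlocking. A hedged stand-alone sketch of that guard pattern, built around a hypothetical demo_priv structure that is not part of this driver:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct demo_priv {
	spinlock_t lock;
	unsigned int handling_irq:1; /* set by the ISR for its whole run */
};

static netdev_tx_t demo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct demo_priv *dp = netdev_priv(dev);
	unsigned long flags;

	/* The ISR also takes dp->lock; queueing from here while it runs
	 * would deadlock, so ask the stack to retry a little later. */
	if (dp->handling_irq)
		return NETDEV_TX_BUSY;

	spin_lock_irqsave(&dp->lock, flags);
	/* ... place skb on the hardware ring and ring the doorbell ... */
	spin_unlock_irqrestore(&dp->lock, flags);
	return NETDEV_TX_OK;
}
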
diff --git a/trunk/drivers/net/Kconfig b/trunk/drivers/net/Kconfig
index 2cc81a54cbf3..5a6895320b48 100644
--- a/trunk/drivers/net/Kconfig
+++ b/trunk/drivers/net/Kconfig
@@ -928,16 +928,6 @@ config SMC91X
The module will be called smc91x. If you want to compile it as a
module, say M here and read .
-config PXA168_ETH
- tristate "Marvell pxa168 ethernet support"
- depends on CPU_PXA168
- select PHYLIB
- help
- This driver supports the pxa168 Ethernet ports.
-
- To compile this driver as a module, choose M here. The module
- will be called pxa168_eth.
-
config NET_NETX
tristate "NetX Ethernet support"
select MII
diff --git a/trunk/drivers/net/Makefile b/trunk/drivers/net/Makefile
index 3e8f150c4b14..56e8c27f77ce 100644
--- a/trunk/drivers/net/Makefile
+++ b/trunk/drivers/net/Makefile
@@ -244,7 +244,6 @@ obj-$(CONFIG_MYRI10GE) += myri10ge/
obj-$(CONFIG_SMC91X) += smc91x.o
obj-$(CONFIG_SMC911X) += smc911x.o
obj-$(CONFIG_SMSC911X) += smsc911x.o
-obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
obj-$(CONFIG_DM9000) += dm9000.o
obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
diff --git a/trunk/drivers/net/bnx2x/bnx2x.h b/trunk/drivers/net/bnx2x/bnx2x.h
index 0c2d96ed561c..53af9c93e75c 100644
--- a/trunk/drivers/net/bnx2x/bnx2x.h
+++ b/trunk/drivers/net/bnx2x/bnx2x.h
@@ -20,8 +20,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.52.53-4"
-#define DRV_MODULE_RELDATE "2010/16/08"
+#define DRV_MODULE_VERSION "1.52.53-3"
+#define DRV_MODULE_RELDATE "2010/18/04"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
diff --git a/trunk/drivers/net/bnx2x/bnx2x_main.c b/trunk/drivers/net/bnx2x/bnx2x_main.c
index f8c3f08e4ce7..b4ec2b02a465 100644
--- a/trunk/drivers/net/bnx2x/bnx2x_main.c
+++ b/trunk/drivers/net/bnx2x/bnx2x_main.c
@@ -4328,12 +4328,10 @@ static int bnx2x_init_port(struct bnx2x *bp)
val |= aeu_gpio_mask;
REG_WR(bp, offset, val);
}
- bp->port.need_hw_lock = 1;
break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- bp->port.need_hw_lock = 1;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
/* add SPIO 5 to group 0 */
{
u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -4343,10 +4341,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
REG_WR(bp, reg_addr, val);
}
break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
- bp->port.need_hw_lock = 1;
- break;
+
default:
break;
}
diff --git a/trunk/drivers/net/caif/Kconfig b/trunk/drivers/net/caif/Kconfig
index 75bfc3a9d95f..631a6242b011 100644
--- a/trunk/drivers/net/caif/Kconfig
+++ b/trunk/drivers/net/caif/Kconfig
@@ -15,7 +15,7 @@ config CAIF_TTY
config CAIF_SPI_SLAVE
tristate "CAIF SPI transport driver for slave interface"
- depends on CAIF && HAS_DMA
+ depends on CAIF
default n
---help---
The CAIF Link layer SPI Protocol driver for Slave SPI interface.
diff --git a/trunk/drivers/net/e1000e/82571.c b/trunk/drivers/net/e1000e/82571.c
index d3d4a57e2450..a4a0d2b6eb1c 100644
--- a/trunk/drivers/net/e1000e/82571.c
+++ b/trunk/drivers/net/e1000e/82571.c
@@ -936,14 +936,12 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
ew32(IMC, 0xffffffff);
icr = er32(ICR);
- if (hw->mac.type == e1000_82571) {
- /* Install any alternate MAC address into RAR0 */
- ret_val = e1000_check_alt_mac_addr_generic(hw);
- if (ret_val)
- return ret_val;
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ return ret_val;
- e1000e_set_laa_state_82571(hw, true);
- }
+ e1000e_set_laa_state_82571(hw, true);
/* Reinitialize the 82571 serdes link state machine */
if (hw->phy.media_type == e1000_media_type_internal_serdes)
@@ -1620,16 +1618,14 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
{
s32 ret_val = 0;
- if (hw->mac.type == e1000_82571) {
- /*
- * If there's an alternate MAC address place it in RAR0
- * so that it will override the Si installed default perm
- * address.
- */
- ret_val = e1000_check_alt_mac_addr_generic(hw);
- if (ret_val)
- goto out;
- }
+ /*
+ * If there's an alternate MAC address place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ goto out;
ret_val = e1000_read_mac_addr_generic(hw);
@@ -1837,7 +1833,6 @@ struct e1000_info e1000_82573_info = {
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_SWSM_ON_LOAD,
- .flags2 = FLAG2_DISABLE_ASPM_L1,
.pba = 20,
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_82571,
diff --git a/trunk/drivers/net/e1000e/defines.h b/trunk/drivers/net/e1000e/defines.h
index 93b3bedae8d2..307a72f483ee 100644
--- a/trunk/drivers/net/e1000e/defines.h
+++ b/trunk/drivers/net/e1000e/defines.h
@@ -621,7 +621,6 @@
#define E1000_FLASH_UPDATES 2000
/* NVM Word Offsets */
-#define NVM_COMPAT 0x0003
#define NVM_ID_LED_SETTINGS 0x0004
#define NVM_INIT_CONTROL2_REG 0x000F
#define NVM_INIT_CONTROL3_PORT_B 0x0014
@@ -644,9 +643,6 @@
/* Mask bits for fields in Word 0x1a of the NVM */
#define NVM_WORD1A_ASPM_MASK 0x000C
-/* Mask bits for fields in Word 0x03 of the EEPROM */
-#define NVM_COMPAT_LOM 0x0800
-
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
diff --git a/trunk/drivers/net/e1000e/lib.c b/trunk/drivers/net/e1000e/lib.c
index 0fd4eb5ac5fb..df4a27922931 100644
--- a/trunk/drivers/net/e1000e/lib.c
+++ b/trunk/drivers/net/e1000e/lib.c
@@ -183,16 +183,6 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
u16 offset, nvm_alt_mac_addr_offset, nvm_data;
u8 alt_mac_addr[ETH_ALEN];
- ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
- if (ret_val)
- goto out;
-
- /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
- if (!((nvm_data & NVM_COMPAT_LOM) ||
- (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
- (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)))
- goto out;
-
ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
&nvm_alt_mac_addr_offset);
if (ret_val) {
diff --git a/trunk/drivers/net/ehea/ehea.h b/trunk/drivers/net/ehea/ehea.h
index 1846623c6ae6..99a929964e3c 100644
--- a/trunk/drivers/net/ehea/ehea.h
+++ b/trunk/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
#include
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0106"
+#define DRV_VERSION "EHEA_0105"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
@@ -400,7 +400,6 @@ struct ehea_port_res {
u32 poll_counter;
struct net_lro_mgr lro_mgr;
struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
- int sq_restart_flag;
};
diff --git a/trunk/drivers/net/ehea/ehea_main.c b/trunk/drivers/net/ehea/ehea_main.c
index a333b42111b8..897719b49f96 100644
--- a/trunk/drivers/net/ehea/ehea_main.c
+++ b/trunk/drivers/net/ehea/ehea_main.c
@@ -776,53 +776,6 @@ static int ehea_proc_rwqes(struct net_device *dev,
return processed;
}
-#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull
-
-static void reset_sq_restart_flag(struct ehea_port *port)
-{
- int i;
-
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
- struct ehea_port_res *pr = &port->port_res[i];
- pr->sq_restart_flag = 0;
- }
-}
-
-static void check_sqs(struct ehea_port *port)
-{
- struct ehea_swqe *swqe;
- int swqe_index;
- int i, k;
-
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
- struct ehea_port_res *pr = &port->port_res[i];
- k = 0;
- swqe = ehea_get_swqe(pr->qp, &swqe_index);
- memset(swqe, 0, SWQE_HEADER_SIZE);
- atomic_dec(&pr->swqe_avail);
-
- swqe->tx_control |= EHEA_SWQE_PURGE;
- swqe->wr_id = SWQE_RESTART_CHECK;
- swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
- swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
- swqe->immediate_data_length = 80;
-
- ehea_post_swqe(pr->qp, swqe);
-
- while (pr->sq_restart_flag == 0) {
- msleep(5);
- if (++k == 100) {
- ehea_error("HW/SW queues out of sync");
- ehea_schedule_port_reset(pr->port);
- return;
- }
- }
- }
-
- return;
-}
-
-
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
struct sk_buff *skb;
@@ -840,13 +793,6 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
cqe_counter++;
rmb();
-
- if (cqe->wr_id == SWQE_RESTART_CHECK) {
- pr->sq_restart_flag = 1;
- swqe_av++;
- break;
- }
-
if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
ehea_error("Bad send completion status=0x%04X",
cqe->status);
@@ -2729,10 +2675,8 @@ static void ehea_flush_sq(struct ehea_port *port)
int k = 0;
while (atomic_read(&pr->swqe_avail) < swqe_max) {
msleep(5);
- if (++k == 20) {
- ehea_error("WARNING: sq not flushed completely");
+ if (++k == 20)
break;
- }
}
}
}
@@ -2973,7 +2917,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
port_napi_disable(port);
mutex_unlock(&port->port_lock);
}
- reset_sq_restart_flag(port);
}
/* Unregister old memory region */
@@ -3008,7 +2951,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
mutex_lock(&port->port_lock);
port_napi_enable(port);
ret = ehea_restart_qps(dev);
- check_sqs(port);
if (!ret)
netif_wake_queue(dev);
mutex_unlock(&port->port_lock);
diff --git a/trunk/drivers/net/ibm_newemac/debug.c b/trunk/drivers/net/ibm_newemac/debug.c
index 8c6c1e2a8750..3995fafc1e08 100644
--- a/trunk/drivers/net/ibm_newemac/debug.c
+++ b/trunk/drivers/net/ibm_newemac/debug.c
@@ -238,7 +238,7 @@ void emac_dbg_dump_all(void)
}
#if defined(CONFIG_MAGIC_SYSRQ)
-static void emac_sysrq_handler(int key)
+static void emac_sysrq_handler(int key, struct tty_struct *tty)
{
emac_dbg_dump_all();
}
diff --git a/trunk/drivers/net/ibmveth.c b/trunk/drivers/net/ibmveth.c
index 4734c939ad03..2602852cc55a 100644
--- a/trunk/drivers/net/ibmveth.c
+++ b/trunk/drivers/net/ibmveth.c
@@ -1113,8 +1113,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
struct ibmveth_adapter *adapter = netdev_priv(dev);
struct vio_dev *viodev = adapter->vdev;
int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
- int i, rc;
- int need_restart = 0;
+ int i;
if (new_mtu < IBMVETH_MAX_MTU)
return -EINVAL;
@@ -1128,32 +1127,35 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
/* Deactivate all the buffer pools so that the next loop can activate
only the buffer pools necessary to hold the new MTU */
- if (netif_running(adapter->netdev)) {
- need_restart = 1;
- adapter->pool_config = 1;
- ibmveth_close(adapter->netdev);
- adapter->pool_config = 0;
- }
+ for (i = 0; i < IbmVethNumBufferPools; i++)
+ if (adapter->rx_buff_pool[i].active) {
+ ibmveth_free_buffer_pool(adapter,
+ &adapter->rx_buff_pool[i]);
+ adapter->rx_buff_pool[i].active = 0;
+ }
/* Look for an active buffer pool that can hold the new MTU */
for(i = 0; i < IbmVethNumBufferPools; i++) {
adapter->rx_buff_pool[i].active = 1;
if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
- dev->mtu = new_mtu;
- vio_cmo_set_dev_desired(viodev,
+ if (netif_running(adapter->netdev)) {
+ adapter->pool_config = 1;
+ ibmveth_close(adapter->netdev);
+ adapter->pool_config = 0;
+ dev->mtu = new_mtu;
+ vio_cmo_set_dev_desired(viodev,
ibmveth_get_desired_dma
(viodev));
- if (need_restart) {
return ibmveth_open(adapter->netdev);
}
+ dev->mtu = new_mtu;
+ vio_cmo_set_dev_desired(viodev,
+ ibmveth_get_desired_dma
+ (viodev));
return 0;
}
}
-
- if (need_restart && (rc = ibmveth_open(adapter->netdev)))
- return rc;
-
return -EINVAL;
}
diff --git a/trunk/drivers/net/ll_temac_main.c b/trunk/drivers/net/ll_temac_main.c
index bdf2149e5296..c7b624711f5e 100644
--- a/trunk/drivers/net/ll_temac_main.c
+++ b/trunk/drivers/net/ll_temac_main.c
@@ -902,8 +902,8 @@ temac_poll_controller(struct net_device *ndev)
disable_irq(lp->tx_irq);
disable_irq(lp->rx_irq);
- ll_temac_rx_irq(lp->tx_irq, ndev);
- ll_temac_tx_irq(lp->rx_irq, ndev);
+ ll_temac_rx_irq(lp->tx_irq, lp);
+ ll_temac_tx_irq(lp->rx_irq, lp);
enable_irq(lp->tx_irq);
enable_irq(lp->rx_irq);
diff --git a/trunk/drivers/net/netxen/netxen_nic.h b/trunk/drivers/net/netxen/netxen_nic.h
index 6dca3574e355..ffa1b9ce1cc5 100644
--- a/trunk/drivers/net/netxen/netxen_nic.h
+++ b/trunk/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 74
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.74"
+#define _NETXEN_NIC_LINUX_SUBVERSION 73
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.73"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
diff --git a/trunk/drivers/net/netxen/netxen_nic_init.c b/trunk/drivers/net/netxen/netxen_nic_init.c
index cabae7bb1fc6..c865dda2adf1 100644
--- a/trunk/drivers/net/netxen/netxen_nic_init.c
+++ b/trunk/drivers/net/netxen/netxen_nic_init.c
@@ -1805,6 +1805,8 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
netxen_ctx_msg msg = 0;
struct list_head *head;
+ spin_lock(&rds_ring->lock);
+
producer = rds_ring->producer;
head = &rds_ring->free_list;
@@ -1851,6 +1853,8 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
NETXEN_RCV_PRODUCER_OFFSET), msg);
}
}
+
+ spin_unlock(&rds_ring->lock);
}
static void
diff --git a/trunk/drivers/net/netxen/netxen_nic_main.c b/trunk/drivers/net/netxen/netxen_nic_main.c
index 73d314592230..fd86e18604e6 100644
--- a/trunk/drivers/net/netxen/netxen_nic_main.c
+++ b/trunk/drivers/net/netxen/netxen_nic_main.c
@@ -2032,6 +2032,8 @@ struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
struct netxen_adapter *adapter = netdev_priv(netdev);
struct net_device_stats *stats = &netdev->stats;
+ memset(stats, 0, sizeof(*stats));
+
stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
stats->tx_packets = adapter->stats.xmitfinished;
stats->rx_bytes = adapter->stats.rxbytes;
@@ -2131,16 +2133,9 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_NET_POLL_CONTROLLER
static void netxen_nic_poll_controller(struct net_device *netdev)
{
- int ring;
- struct nx_host_sds_ring *sds_ring;
struct netxen_adapter *adapter = netdev_priv(netdev);
- struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
-
disable_irq(adapter->irq);
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
- netxen_intr(adapter->irq, sds_ring);
- }
+ netxen_intr(adapter->irq, adapter);
enable_irq(adapter->irq);
}
#endif
diff --git a/trunk/drivers/net/pcmcia/pcnet_cs.c b/trunk/drivers/net/pcmcia/pcnet_cs.c
index 49279b0ee526..c3edfe4c2651 100644
--- a/trunk/drivers/net/pcmcia/pcnet_cs.c
+++ b/trunk/drivers/net/pcmcia/pcnet_cs.c
@@ -1637,7 +1637,6 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b),
PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0),
PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956),
- PCMCIA_DEVICE_PROD_ID12("KENTRONICS", "KEP-230", 0xaf8144c9, 0x868f6616),
PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64),
PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5),
PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3),
diff --git a/trunk/drivers/net/phy/phy_device.c b/trunk/drivers/net/phy/phy_device.c
index 16ddc77313cb..c0761197c07e 100644
--- a/trunk/drivers/net/phy/phy_device.c
+++ b/trunk/drivers/net/phy/phy_device.c
@@ -466,8 +466,6 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
phydev->interface = interface;
- phydev->state = PHY_READY;
-
/* Do initial configuration here, now that
* we have certain key parameters
* (dev_flags and interface) */
diff --git a/trunk/drivers/net/pxa168_eth.c b/trunk/drivers/net/pxa168_eth.c
deleted file mode 100644
index 85eddda276bd..000000000000
--- a/trunk/drivers/net/pxa168_eth.c
+++ /dev/null
@@ -1,1666 +0,0 @@
-/*
- * PXA168 ethernet driver.
- * Most of the code is derived from mv643xx ethernet driver.
- *
- * Copyright (C) 2010 Marvell International Ltd.
- * Sachin Sanap
- * Philip Rakity
- * Mark Brown
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#define DRIVER_NAME "pxa168-eth"
-#define DRIVER_VERSION "0.3"
-
-/*
- * Registers
- */
-
-#define PHY_ADDRESS 0x0000
-#define SMI 0x0010
-#define PORT_CONFIG 0x0400
-#define PORT_CONFIG_EXT 0x0408
-#define PORT_COMMAND 0x0410
-#define PORT_STATUS 0x0418
-#define HTPR 0x0428
-#define SDMA_CONFIG 0x0440
-#define SDMA_CMD 0x0448
-#define INT_CAUSE 0x0450
-#define INT_W_CLEAR 0x0454
-#define INT_MASK 0x0458
-#define ETH_F_RX_DESC_0 0x0480
-#define ETH_C_RX_DESC_0 0x04A0
-#define ETH_C_TX_DESC_1 0x04E4
-
-/* smi register */
-#define SMI_BUSY (1 << 28) /* 0 - Write, 1 - Read */
-#define SMI_R_VALID (1 << 27) /* 0 - Write, 1 - Read */
-#define SMI_OP_W (0 << 26) /* Write operation */
-#define SMI_OP_R (1 << 26) /* Read operation */
-
-#define PHY_WAIT_ITERATIONS 10
-
-#define PXA168_ETH_PHY_ADDR_DEFAULT 0
-/* RX & TX descriptor command */
-#define BUF_OWNED_BY_DMA (1 << 31)
-
-/* RX descriptor status */
-#define RX_EN_INT (1 << 23)
-#define RX_FIRST_DESC (1 << 17)
-#define RX_LAST_DESC (1 << 16)
-#define RX_ERROR (1 << 15)
-
-/* TX descriptor command */
-#define TX_EN_INT (1 << 23)
-#define TX_GEN_CRC (1 << 22)
-#define TX_ZERO_PADDING (1 << 18)
-#define TX_FIRST_DESC (1 << 17)
-#define TX_LAST_DESC (1 << 16)
-#define TX_ERROR (1 << 15)
-
-/* SDMA_CMD */
-#define SDMA_CMD_AT (1 << 31)
-#define SDMA_CMD_TXDL (1 << 24)
-#define SDMA_CMD_TXDH (1 << 23)
-#define SDMA_CMD_AR (1 << 15)
-#define SDMA_CMD_ERD (1 << 7)
-
-/* Bit definitions of the Port Config Reg */
-#define PCR_HS (1 << 12)
-#define PCR_EN (1 << 7)
-#define PCR_PM (1 << 0)
-
-/* Bit definitions of the Port Config Extend Reg */
-#define PCXR_2BSM (1 << 28)
-#define PCXR_DSCP_EN (1 << 21)
-#define PCXR_MFL_1518 (0 << 14)
-#define PCXR_MFL_1536 (1 << 14)
-#define PCXR_MFL_2048 (2 << 14)
-#define PCXR_MFL_64K (3 << 14)
-#define PCXR_FLP (1 << 11)
-#define PCXR_PRIO_TX_OFF 3
-#define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF)
-
-/* Bit definitions of the SDMA Config Reg */
-#define SDCR_BSZ_OFF 12
-#define SDCR_BSZ8 (3 << SDCR_BSZ_OFF)
-#define SDCR_BSZ4 (2 << SDCR_BSZ_OFF)
-#define SDCR_BSZ2 (1 << SDCR_BSZ_OFF)
-#define SDCR_BSZ1 (0 << SDCR_BSZ_OFF)
-#define SDCR_BLMR (1 << 6)
-#define SDCR_BLMT (1 << 7)
-#define SDCR_RIFB (1 << 9)
-#define SDCR_RC_OFF 2
-#define SDCR_RC_MAX_RETRANS (0xf << SDCR_RC_OFF)
-
-/*
- * Bit definitions of the Interrupt Cause Reg
- * and Interrupt MASK Reg is the same
- */
-#define ICR_RXBUF (1 << 0)
-#define ICR_TXBUF_H (1 << 2)
-#define ICR_TXBUF_L (1 << 3)
-#define ICR_TXEND_H (1 << 6)
-#define ICR_TXEND_L (1 << 7)
-#define ICR_RXERR (1 << 8)
-#define ICR_TXERR_H (1 << 10)
-#define ICR_TXERR_L (1 << 11)
-#define ICR_TX_UDR (1 << 13)
-#define ICR_MII_CH (1 << 28)
-
-#define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\
- ICR_TXERR_H | ICR_TXERR_L |\
- ICR_TXEND_H | ICR_TXEND_L |\
- ICR_RXBUF | ICR_RXERR | ICR_MII_CH)
-
-#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
-
-#define NUM_RX_DESCS 64
-#define NUM_TX_DESCS 64
-
-#define HASH_ADD 0
-#define HASH_DELETE 1
-#define HASH_ADDR_TABLE_SIZE 0x4000 /* 16K (1/2K address - PCR_HS == 1) */
-#define HOP_NUMBER 12
-
-/* Bit definitions for Port status */
-#define PORT_SPEED_100 (1 << 0)
-#define FULL_DUPLEX (1 << 1)
-#define FLOW_CONTROL_ENABLED (1 << 2)
-#define LINK_UP (1 << 3)
-
-/* Bit definitions for work to be done */
-#define WORK_LINK (1 << 0)
-#define WORK_TX_DONE (1 << 1)
-
-/*
- * Misc definitions.
- */
-#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
-
-struct rx_desc {
- u32 cmd_sts; /* Descriptor command status */
- u16 byte_cnt; /* Descriptor buffer byte count */
- u16 buf_size; /* Buffer size */
- u32 buf_ptr; /* Descriptor buffer pointer */
- u32 next_desc_ptr; /* Next descriptor pointer */
-};
-
-struct tx_desc {
- u32 cmd_sts; /* Command/status field */
- u16 reserved;
- u16 byte_cnt; /* buffer byte count */
- u32 buf_ptr; /* pointer to buffer for this descriptor */
- u32 next_desc_ptr; /* Pointer to next descriptor */
-};
-
-struct pxa168_eth_private {
- int port_num; /* User Ethernet port number */
-
- int rx_resource_err; /* Rx ring resource error flag */
-
- /* Next available and first returning Rx resource */
- int rx_curr_desc_q, rx_used_desc_q;
-
- /* Next available and first returning Tx resource */
- int tx_curr_desc_q, tx_used_desc_q;
-
- struct rx_desc *p_rx_desc_area;
- dma_addr_t rx_desc_dma;
- int rx_desc_area_size;
- struct sk_buff **rx_skb;
-
- struct tx_desc *p_tx_desc_area;
- dma_addr_t tx_desc_dma;
- int tx_desc_area_size;
- struct sk_buff **tx_skb;
-
- struct work_struct tx_timeout_task;
-
- struct net_device *dev;
- struct napi_struct napi;
- u8 work_todo;
- int skb_size;
-
- struct net_device_stats stats;
- /* Size of Tx Ring per queue */
- int tx_ring_size;
- /* Number of tx descriptors in use */
- int tx_desc_count;
- /* Size of Rx Ring per queue */
- int rx_ring_size;
- /* Number of rx descriptors in use */
- int rx_desc_count;
-
- /*
- * Used in case RX Ring is empty, which can occur when
- * system does not have resources (skb's)
- */
- struct timer_list timeout;
- struct mii_bus *smi_bus;
- struct phy_device *phy;
-
- /* clock */
- struct clk *clk;
- struct pxa168_eth_platform_data *pd;
- /*
- * Ethernet controller base address.
- */
- void __iomem *base;
-
- /* Pointer to the hardware address filter table */
- void *htpr;
- dma_addr_t htpr_dma;
-};
-
-struct addr_table_entry {
- __le32 lo;
- __le32 hi;
-};
-
-/* Bit fields of a Hash Table Entry */
-enum hash_table_entry {
- HASH_ENTRY_VALID = 1,
- SKIP = 2,
- HASH_ENTRY_RECEIVE_DISCARD = 4,
- HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
-};
-
-static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
-static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
-static int pxa168_init_hw(struct pxa168_eth_private *pep);
-static void eth_port_reset(struct net_device *dev);
-static void eth_port_start(struct net_device *dev);
-static int pxa168_eth_open(struct net_device *dev);
-static int pxa168_eth_stop(struct net_device *dev);
-static int ethernet_phy_setup(struct net_device *dev);
-
-static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
-{
- return readl(pep->base + offset);
-}
-
-static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
-{
- writel(data, pep->base + offset);
-}
-
-static void abort_dma(struct pxa168_eth_private *pep)
-{
- int delay;
- int max_retries = 40;
-
- do {
- wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
- udelay(100);
-
- delay = 10;
- while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
- && delay-- > 0) {
- udelay(10);
- }
- } while (max_retries-- > 0 && delay <= 0);
-
- if (max_retries <= 0)
- printk(KERN_ERR "%s : DMA Stuck\n", __func__);
-}
-
-static int ethernet_phy_get(struct pxa168_eth_private *pep)
-{
- unsigned int reg_data;
-
- reg_data = rdl(pep, PHY_ADDRESS);
-
- return (reg_data >> (5 * pep->port_num)) & 0x1f;
-}
-
-static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
-{
- u32 reg_data;
- int addr_shift = 5 * pep->port_num;
-
- reg_data = rdl(pep, PHY_ADDRESS);
- reg_data &= ~(0x1f << addr_shift);
- reg_data |= (phy_addr & 0x1f) << addr_shift;
- wrl(pep, PHY_ADDRESS, reg_data);
-}
-
-static void ethernet_phy_reset(struct pxa168_eth_private *pep)
-{
- int data;
-
- data = phy_read(pep->phy, MII_BMCR);
- if (data < 0)
- return;
-
- data |= BMCR_RESET;
- if (phy_write(pep->phy, MII_BMCR, data) < 0)
- return;
-
- do {
- data = phy_read(pep->phy, MII_BMCR);
- } while (data >= 0 && data & BMCR_RESET);
-}
-
-static void rxq_refill(struct net_device *dev)
-{
- struct pxa168_eth_private *pep = netdev_priv(dev);
- struct sk_buff *skb;
- struct rx_desc *p_used_rx_desc;
- int used_rx_desc;
-
- while (pep->rx_desc_count < pep->rx_ring_size) {
- int size;
-
- skb = dev_alloc_skb(pep->skb_size);
- if (!skb)
- break;
- if (SKB_DMA_REALIGN)
- skb_reserve(skb, SKB_DMA_REALIGN);
- pep->rx_desc_count++;
- /* Get 'used' Rx descriptor */
- used_rx_desc = pep->rx_used_desc_q;
- p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
- size = skb->end - skb->data;
- p_used_rx_desc->buf_ptr = dma_map_single(NULL,
- skb->data,
- size,
- DMA_FROM_DEVICE);
- p_used_rx_desc->buf_size = size;
- pep->rx_skb[used_rx_desc] = skb;
-
- /* Return the descriptor to DMA ownership */
- wmb();
- p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
- wmb();
-
- /* Move the used descriptor pointer to the next descriptor */
- pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
-
- /* Any Rx return cancels the Rx resource error status */
- pep->rx_resource_err = 0;
-
- skb_reserve(skb, ETH_HW_IP_ALIGN);
- }
-
- /*
- * If RX ring is empty of SKB, set a timer to try allocating
- * again at a later time.
- */
- if (pep->rx_desc_count == 0) {
- pep->timeout.expires = jiffies + (HZ / 10);
- add_timer(&pep->timeout);
- }
-}
-
-static inline void rxq_refill_timer_wrapper(unsigned long data)
-{
- struct pxa168_eth_private *pep = (void *)data;
- napi_schedule(&pep->napi);
-}
-
-static inline u8 flip_8_bits(u8 x)
-{
- return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
- | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
- | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
- | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
-}
-
-static void nibble_swap_every_byte(unsigned char *mac_addr)
-{
- int i;
- for (i = 0; i < ETH_ALEN; i++) {
- mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
- ((mac_addr[i] & 0xf0) >> 4);
- }
-}
-
-static void inverse_every_nibble(unsigned char *mac_addr)
-{
- int i;
- for (i = 0; i < ETH_ALEN; i++)
- mac_addr[i] = flip_8_bits(mac_addr[i]);
-}
-
-/*
- * ----------------------------------------------------------------------------
- * This function will calculate the hash function of the address.
- * Inputs
- * mac_addr_orig - MAC address.
- * Outputs
- * return the calculated entry.
- */
-static u32 hash_function(unsigned char *mac_addr_orig)
-{
- u32 hash_result;
- u32 addr0;
- u32 addr1;
- u32 addr2;
- u32 addr3;
- unsigned char mac_addr[ETH_ALEN];
-
- /* Make a copy of MAC address since we are going to performe bit
- * operations on it
- */
- memcpy(mac_addr, mac_addr_orig, ETH_ALEN);
-
- nibble_swap_every_byte(mac_addr);
- inverse_every_nibble(mac_addr);
-
- addr0 = (mac_addr[5] >> 2) & 0x3f;
- addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
- addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
- addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);
-
- hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
- hash_result = hash_result & 0x07ff;
- return hash_result;
-}
-
-/*
- * ----------------------------------------------------------------------------
- * This function will add/del an entry to the address table.
- * Inputs
- * pep - ETHERNET .
- * mac_addr - MAC address.
- * skip - if 1, skip this address.Used in case of deleting an entry which is a
- * part of chain in the hash table.We cant just delete the entry since
- * that will break the chain.We need to defragment the tables time to
- * time.
- * rd - 0 Discard packet upon match.
- * - 1 Receive packet upon match.
- * Outputs
- * address table entry is added/deleted.
- * 0 if success.
- * -ENOSPC if table full
- */
-static int add_del_hash_entry(struct pxa168_eth_private *pep,
- unsigned char *mac_addr,
- u32 rd, u32 skip, int del)
-{
- struct addr_table_entry *entry, *start;
- u32 new_high;
- u32 new_low;
- u32 i;
-
- new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
- | (((mac_addr[1] >> 0) & 0xf) << 11)
- | (((mac_addr[0] >> 4) & 0xf) << 7)
- | (((mac_addr[0] >> 0) & 0xf) << 3)
- | (((mac_addr[3] >> 4) & 0x1) << 31)
- | (((mac_addr[3] >> 0) & 0xf) << 27)
- | (((mac_addr[2] >> 4) & 0xf) << 23)
- | (((mac_addr[2] >> 0) & 0xf) << 19)
- | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
- | HASH_ENTRY_VALID;
-
- new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
- | (((mac_addr[5] >> 0) & 0xf) << 11)
- | (((mac_addr[4] >> 4) & 0xf) << 7)
- | (((mac_addr[4] >> 0) & 0xf) << 3)
- | (((mac_addr[3] >> 5) & 0x7) << 0);
-
- /*
- * Pick the appropriate table, start scanning for free/reusable
- * entries at the index obtained by hashing the specified MAC address
- */
- start = (struct addr_table_entry *)(pep->htpr);
- entry = start + hash_function(mac_addr);
- for (i = 0; i < HOP_NUMBER; i++) {
- if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
- break;
- } else {
- /* if same address put in same position */
- if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
- (new_low & 0xfffffff8)) &&
- (le32_to_cpu(entry->hi) == new_high)) {
- break;
- }
- }
- if (entry == start + 0x7ff)
- entry = start;
- else
- entry++;
- }
-
- if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
- (le32_to_cpu(entry->hi) != new_high) && del)
- return 0;
-
- if (i == HOP_NUMBER) {
- if (!del) {
- printk(KERN_INFO "%s: table section is full, need to "
- "move to 16kB implementation?\n",
- __FILE__);
- return -ENOSPC;
- } else
- return 0;
- }
-
- /*
- * Update the selected entry
- */
- if (del) {
- entry->hi = 0;
- entry->lo = 0;
- } else {
- entry->hi = cpu_to_le32(new_high);
- entry->lo = cpu_to_le32(new_low);
- }
-
- return 0;
-}
-
-/*
- * ----------------------------------------------------------------------------
- * Create an addressTable entry from MAC address info
- * found in the specifed net_device struct
- *
- * Input : pointer to ethernet interface network device structure
- * Output : N/A
- */
-static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
- unsigned char *oaddr,
- unsigned char *addr)
-{
- /* Delete old entry */
- if (oaddr)
- add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
- /* Add new entry */
- add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
-}
-
-static int init_hash_table(struct pxa168_eth_private *pep)
-{
- /*
- * Hardware expects CPU to build a hash table based on a predefined
- * hash function and populate it based on hardware address. The
- * location of the hash table is identified by 32-bit pointer stored
- * in HTPR internal register. Two possible sizes exists for the hash
- * table 8kB (256kB of DRAM required (4 x 64 kB banks)) and 1/2kB
- * (16kB of DRAM required (4 x 4 kB banks)).We currently only support
- * 1/2kB.
- */
- /* TODO: Add support for 8kB hash table and alternative hash
- * function.Driver can dynamically switch to them if the 1/2kB hash
- * table is full.
- */
- if (pep->htpr == NULL) {
- pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
- HASH_ADDR_TABLE_SIZE,
- &pep->htpr_dma, GFP_KERNEL);
- if (pep->htpr == NULL)
- return -ENOMEM;
- }
- memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
- wrl(pep, HTPR, pep->htpr_dma);
- return 0;
-}
-
-static void pxa168_eth_set_rx_mode(struct net_device *dev)
-{
- struct pxa168_eth_private *pep = netdev_priv(dev);
- struct netdev_hw_addr *ha;
- u32 val;
-
- val = rdl(pep, PORT_CONFIG);
- if (dev->flags & IFF_PROMISC)
- val |= PCR_PM;
- else
- val &= ~PCR_PM;
- wrl(pep, PORT_CONFIG, val);
-
- /*
- * Remove the old list of MAC address and add dev->addr
- * and multicast address.
- */
- memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
- update_hash_table_mac_address(pep, NULL, dev->dev_addr);
-
- netdev_for_each_mc_addr(ha, dev)
- update_hash_table_mac_address(pep, NULL, ha->addr);
-}
-
-static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
-{
- struct sockaddr *sa = addr;
- struct pxa168_eth_private *pep = netdev_priv(dev);
- unsigned char oldMac[ETH_ALEN];
-
- if (!is_valid_ether_addr(sa->sa_data))
- return -EINVAL;
- memcpy(oldMac, dev->dev_addr, ETH_ALEN);
- memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
- netif_addr_lock_bh(dev);
- update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
- netif_addr_unlock_bh(dev);
- return 0;
-}
-
-static void eth_port_start(struct net_device *dev)
-{
- unsigned int val = 0;
- struct pxa168_eth_private *pep = netdev_priv(dev);
- int tx_curr_desc, rx_curr_desc;
-
- /* Perform PHY reset, if there is a PHY. */
- if (pep->phy != NULL) {
- struct ethtool_cmd cmd;
-
- pxa168_get_settings(pep->dev, &cmd);
- ethernet_phy_reset(pep);
- pxa168_set_settings(pep->dev, &cmd);
- }
-
- /* Assignment of Tx CTRP of given queue */
- tx_curr_desc = pep->tx_curr_desc_q;
- wrl(pep, ETH_C_TX_DESC_1,
- (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));
-
- /* Assignment of Rx CRDP of given queue */
- rx_curr_desc = pep->rx_curr_desc_q;
- wrl(pep, ETH_C_RX_DESC_0,
- (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
-
- wrl(pep, ETH_F_RX_DESC_0,
- (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
-
- /* Clear all interrupts */
- wrl(pep, INT_CAUSE, 0);
-
- /* Enable all interrupts for receive, transmit and error. */
- wrl(pep, INT_MASK, ALL_INTS);
-
- val = rdl(pep, PORT_CONFIG);
- val |= PCR_EN;
- wrl(pep, PORT_CONFIG, val);
-
- /* Start RX DMA engine */
- val = rdl(pep, SDMA_CMD);
- val |= SDMA_CMD_ERD;
- wrl(pep, SDMA_CMD, val);
-}
-
-static void eth_port_reset(struct net_device *dev)
-{
- struct pxa168_eth_private *pep = netdev_priv(dev);
- unsigned int val = 0;
-
- /* Stop all interrupts for receive, transmit and error. */
- wrl(pep, INT_MASK, 0);
-
- /* Clear all interrupts */
- wrl(pep, INT_CAUSE, 0);
-
- /* Stop RX DMA */
- val = rdl(pep, SDMA_CMD);
- val &= ~SDMA_CMD_ERD; /* abort dma command */
-
- /* Abort any transmit and receive operations and put DMA
- * in idle state.
- */
- abort_dma(pep);
-
- /* Disable port */
- val = rdl(pep, PORT_CONFIG);
- val &= ~PCR_EN;
- wrl(pep, PORT_CONFIG, val);
-}
-
-/*
- * txq_reclaim - Free the tx desc data for completed descriptors
- * If force is non-zero, frees uncompleted descriptors as well
- */
-static int txq_reclaim(struct net_device *dev, int force)
-{
- struct pxa168_eth_private *pep = netdev_priv(dev);
- struct tx_desc *desc;
- u32 cmd_sts;
- struct sk_buff *skb;
- int tx_index;
- dma_addr_t addr;
- int count;
- int released = 0;
-
- netif_tx_lock(dev);
-
- pep->work_todo &= ~WORK_TX_DONE;
- while (pep->tx_desc_count > 0) {
- tx_index = pep->tx_used_desc_q;
- desc = &pep->p_tx_desc_area[tx_index];
- cmd_sts = desc->cmd_sts;
- if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
- if (released > 0) {
- goto txq_reclaim_end;
- } else {
- released = -1;
- goto txq_reclaim_end;
- }
- }
- pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
- pep->tx_desc_count--;
- addr = desc->buf_ptr;
- count = desc->byte_cnt;
- skb = pep->tx_skb[tx_index];
- if (skb)
- pep->tx_skb[tx_index] = NULL;
-
- if (cmd_sts & TX_ERROR) {
- if (net_ratelimit())
- printk(KERN_ERR "%s: Error in TX\n", dev->name);
- dev->stats.tx_errors++;
- }
- dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
- if (skb)
- dev_kfree_skb_irq(skb);
- released++;
- }
-txq_reclaim_end:
- netif_tx_unlock(dev);
- return released;
-}
-
-static void pxa168_eth_tx_timeout(struct net_device *dev)
-{
- struct pxa168_eth_private *pep = netdev_priv(dev);
-
- printk(KERN_INFO "%s: TX timeout desc_count %d\n",
- dev->name, pep->tx_desc_count);
-
- schedule_work(&pep->tx_timeout_task);
-}
-
-static void pxa168_eth_tx_timeout_task(struct work_struct *work)
-{
- struct pxa168_eth_private *pep = container_of(work,
- struct pxa168_eth_private,
- tx_timeout_task);
- struct net_device *dev = pep->dev;
- pxa168_eth_stop(dev);
- pxa168_eth_open(dev);
-}
-
-static int rxq_process(struct net_device *dev, int budget)
-{
- struct pxa168_eth_private *pep = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- unsigned int received_packets = 0;
- struct sk_buff *skb;
-
- while (budget-- > 0) {
- int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
- struct rx_desc *rx_desc;
- unsigned int cmd_sts;
-
- /* Do not process Rx ring in case of Rx ring resource error */
- if (pep->rx_resource_err)
- break;
- rx_curr_desc = pep->rx_curr_desc_q;
- rx_used_desc = pep->rx_used_desc_q;
- rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
- cmd_sts = rx_desc->cmd_sts;
- rmb();
- if (cmd_sts & (BUF_OWNED_BY_DMA))
- break;
- skb = pep->rx_skb[rx_curr_desc];
- pep->rx_skb[rx_curr_desc] = NULL;
-
- rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
- pep->rx_curr_desc_q = rx_next_curr_desc;
-
- /* Rx descriptors exhausted. */
- /* Set the Rx ring resource error flag */
- if (rx_next_curr_desc == rx_used_desc)
- pep->rx_resource_err = 1;
- pep->rx_desc_count--;
- dma_unmap_single(NULL, rx_desc->buf_ptr,
- rx_desc->buf_size,
- DMA_FROM_DEVICE);
- received_packets++;
- /*
- * Update statistics.
-		 * Note that the byte count includes the 4-byte CRC
- */
- stats->rx_packets++;
- stats->rx_bytes += rx_desc->byte_cnt;
- /*
-		 * If a packet is received without both the first and last
-		 * bits set, or with the error summary bit set, it needs to
-		 * be dropped.
- */
- if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
- (RX_FIRST_DESC | RX_LAST_DESC))
- || (cmd_sts & RX_ERROR)) {
-
- stats->rx_dropped++;
- if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
- (RX_FIRST_DESC | RX_LAST_DESC)) {
- if (net_ratelimit())
- printk(KERN_ERR
- "%s: Rx pkt on multiple desc\n",
- dev->name);
- }
- if (cmd_sts & RX_ERROR)
- stats->rx_errors++;
- dev_kfree_skb_irq(skb);
- } else {
- /*
- * The -4 is for the CRC in the trailer of the
- * received packet
- */
- skb_put(skb, rx_desc->byte_cnt - 4);
- skb->protocol = eth_type_trans(skb, dev);
- netif_receive_skb(skb);
- }
- dev->last_rx = jiffies;
- }
- /* Fill RX ring with skb's */
- rxq_refill(dev);
- return received_packets;
-}
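As an illustration of the CRC accounting in rxq_process() above (a hypothetical helper, not part of the driver): the descriptor byte count includes the trailing 4-byte FCS, so the length passed up the stack is byte_cnt - 4; a minimum-size 64-byte frame therefore yields 60 bytes of header and payload.

static inline unsigned int rx_frame_len(unsigned int byte_cnt)
{
	/* The hardware reports the length including the 4-byte FCS,
	 * so strip it before the skb is handed to the network stack.
	 */
	return byte_cnt - 4;
}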
-
-static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
- struct net_device *dev)
-{
- u32 icr;
- int ret = 0;
-
- icr = rdl(pep, INT_CAUSE);
- if (icr == 0)
- return IRQ_NONE;
-
- wrl(pep, INT_CAUSE, ~icr);
- if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
- pep->work_todo |= WORK_TX_DONE;
- ret = 1;
- }
- if (icr & ICR_RXBUF)
- ret = 1;
- if (icr & ICR_MII_CH) {
- pep->work_todo |= WORK_LINK;
- ret = 1;
- }
- return ret;
-}
-
-static void handle_link_event(struct pxa168_eth_private *pep)
-{
- struct net_device *dev = pep->dev;
- u32 port_status;
- int speed;
- int duplex;
- int fc;
-
- port_status = rdl(pep, PORT_STATUS);
- if (!(port_status & LINK_UP)) {
- if (netif_carrier_ok(dev)) {
- printk(KERN_INFO "%s: link down\n", dev->name);
- netif_carrier_off(dev);
- txq_reclaim(dev, 1);
- }
- return;
- }
- if (port_status & PORT_SPEED_100)
- speed = 100;
- else
- speed = 10;
-
- duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
- fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
- printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
- "flow control %sabled\n", dev->name,
- speed, duplex ? "full" : "half", fc ? "en" : "dis");
- if (!netif_carrier_ok(dev))
- netif_carrier_on(dev);
-}
-
-static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
-{
- struct net_device *dev = (struct net_device *)dev_id;
- struct pxa168_eth_private *pep = netdev_priv(dev);
-
- if (unlikely(!pxa168_eth_collect_events(pep, dev)))
- return IRQ_NONE;
- /* Disable interrupts */
- wrl(pep, INT_MASK, 0);
- napi_schedule(&pep->napi);
- return IRQ_HANDLED;
-}
-
-static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
-{
- int skb_size;
-
- /*
- * Reserve 2+14 bytes for an ethernet header (the hardware
- * automatically prepends 2 bytes of dummy data to each
- * received packet), 16 bytes for up to four VLAN tags, and
- * 4 bytes for the trailing FCS -- 36 bytes total.
- */
- skb_size = pep->dev->mtu + 36;
-
- /*
- * Make sure that the skb size is a multiple of 8 bytes, as
- * the lower three bits of the receive descriptor's buffer
- * size field are ignored by the hardware.
- */
- pep->skb_size = (skb_size + 7) & ~7;
-
- /*
- * If NET_SKB_PAD is smaller than a cache line,
- * netdev_alloc_skb() will cause skb->data to be misaligned
- * to a cache line boundary. If this is the case, include
- * some extra space to allow re-aligning the data area.
- */
- pep->skb_size += SKB_DMA_REALIGN;
-
-}
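To make the sizing arithmetic in pxa168_eth_recalc_skb_size() concrete, here is a worked sketch (hypothetical helper; realign_pad stands in for SKB_DMA_REALIGN): for MTU 1500 the buffer is 1500 + 36 = 1536 bytes, already a multiple of 8, plus the cache-line realignment pad.

static inline int example_rx_buf_size(int mtu, int realign_pad)
{
	/* 2-byte HW prefix + 14-byte Ethernet header + 16 bytes of
	 * VLAN room + 4-byte FCS = 36 bytes of overhead.
	 */
	int size = mtu + 36;

	size = (size + 7) & ~7;		/* low 3 bits ignored by the HW */
	return size + realign_pad;	/* e.g. SKB_DMA_REALIGN */
}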
-
-static int set_port_config_ext(struct pxa168_eth_private *pep)
-{
- int skb_size;
-
- pxa168_eth_recalc_skb_size(pep);
- if (pep->skb_size <= 1518)
- skb_size = PCXR_MFL_1518;
- else if (pep->skb_size <= 1536)
- skb_size = PCXR_MFL_1536;
- else if (pep->skb_size <= 2048)
- skb_size = PCXR_MFL_2048;
- else
- skb_size = PCXR_MFL_64K;
-
- /* Extended Port Configuration */
- wrl(pep,
- PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */
- PCXR_DSCP_EN | /* Enable DSCP in IP */
- skb_size | PCXR_FLP | /* do not force link pass */
- PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */
-
- return 0;
-}
-
-static int pxa168_init_hw(struct pxa168_eth_private *pep)
-{
- int err = 0;
-
- /* Disable interrupts */
- wrl(pep, INT_MASK, 0);
- wrl(pep, INT_CAUSE, 0);
- /* Write to ICR to clear interrupts. */
- wrl(pep, INT_W_CLEAR, 0);
- /* Abort any transmit and receive operations and put DMA
- * in idle state.
- */
- abort_dma(pep);
- /* Initialize address hash table */
- err = init_hash_table(pep);
- if (err)
- return err;
- /* SDMA configuration */
- wrl(pep, SDMA_CONFIG, SDCR_BSZ8 | /* Burst size = 32 bytes */
- SDCR_RIFB | /* Rx interrupt on frame */
- SDCR_BLMT | /* Little endian transmit */
- SDCR_BLMR | /* Little endian receive */
- SDCR_RC_MAX_RETRANS); /* Max retransmit count */
- /* Port Configuration */
- wrl(pep, PORT_CONFIG, PCR_HS); /* Hash size is 1/2kb */
- set_port_config_ext(pep);
-
- return err;
-}
-
-static int rxq_init(struct net_device *dev)
-{
- struct pxa168_eth_private *pep = netdev_priv(dev);
- struct rx_desc *p_rx_desc;
- int size = 0, i = 0;
- int rx_desc_num = pep->rx_ring_size;
-
- /* Allocate RX skb rings */
- pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
- GFP_KERNEL);
- if (!pep->rx_skb) {
- printk(KERN_ERR "%s: Cannot alloc RX skb ring\n", dev->name);
- return -ENOMEM;
- }
- /* Allocate RX ring */
- pep->rx_desc_count = 0;
- size = pep->rx_ring_size * sizeof(struct rx_desc);
- pep->rx_desc_area_size = size;
- pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
- &pep->rx_desc_dma, GFP_KERNEL);
- if (!pep->p_rx_desc_area) {
- printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
- dev->name, size);
- goto out;
- }
- memset((void *)pep->p_rx_desc_area, 0, size);
- /* initialize the next_desc_ptr links in the Rx descriptors ring */
- p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area;
- for (i = 0; i < rx_desc_num; i++) {
- p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
- ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
- }
- /* Save Rx desc pointer to driver struct. */
- pep->rx_curr_desc_q = 0;
- pep->rx_used_desc_q = 0;
- pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
- return 0;
-out:
- kfree(pep->rx_skb);
- return -ENOMEM;
-}
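The modulo in the loop above is what closes the descriptor ring: entry i points at entry (i + 1) % n, so the last descriptor wraps back to the first (0 -> 1 -> 2 -> 3 -> 0 for a four-entry ring). A standalone sketch of the same linking, with hypothetical types:

struct ring_desc {
	dma_addr_t next;	/* bus address of the following descriptor */
};

static void link_ring(struct ring_desc *d, dma_addr_t base, int n)
{
	int i;

	for (i = 0; i < n; i++)
		d[i].next = base + ((i + 1) % n) * sizeof(struct ring_desc);
}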
-
-static void rxq_deinit(struct net_device *dev)
-{
- struct pxa168_eth_private *pep = netdev_priv(dev);
- int curr;
-
- /* Free preallocated skb's on RX rings */
- for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
- if (pep->rx_skb[curr]) {
- dev_kfree_skb(pep->rx_skb[curr]);
- pep->rx_desc_count--;
- }
- }
- if (pep->rx_desc_count)
- printk(KERN_ERR
- "Error in freeing Rx Ring. %d skb's still\n",
- pep->rx_desc_count);
- /* Free RX ring */
- if (pep->p_rx_desc_area)
- dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
- pep->p_rx_desc_area, pep->rx_desc_dma);
- kfree(pep->rx_skb);
-}
-
-static int txq_init(struct net_device *dev)
-{
- struct pxa168_eth_private *pep = netdev_priv(dev);
- struct tx_desc *p_tx_desc;
- int size = 0, i = 0;
- int tx_desc_num = pep->tx_ring_size;
-
- pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
- GFP_KERNEL);
- if (!pep->tx_skb) {
- printk(KERN_ERR "%s: Cannot alloc TX skb ring\n", dev->name);
- return -ENOMEM;
- }
- /* Allocate TX ring */
- pep->tx_desc_count = 0;
- size = pep->tx_ring_size * sizeof(struct tx_desc);
- pep->tx_desc_area_size = size;
- pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
- &pep->tx_desc_dma, GFP_KERNEL);
- if (!pep->p_tx_desc_area) {
- printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
- dev->name, size);
- goto out;
- }
- memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
- /* Initialize the next_desc_ptr links in the Tx descriptors ring */
- p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area;
- for (i = 0; i < tx_desc_num; i++) {
- p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
- ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
- }
- pep->tx_curr_desc_q = 0;
- pep->tx_used_desc_q = 0;
- pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
- return 0;
-out:
- kfree(pep->tx_skb);
- return -ENOMEM;
-}
-
-static void txq_deinit(struct net_device *dev)
-{
- struct pxa168_eth_private *pep = netdev_priv(dev);
-
- /* Free outstanding skb's on TX ring */
- txq_reclaim(dev, 1);
- BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
- /* Free TX ring */
- if (pep->p_tx_desc_area)
- dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
- pep->p_tx_desc_area, pep->tx_desc_dma);
- kfree(pep->tx_skb);
-}
-
-static int pxa168_eth_open(struct net_device *dev)
-{
- struct pxa168_eth_private *pep = netdev_priv(dev);
- int err;
-
- err = request_irq(dev->irq, pxa168_eth_int_handler,
- IRQF_DISABLED, dev->name, dev);
- if (err) {
- dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
- return -EAGAIN;
- }
- pep->rx_resource_err = 0;
- err = rxq_init(dev);
- if (err != 0)
- goto out_free_irq;
- err = txq_init(dev);
- if (err != 0)
- goto out_free_rx_skb;
- pep->rx_used_desc_q = 0;
- pep->rx_curr_desc_q = 0;
-
- /* Fill RX ring with skb's */
- rxq_refill(dev);
- pep->rx_used_desc_q = 0;
- pep->rx_curr_desc_q = 0;
- netif_carrier_off(dev);
- eth_port_start(dev);
- napi_enable(&pep->napi);
- return 0;
-out_free_rx_skb:
- rxq_deinit(dev);
-out_free_irq:
- free_irq(dev->irq, dev);
- return err;
-}
-
-static int pxa168_eth_stop(struct net_device *dev)
-{
- struct pxa168_eth_private *pep = netdev_priv(dev);
- eth_port_reset(dev);
-
- /* Disable interrupts */
- wrl(pep, INT_MASK, 0);
- wrl(pep, INT_CAUSE, 0);
- /* Write to ICR to clear interrupts. */
- wrl(pep, INT_W_CLEAR, 0);
- napi_disable(&pep->napi);
- del_timer_sync(&pep->timeout);
- netif_carrier_off(dev);
- free_irq(dev->irq, dev);
- rxq_deinit(dev);
- txq_deinit(dev);
-
- return 0;
-}
-
-static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
-{
- int retval;
- struct pxa168_eth_private *pep = netdev_priv(dev);
-
- if ((mtu > 9500) || (mtu < 68))
- return -EINVAL;
-
- dev->mtu = mtu;
- retval = set_port_config_ext(pep);
-
- if (!netif_running(dev))
- return 0;
-
- /*
-	 * Stop and then re-open the interface. This will allocate RX
-	 * skbs sized for the new MTU.
-	 * There is a risk that the re-open will fail if memory
-	 * is exhausted.
- */
- pxa168_eth_stop(dev);
- if (pxa168_eth_open(dev)) {
- dev_printk(KERN_ERR, &dev->dev,
- "fatal error on re-opening device after "
- "MTU change\n");
- }
-
- return 0;
-}
-
-static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
-{
- int tx_desc_curr;
-
- tx_desc_curr = pep->tx_curr_desc_q;
- pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
- BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
- pep->tx_desc_count++;
-
- return tx_desc_curr;
-}
-
-static int pxa168_rx_poll(struct napi_struct *napi, int budget)
-{
- struct pxa168_eth_private *pep =
- container_of(napi, struct pxa168_eth_private, napi);
- struct net_device *dev = pep->dev;
- int work_done = 0;
-
- if (unlikely(pep->work_todo & WORK_LINK)) {
- pep->work_todo &= ~(WORK_LINK);
- handle_link_event(pep);
- }
- /*
-	 * We call txq_reclaim on every poll, since interrupts are disabled
-	 * in NAPI and because of this we can miss the TX_DONE interrupt,
-	 * which is not updated in the interrupt status register.
- */
- txq_reclaim(dev, 0);
- if (netif_queue_stopped(dev)
- && pep->tx_ring_size - pep->tx_desc_count > 1) {
- netif_wake_queue(dev);
- }
- work_done = rxq_process(dev, budget);
- if (work_done < budget) {
- napi_complete(napi);
- wrl(pep, INT_MASK, ALL_INTS);
- }
-
- return work_done;
-}
-
-static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct pxa168_eth_private *pep = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct tx_desc *desc;
- int tx_index;
- int length;
-
- tx_index = eth_alloc_tx_desc_index(pep);
- desc = &pep->p_tx_desc_area[tx_index];
- length = skb->len;
- pep->tx_skb[tx_index] = skb;
- desc->byte_cnt = length;
- desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
- wmb();
- desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
- TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
- wmb();
- wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
-
- stats->tx_bytes += skb->len;
- stats->tx_packets++;
- dev->trans_start = jiffies;
- if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
-		/* We handled the current skb, but now we are out of space. */
- netif_stop_queue(dev);
- }
-
- return NETDEV_TX_OK;
-}
-
-static int smi_wait_ready(struct pxa168_eth_private *pep)
-{
- int i = 0;
-
- /* wait for the SMI register to become available */
- for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
- if (i == PHY_WAIT_ITERATIONS)
- return -ETIMEDOUT;
- msleep(10);
- }
-
- return 0;
-}
-
-static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
-{
- struct pxa168_eth_private *pep = bus->priv;
- int i = 0;
- int val;
-
- if (smi_wait_ready(pep)) {
- printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
- return -ETIMEDOUT;
- }
- wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
- /* now wait for the data to be valid */
- for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
- if (i == PHY_WAIT_ITERATIONS) {
- printk(KERN_WARNING
- "pxa168_eth: SMI bus read not valid\n");
- return -ENODEV;
- }
- msleep(10);
- }
-
- return val & 0xffff;
-}
-
-static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
- u16 value)
-{
- struct pxa168_eth_private *pep = bus->priv;
-
- if (smi_wait_ready(pep)) {
- printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
- return -ETIMEDOUT;
- }
-
- wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
- SMI_OP_W | (value & 0xffff));
-
- if (smi_wait_ready(pep)) {
- printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
- int cmd)
-{
- struct pxa168_eth_private *pep = netdev_priv(dev);
- if (pep->phy != NULL)
- return phy_mii_ioctl(pep->phy, ifr, cmd);
-
- return -EOPNOTSUPP;
-}
-
-static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
-{
- struct mii_bus *bus = pep->smi_bus;
- struct phy_device *phydev;
- int start;
- int num;
- int i;
-
- if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) {
- /* Scan entire range */
- start = ethernet_phy_get(pep);
- num = 32;
- } else {
- /* Use phy addr specific to platform */
- start = phy_addr & 0x1f;
- num = 1;
- }
- phydev = NULL;
- for (i = 0; i < num; i++) {
- int addr = (start + i) & 0x1f;
- if (bus->phy_map[addr] == NULL)
- mdiobus_scan(bus, addr);
-
- if (phydev == NULL) {
- phydev = bus->phy_map[addr];
- if (phydev != NULL)
- ethernet_phy_set_addr(pep, addr);
- }
- }
-
- return phydev;
-}
-
-static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
-{
- struct phy_device *phy = pep->phy;
- ethernet_phy_reset(pep);
-
- phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII);
-
- if (speed == 0) {
- phy->autoneg = AUTONEG_ENABLE;
- phy->speed = 0;
- phy->duplex = 0;
- phy->supported &= PHY_BASIC_FEATURES;
- phy->advertising = phy->supported | ADVERTISED_Autoneg;
- } else {
- phy->autoneg = AUTONEG_DISABLE;
- phy->advertising = 0;
- phy->speed = speed;
- phy->duplex = duplex;
- }
- phy_start_aneg(phy);
-}
-
-static int ethernet_phy_setup(struct net_device *dev)
-{
- struct pxa168_eth_private *pep = netdev_priv(dev);
-
- if (pep->pd->init)
- pep->pd->init();
- pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);
- if (pep->phy != NULL)
- phy_init(pep, pep->pd->speed, pep->pd->duplex);
- update_hash_table_mac_address(pep, NULL, dev->dev_addr);
-
- return 0;
-}
-
-static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct pxa168_eth_private *pep = netdev_priv(dev);
- int err;
-
- err = phy_read_status(pep->phy);
- if (err == 0)
- err = phy_ethtool_gset(pep->phy, cmd);
-
- return err;
-}
-
-static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct pxa168_eth_private *pep = netdev_priv(dev);
-
- return phy_ethtool_sset(pep->phy, cmd);
-}
-
-static void pxa168_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strncpy(info->driver, DRIVER_NAME, 32);
- strncpy(info->version, DRIVER_VERSION, 32);
- strncpy(info->fw_version, "N/A", 32);
- strncpy(info->bus_info, "N/A", 32);
-}
-
-static u32 pxa168_get_link(struct net_device *dev)
-{
- return !!netif_carrier_ok(dev);
-}
-
-static const struct ethtool_ops pxa168_ethtool_ops = {
- .get_settings = pxa168_get_settings,
- .set_settings = pxa168_set_settings,
- .get_drvinfo = pxa168_get_drvinfo,
- .get_link = pxa168_get_link,
-};
-
-static const struct net_device_ops pxa168_eth_netdev_ops = {
- .ndo_open = pxa168_eth_open,
- .ndo_stop = pxa168_eth_stop,
- .ndo_start_xmit = pxa168_eth_start_xmit,
- .ndo_set_rx_mode = pxa168_eth_set_rx_mode,
- .ndo_set_mac_address = pxa168_eth_set_mac_address,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = pxa168_eth_do_ioctl,
- .ndo_change_mtu = pxa168_eth_change_mtu,
- .ndo_tx_timeout = pxa168_eth_tx_timeout,
-};
-
-static int pxa168_eth_probe(struct platform_device *pdev)
-{
- struct pxa168_eth_private *pep = NULL;
- struct net_device *dev = NULL;
- struct resource *res;
- struct clk *clk;
- int err;
-
- printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");
-
- clk = clk_get(&pdev->dev, "MFUCLK");
- if (IS_ERR(clk)) {
- printk(KERN_ERR "%s: Fast Ethernet failed to get clock\n",
- DRIVER_NAME);
- return -ENODEV;
- }
- clk_enable(clk);
-
- dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
- if (!dev) {
- err = -ENOMEM;
- goto err_clk;
- }
-
- platform_set_drvdata(pdev, dev);
- pep = netdev_priv(dev);
- pep->dev = dev;
- pep->clk = clk;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- err = -ENODEV;
- goto err_netdev;
- }
- pep->base = ioremap(res->start, res->end - res->start + 1);
- if (pep->base == NULL) {
- err = -ENOMEM;
- goto err_netdev;
- }
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- BUG_ON(!res);
- dev->irq = res->start;
- dev->netdev_ops = &pxa168_eth_netdev_ops;
- dev->watchdog_timeo = 2 * HZ;
- dev->base_addr = 0;
- SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);
-
- INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
-
- printk(KERN_INFO "%s:Using random mac address\n", DRIVER_NAME);
- random_ether_addr(dev->dev_addr);
-
- pep->pd = pdev->dev.platform_data;
- pep->rx_ring_size = NUM_RX_DESCS;
- if (pep->pd->rx_queue_size)
- pep->rx_ring_size = pep->pd->rx_queue_size;
-
- pep->tx_ring_size = NUM_TX_DESCS;
- if (pep->pd->tx_queue_size)
- pep->tx_ring_size = pep->pd->tx_queue_size;
-
- pep->port_num = pep->pd->port_number;
- /* Hardware supports only 3 ports */
- BUG_ON(pep->port_num > 2);
- netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
-
- memset(&pep->timeout, 0, sizeof(struct timer_list));
- init_timer(&pep->timeout);
- pep->timeout.function = rxq_refill_timer_wrapper;
- pep->timeout.data = (unsigned long)pep;
-
- pep->smi_bus = mdiobus_alloc();
- if (pep->smi_bus == NULL) {
- err = -ENOMEM;
- goto err_base;
- }
- pep->smi_bus->priv = pep;
- pep->smi_bus->name = "pxa168_eth smi";
- pep->smi_bus->read = pxa168_smi_read;
- pep->smi_bus->write = pxa168_smi_write;
- snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
- pep->smi_bus->parent = &pdev->dev;
- pep->smi_bus->phy_mask = 0xffffffff;
- err = mdiobus_register(pep->smi_bus);
- if (err)
- goto err_free_mdio;
-
- pxa168_init_hw(pep);
- err = ethernet_phy_setup(dev);
- if (err)
- goto err_mdiobus;
- SET_NETDEV_DEV(dev, &pdev->dev);
- err = register_netdev(dev);
- if (err)
- goto err_mdiobus;
- return 0;
-
-err_mdiobus:
- mdiobus_unregister(pep->smi_bus);
-err_free_mdio:
- mdiobus_free(pep->smi_bus);
-err_base:
- iounmap(pep->base);
-err_netdev:
- free_netdev(dev);
-err_clk:
- clk_disable(clk);
- clk_put(clk);
- return err;
-}
-
-static int pxa168_eth_remove(struct platform_device *pdev)
-{
- struct net_device *dev = platform_get_drvdata(pdev);
- struct pxa168_eth_private *pep = netdev_priv(dev);
-
- if (pep->htpr) {
- dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
- pep->htpr, pep->htpr_dma);
- pep->htpr = NULL;
- }
- if (pep->clk) {
- clk_disable(pep->clk);
- clk_put(pep->clk);
- pep->clk = NULL;
- }
- if (pep->phy != NULL)
- phy_detach(pep->phy);
-
- iounmap(pep->base);
- pep->base = NULL;
- mdiobus_unregister(pep->smi_bus);
- mdiobus_free(pep->smi_bus);
- unregister_netdev(dev);
- flush_scheduled_work();
- free_netdev(dev);
- platform_set_drvdata(pdev, NULL);
- return 0;
-}
-
-static void pxa168_eth_shutdown(struct platform_device *pdev)
-{
- struct net_device *dev = platform_get_drvdata(pdev);
- eth_port_reset(dev);
-}
-
-#ifdef CONFIG_PM
-static int pxa168_eth_resume(struct platform_device *pdev)
-{
- return -ENOSYS;
-}
-
-static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
-{
- return -ENOSYS;
-}
-
-#else
-#define pxa168_eth_resume NULL
-#define pxa168_eth_suspend NULL
-#endif
-
-static struct platform_driver pxa168_eth_driver = {
- .probe = pxa168_eth_probe,
- .remove = pxa168_eth_remove,
- .shutdown = pxa168_eth_shutdown,
- .resume = pxa168_eth_resume,
- .suspend = pxa168_eth_suspend,
- .driver = {
- .name = DRIVER_NAME,
- },
-};
-
-static int __init pxa168_init_module(void)
-{
- return platform_driver_register(&pxa168_eth_driver);
-}
-
-static void __exit pxa168_cleanup_module(void)
-{
- platform_driver_unregister(&pxa168_eth_driver);
-}
-
-module_init(pxa168_init_module);
-module_exit(pxa168_cleanup_module);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
-MODULE_ALIAS("platform:pxa168_eth");
diff --git a/trunk/drivers/net/qlcnic/qlcnic_main.c b/trunk/drivers/net/qlcnic/qlcnic_main.c
index 66eea5972020..bf6d87adda4f 100644
--- a/trunk/drivers/net/qlcnic/qlcnic_main.c
+++ b/trunk/drivers/net/qlcnic/qlcnic_main.c
@@ -1983,6 +1983,8 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct net_device_stats *stats = &netdev->stats;
+ memset(stats, 0, sizeof(*stats));
+
stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
stats->tx_packets = adapter->stats.xmitfinished;
stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
@@ -2188,16 +2190,9 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev)
{
- int ring;
- struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
-
disable_irq(adapter->irq);
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
- qlcnic_intr(adapter->irq, sds_ring);
- }
+ qlcnic_intr(adapter->irq, adapter);
enable_irq(adapter->irq);
}
#endif
diff --git a/trunk/drivers/net/qlge/qlge_main.c b/trunk/drivers/net/qlge/qlge_main.c
index 5f89e83501f4..8d63f69b27d9 100644
--- a/trunk/drivers/net/qlge/qlge_main.c
+++ b/trunk/drivers/net/qlge/qlge_main.c
@@ -3919,12 +3919,12 @@ static int ql_adapter_down(struct ql_adapter *qdev)
for (i = 0; i < qdev->rss_ring_count; i++)
netif_napi_del(&qdev->rx_ring[i].napi);
+ ql_free_rx_buffers(qdev);
+
status = ql_adapter_reset(qdev);
if (status)
netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
qdev->func);
- ql_free_rx_buffers(qdev);
-
return status;
}
diff --git a/trunk/drivers/net/sh_eth.c b/trunk/drivers/net/sh_eth.c
index 79fd02bc69fd..f5a9eb1df593 100644
--- a/trunk/drivers/net/sh_eth.c
+++ b/trunk/drivers/net/sh_eth.c
@@ -1437,7 +1437,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
static int sh_eth_drv_probe(struct platform_device *pdev)
{
- int ret, devno = 0;
+ int ret, i, devno = 0;
struct resource *res;
struct net_device *ndev = NULL;
struct sh_eth_private *mdp;
diff --git a/trunk/drivers/net/usb/ipheth.c b/trunk/drivers/net/usb/ipheth.c
index 8ed30fa35d0a..08e7b6abacdd 100644
--- a/trunk/drivers/net/usb/ipheth.c
+++ b/trunk/drivers/net/usb/ipheth.c
@@ -58,7 +58,6 @@
#define USB_PRODUCT_IPHONE 0x1290
#define USB_PRODUCT_IPHONE_3G 0x1292
#define USB_PRODUCT_IPHONE_3GS 0x1294
-#define USB_PRODUCT_IPHONE_4 0x1297
#define IPHETH_USBINTF_CLASS 255
#define IPHETH_USBINTF_SUBCLASS 253
@@ -93,10 +92,6 @@ static struct usb_device_id ipheth_table[] = {
USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS,
IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
IPHETH_USBINTF_PROTO) },
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
{ }
};
MODULE_DEVICE_TABLE(usb, ipheth_table);
diff --git a/trunk/drivers/net/wireless/adm8211.c b/trunk/drivers/net/wireless/adm8211.c
index f9aa1bc0a947..a105087af963 100644
--- a/trunk/drivers/net/wireless/adm8211.c
+++ b/trunk/drivers/net/wireless/adm8211.c
@@ -732,7 +732,7 @@ static int adm8211_rf_set_channel(struct ieee80211_hw *dev, unsigned int chan)
/* Nothing to do for ADMtek BBP */
} else if (priv->bbp_type != ADM8211_TYPE_ADMTEK)
- wiphy_debug(dev->wiphy, "unsupported BBP type %d\n",
+ wiphy_debug(dev->wiphy, "unsupported bbp type %d\n",
priv->bbp_type);
ADM8211_RESTORE();
@@ -1032,7 +1032,7 @@ static int adm8211_hw_init_bbp(struct ieee80211_hw *dev)
break;
}
} else
- wiphy_debug(dev->wiphy, "unsupported BBP %d\n", priv->bbp_type);
+ wiphy_debug(dev->wiphy, "unsupported bbp %d\n", priv->bbp_type);
ADM8211_CSR_WRITE(SYNRF, 0);
@@ -1525,7 +1525,7 @@ static int adm8211_start(struct ieee80211_hw *dev)
retval = request_irq(priv->pdev->irq, adm8211_interrupt,
IRQF_SHARED, "adm8211", dev);
if (retval) {
- wiphy_err(dev->wiphy, "failed to register IRQ handler\n");
+ wiphy_err(dev->wiphy, "failed to register irq handler\n");
goto fail;
}
@@ -1902,7 +1902,7 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
goto err_free_eeprom;
}
- wiphy_info(dev->wiphy, "hwaddr %pM, Rev 0x%02x\n",
+ wiphy_info(dev->wiphy, "hwaddr %pm, rev 0x%02x\n",
dev->wiphy->perm_addr, pdev->revision);
return 0;
diff --git a/trunk/drivers/net/wireless/at76c50x-usb.c b/trunk/drivers/net/wireless/at76c50x-usb.c
index 1128fa8c9ed5..d5140a87f073 100644
--- a/trunk/drivers/net/wireless/at76c50x-usb.c
+++ b/trunk/drivers/net/wireless/at76c50x-usb.c
@@ -655,7 +655,7 @@ static int at76_get_hw_config(struct at76_priv *priv)
exit:
kfree(hwcfg);
if (ret < 0)
- wiphy_err(priv->hw->wiphy, "cannot get HW Config (error %d)\n",
+ wiphy_err(priv->hw->wiphy, "cannot get hw config (error %d)\n",
ret);
return ret;
@@ -960,7 +960,7 @@ static void at76_dump_mib_mac_addr(struct at76_priv *priv)
sizeof(struct mib_mac_addr));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
- "at76_get_mib (MAC_ADDR) failed: %d\n", ret);
+ "at76_get_mib (mac_addr) failed: %d\n", ret);
goto exit;
}
@@ -989,7 +989,7 @@ static void at76_dump_mib_mac_wep(struct at76_priv *priv)
sizeof(struct mib_mac_wep));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
- "at76_get_mib (MAC_WEP) failed: %d\n", ret);
+ "at76_get_mib (mac_wep) failed: %d\n", ret);
goto exit;
}
@@ -1026,7 +1026,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
sizeof(struct mib_mac_mgmt));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
- "at76_get_mib (MAC_MGMT) failed: %d\n", ret);
+ "at76_get_mib (mac_mgmt) failed: %d\n", ret);
goto exit;
}
@@ -1062,7 +1062,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(struct mib_mac));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
- "at76_get_mib (MAC) failed: %d\n", ret);
+ "at76_get_mib (mac) failed: %d\n", ret);
goto exit;
}
@@ -1099,7 +1099,7 @@ static void at76_dump_mib_phy(struct at76_priv *priv)
ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(struct mib_phy));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
- "at76_get_mib (PHY) failed: %d\n", ret);
+ "at76_get_mib (phy) failed: %d\n", ret);
goto exit;
}
@@ -1132,7 +1132,7 @@ static void at76_dump_mib_local(struct at76_priv *priv)
ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(struct mib_local));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
- "at76_get_mib (LOCAL) failed: %d\n", ret);
+ "at76_get_mib (local) failed: %d\n", ret);
goto exit;
}
@@ -1158,7 +1158,7 @@ static void at76_dump_mib_mdomain(struct at76_priv *priv)
sizeof(struct mib_mdomain));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
- "at76_get_mib (MDOMAIN) failed: %d\n", ret);
+ "at76_get_mib (mdomain) failed: %d\n", ret);
goto exit;
}
@@ -1229,7 +1229,7 @@ static int at76_submit_rx_urb(struct at76_priv *priv)
struct sk_buff *skb = priv->rx_skb;
if (!priv->rx_urb) {
- wiphy_err(priv->hw->wiphy, "%s: priv->rx_urb is NULL\n",
+ wiphy_err(priv->hw->wiphy, "%s: priv->rx_urb is null\n",
__func__);
return -EFAULT;
}
@@ -1792,7 +1792,7 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
wiphy_err(priv->hw->wiphy, "error in tx submit urb: %d\n", ret);
if (ret == -EINVAL)
wiphy_err(priv->hw->wiphy,
- "-EINVAL: tx urb %p hcpriv %p complete %p\n",
+ "-einval: tx urb %p hcpriv %p complete %p\n",
priv->tx_urb,
priv->tx_urb->hcpriv, priv->tx_urb->complete);
}
@@ -2310,7 +2310,7 @@ static int at76_init_new_device(struct at76_priv *priv,
priv->mac80211_registered = 1;
- wiphy_info(priv->hw->wiphy, "USB %s, MAC %pM, firmware %d.%d.%d-%d\n",
+ wiphy_info(priv->hw->wiphy, "usb %s, mac %pm, firmware %d.%d.%d-%d\n",
dev_name(&interface->dev), priv->mac_addr,
priv->fw_version.major, priv->fw_version.minor,
priv->fw_version.patch, priv->fw_version.build);
diff --git a/trunk/drivers/net/wireless/ath/ar9170/main.c b/trunk/drivers/net/wireless/ath/ar9170/main.c
index debfb0fbc7c5..c67b05f3bcbd 100644
--- a/trunk/drivers/net/wireless/ath/ar9170/main.c
+++ b/trunk/drivers/net/wireless/ath/ar9170/main.c
@@ -245,7 +245,7 @@ static void __ar9170_dump_txstats(struct ar9170 *ar)
{
int i;
- wiphy_debug(ar->hw->wiphy, "QoS queue stats\n");
+ wiphy_debug(ar->hw->wiphy, "qos queue stats\n");
for (i = 0; i < __AR9170_NUM_TXQ; i++)
wiphy_debug(ar->hw->wiphy,
@@ -387,7 +387,7 @@ static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) {
#ifdef AR9170_QUEUE_DEBUG
wiphy_debug(ar->hw->wiphy,
- "skip frame => DA %pM != %pM\n",
+ "skip frame => da %pm != %pm\n",
mac, ieee80211_get_DA(hdr));
ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
diff --git a/trunk/drivers/net/wireless/ath/ath5k/base.c b/trunk/drivers/net/wireless/ath/ath5k/base.c
index d77ce9906b6c..0d5de2574dd1 100644
--- a/trunk/drivers/net/wireless/ath/ath5k/base.c
+++ b/trunk/drivers/net/wireless/ath/ath5k/base.c
@@ -48,7 +48,6 @@
#include
#include
#include
-#include <linux/pci-aspm.h>
#include
#include
#include
@@ -477,26 +476,6 @@ ath5k_pci_probe(struct pci_dev *pdev,
int ret;
u8 csz;
- /*
- * L0s needs to be disabled on all ath5k cards.
- *
- * For distributions shipping with CONFIG_PCIEASPM (which will be enabled
- * by default starting with 2.6.36) this also means both L1 and
- * L0s will be disabled when a pre 1.1 PCIe device is detected. We do
- * know L1 works correctly even for all ath5k pre 1.1 PCIe devices,
- * but we cannot currently undo the effect of a blacklist; for
- * details you can read pcie_aspm_sanity_check() and see how it adjusts
- * the device link capability.
- *
- * It may be possible in the future to implement some PCI API to allow
- * drivers to override blacklists for pre 1.1 PCIe but for now it is
- * best to accept that both L0s and L1 will be disabled completely for
- * distributions shipping with CONFIG_PCIEASPM rather than having this
- * issue present. Motivation for adding this new API will be to help
- * with power consumption for some of these devices.
- */
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
-
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "can't enable device\n");
@@ -1327,10 +1306,6 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
PCI_DMA_TODEVICE);
rate = ieee80211_get_tx_rate(sc->hw, info);
- if (!rate) {
- ret = -EINVAL;
- goto err_unmap;
- }
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
flags |= AR5K_TXDESC_NOACK;
diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 057fb69ddf7f..b883b174385b 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -797,7 +797,7 @@ static bool ar9300_uncompress_block(struct ath_hw *ah,
length = block[it+1];
length &= 0xff;
- if (length > 0 && spot >= 0 && spot+length <= mdataSize) {
+ if (length > 0 && spot >= 0 && spot+length < mdataSize) {
ath_print(common, ATH_DBG_EEPROM,
"Restore at %d: spot=%d "
"offset=%d length=%d\n",
diff --git a/trunk/drivers/net/wireless/ath/ath9k/eeprom.h b/trunk/drivers/net/wireless/ath/ath9k/eeprom.h
index 0b09db0f8e7d..8750c558c221 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/trunk/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -62,7 +62,7 @@
#define SD_NO_CTL 0xE0
#define NO_CTL 0xff
-#define CTL_MODE_M 0xf
+#define CTL_MODE_M 7
#define CTL_11A 0
#define CTL_11B 1
#define CTL_11G 2
@@ -191,7 +191,6 @@
#define AR9287_EEP_NO_BACK_VER AR9287_EEP_MINOR_VER_1
#define AR9287_EEP_START_LOC 128
-#define AR9287_HTC_EEP_START_LOC 256
#define AR9287_NUM_2G_CAL_PIERS 3
#define AR9287_NUM_2G_CCK_TARGET_POWERS 3
#define AR9287_NUM_2G_20_TARGET_POWERS 3
diff --git a/trunk/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/trunk/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index dff2da777312..4a52cf03808b 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/trunk/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -34,14 +34,9 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
struct ar9287_eeprom *eep = &ah->eeprom.map9287;
struct ath_common *common = ath9k_hw_common(ah);
u16 *eep_data;
- int addr, eep_start_loc;
+ int addr, eep_start_loc = AR9287_EEP_START_LOC;
eep_data = (u16 *)eep;
- if (ah->hw_version.devid == 0x7015)
- eep_start_loc = AR9287_HTC_EEP_START_LOC;
- else
- eep_start_loc = AR9287_EEP_START_LOC;
-
if (!ath9k_hw_use_flash(ah)) {
ath_print(common, ATH_DBG_EEPROM,
"Reading from EEPROM, not flash\n");
diff --git a/trunk/drivers/net/wireless/ath/ath9k/hif_usb.c b/trunk/drivers/net/wireless/ath/ath9k/hif_usb.c
index 17e7a9a367e7..61c1bee3f26a 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/trunk/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -799,7 +799,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
}
kfree(buf);
- if ((hif_dev->device_id == 0x7010) || (hif_dev->device_id == 0x7015))
+ if (hif_dev->device_id == 0x7010)
firm_offset = AR7010_FIRMWARE_TEXT;
else
firm_offset = AR9271_FIRMWARE_TEXT;
@@ -901,7 +901,6 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
switch(hif_dev->device_id) {
case 0x7010:
- case 0x7015:
case 0x9018:
if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202)
hif_dev->fw_name = FIRMWARE_AR7010_1_1;
@@ -913,6 +912,11 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
break;
}
+ if (!hif_dev->fw_name) {
+ dev_err(&udev->dev, "Can't determine firmware !\n");
+ goto err_htc_hw_alloc;
+ }
+
ret = ath9k_hif_usb_dev_init(hif_dev);
if (ret) {
ret = -EINVAL;
diff --git a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 2d4279191d7a..148b43317fdb 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -245,7 +245,6 @@ static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid)
switch(devid) {
case 0x7010:
- case 0x7015:
case 0x9018:
priv->htc->credits = 45;
break;
diff --git a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 7d09b4b17bbd..ebed9d1691a5 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -366,8 +366,7 @@ static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv,
caps = WLAN_RC_HT_FLAG;
if (sta->ht_cap.mcs.rx_mask[1])
caps |= WLAN_RC_DS_FLAG;
- if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
- (conf_is_ht40(&priv->hw->conf)))
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
caps |= WLAN_RC_40_FLAG;
if (conf_is_ht40(&priv->hw->conf) &&
(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40))
diff --git a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 2a6e45a293a9..bd0b4acc3ece 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -78,23 +78,18 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = tx_info->control.sta;
struct ath9k_htc_sta *ista;
+ struct ath9k_htc_vif *avp;
struct ath9k_htc_tx_ctl tx_ctl;
enum htc_endpoint_id epid;
u16 qnum;
__le16 fc;
u8 *tx_fhdr;
- u8 sta_idx, vif_idx;
+ u8 sta_idx;
hdr = (struct ieee80211_hdr *) skb->data;
fc = hdr->frame_control;
- if (tx_info->control.vif &&
- (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv)
- vif_idx = ((struct ath9k_htc_vif *)
- tx_info->control.vif->drv_priv)->index;
- else
- vif_idx = priv->nvifs;
-
+ avp = (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv;
if (sta) {
ista = (struct ath9k_htc_sta *) sta->drv_priv;
sta_idx = ista->index;
@@ -111,7 +106,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));
tx_hdr.node_idx = sta_idx;
- tx_hdr.vif_idx = vif_idx;
+ tx_hdr.vif_idx = avp->index;
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
tx_ctl.type = ATH9K_HTC_AMPDU;
@@ -174,7 +169,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
tx_ctl.type = ATH9K_HTC_NORMAL;
mgmt_hdr.node_idx = sta_idx;
- mgmt_hdr.vif_idx = vif_idx;
+ mgmt_hdr.vif_idx = avp->index;
mgmt_hdr.tidno = 0;
mgmt_hdr.flags = 0;
diff --git a/trunk/drivers/net/wireless/ath/ath9k/reg.h b/trunk/drivers/net/wireless/ath/ath9k/reg.h
index d01c4adab8d6..633e3d949ec0 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/reg.h
+++ b/trunk/drivers/net/wireless/ath/ath9k/reg.h
@@ -899,7 +899,6 @@
#define AR_DEVID_7010(_ah) \
(((_ah)->hw_version.devid == 0x7010) || \
- ((_ah)->hw_version.devid == 0x7015) || \
((_ah)->hw_version.devid == 0x9018))
#define AR_RADIO_SREV_MAJOR 0xf0
diff --git a/trunk/drivers/net/wireless/ath/regd.h b/trunk/drivers/net/wireless/ath/regd.h
index 345dd9721b41..a1c39526161a 100644
--- a/trunk/drivers/net/wireless/ath/regd.h
+++ b/trunk/drivers/net/wireless/ath/regd.h
@@ -31,6 +31,7 @@ enum ctl_group {
#define NO_CTL 0xff
#define SD_NO_CTL 0xE0
#define NO_CTL 0xff
+#define CTL_MODE_M 7
#define CTL_11A 0
#define CTL_11B 1
#define CTL_11G 2
diff --git a/trunk/drivers/net/wireless/ipw2x00/ipw2100.c b/trunk/drivers/net/wireless/ipw2x00/ipw2100.c
index 996e9d7d7586..16bbfa3189a5 100644
--- a/trunk/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/trunk/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -2723,6 +2723,14 @@ static void __ipw2100_rx_process(struct ipw2100_priv *priv)
packet = &priv->rx_buffers[i];
+ /* Sync the DMA for the STATUS buffer so CPU is sure to get
+ * the correct values */
+ pci_dma_sync_single_for_cpu(priv->pci_dev,
+ sq->nic +
+ sizeof(struct ipw2100_status) * i,
+ sizeof(struct ipw2100_status),
+ PCI_DMA_FROMDEVICE);
+
/* Sync the DMA for the RX buffer so CPU is sure to get
* the correct values */
pci_dma_sync_single_for_cpu(priv->pci_dev, packet->dma_addr,
@@ -6657,13 +6665,12 @@ static int __init ipw2100_init(void)
printk(KERN_INFO DRV_NAME ": %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
printk(KERN_INFO DRV_NAME ": %s\n", DRV_COPYRIGHT);
- pm_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
-
ret = pci_register_driver(&ipw2100_pci_driver);
if (ret)
goto out;
+ pm_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
#ifdef CONFIG_IPW2100_DEBUG
ipw2100_debug_level = debug;
ret = driver_create_file(&ipw2100_pci_driver.driver,
diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-1000.c b/trunk/drivers/net/wireless/iwlwifi/iwl-1000.c
index 0b779a41a142..fec026212326 100644
--- a/trunk/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/trunk/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -265,7 +265,7 @@ struct iwl_cfg iwl1000_bgn_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 128,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -297,7 +297,7 @@ struct iwl_cfg iwl1000_bg_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 128,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-3945.c b/trunk/drivers/net/wireless/iwlwifi/iwl-3945.c
index 8ccfcd08218d..6950a783913b 100644
--- a/trunk/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/trunk/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -2731,7 +2731,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
.led_compensation = 64,
.broken_powersave = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
- .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
.tx_power_by_driver = true,
};
@@ -2752,7 +2752,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
.led_compensation = 64,
.broken_powersave = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
- .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
.tx_power_by_driver = true,
};
diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-4965.c b/trunk/drivers/net/wireless/iwlwifi/iwl-4965.c
index d92b72909233..d6da356608fa 100644
--- a/trunk/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/trunk/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2322,7 +2322,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
.led_compensation = 61,
.chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
- .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.temperature_kelvin = true,
.max_event_log_size = 512,
.tx_power_by_driver = true,
diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-5000.c b/trunk/drivers/net/wireless/iwlwifi/iwl-5000.c
index 48bdcd8d2e94..aacf3770f075 100644
--- a/trunk/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/trunk/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -510,7 +510,7 @@ struct iwl_cfg iwl5300_agn_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -541,7 +541,7 @@ struct iwl_cfg iwl5100_bgn_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -570,7 +570,7 @@ struct iwl_cfg iwl5100_abg_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -601,7 +601,7 @@ struct iwl_cfg iwl5100_agn_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -632,7 +632,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -663,7 +663,7 @@ struct iwl_cfg iwl5150_agn_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -693,7 +693,7 @@ struct iwl_cfg iwl5150_abg_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-6000.c b/trunk/drivers/net/wireless/iwlwifi/iwl-6000.c
index cee06b968de8..af4fd50f3405 100644
--- a/trunk/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/trunk/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -388,7 +388,7 @@ struct iwl_cfg iwl6000g2a_2agn_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -424,7 +424,7 @@ struct iwl_cfg iwl6000g2a_2abg_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
@@ -459,7 +459,7 @@ struct iwl_cfg iwl6000g2a_2bg_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
@@ -496,7 +496,7 @@ struct iwl_cfg iwl6000g2b_2agn_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
@@ -532,7 +532,7 @@ struct iwl_cfg iwl6000g2b_2abg_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
@@ -570,7 +570,7 @@ struct iwl_cfg iwl6000g2b_2bgn_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
@@ -606,7 +606,7 @@ struct iwl_cfg iwl6000g2b_2bg_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
@@ -644,7 +644,7 @@ struct iwl_cfg iwl6000g2b_bgn_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
@@ -680,7 +680,7 @@ struct iwl_cfg iwl6000g2b_bg_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
@@ -721,7 +721,7 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 1024,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -756,7 +756,7 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 1024,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -791,7 +791,7 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 1024,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -828,7 +828,7 @@ struct iwl_cfg iwl6050_2agn_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1500,
- .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 1024,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -866,7 +866,7 @@ struct iwl_cfg iwl6050g2_bgn_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1500,
- .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 1024,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -902,7 +902,7 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1500,
- .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 1024,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -940,7 +940,7 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 1024,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c
index 10d7b9b7f064..c1882fd8345d 100644
--- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -3667,49 +3667,6 @@ static void iwl_mac_channel_switch(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "leave\n");
}
-static void iwlagn_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
-{
- struct iwl_priv *priv = hw->priv;
- __le32 filter_or = 0, filter_nand = 0;
-
-#define CHK(test, flag) do { \
- if (*total_flags & (test)) \
- filter_or |= (flag); \
- else \
- filter_nand |= (flag); \
- } while (0)
-
- IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
- changed_flags, *total_flags);
-
- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
- CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
- CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
- mutex_lock(&priv->mutex);
-
- priv->staging_rxon.filter_flags &= ~filter_nand;
- priv->staging_rxon.filter_flags |= filter_or;
-
- iwlcore_commit_rxon(priv);
-
- mutex_unlock(&priv->mutex);
-
- /*
- * Receiving all multicast frames is always enabled by the
- * default flags setup in iwl_connection_init_rx_config()
- * since we currently do not support programming multicast
- * filters into the device.
- */
- *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
- FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
-
static void iwl_mac_flush(struct ieee80211_hw *hw, bool drop)
{
struct iwl_priv *priv = hw->priv;
@@ -3910,7 +3867,7 @@ static struct ieee80211_ops iwl_hw_ops = {
.add_interface = iwl_mac_add_interface,
.remove_interface = iwl_mac_remove_interface,
.config = iwl_mac_config,
- .configure_filter = iwlagn_configure_filter,
+ .configure_filter = iwl_configure_filter,
.set_key = iwl_mac_set_key,
.update_tkip_key = iwl_mac_update_tkip_key,
.conf_tx = iwl_mac_conf_tx,
diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-core.c b/trunk/drivers/net/wireless/iwlwifi/iwl-core.c
index 07dbc2796448..2c03c6e20a72 100644
--- a/trunk/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/trunk/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1328,6 +1328,51 @@ int iwl_apm_init(struct iwl_priv *priv)
EXPORT_SYMBOL(iwl_apm_init);
+
+void iwl_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct iwl_priv *priv = hw->priv;
+ __le32 filter_or = 0, filter_nand = 0;
+
+#define CHK(test, flag) do { \
+ if (*total_flags & (test)) \
+ filter_or |= (flag); \
+ else \
+ filter_nand |= (flag); \
+ } while (0)
+
+ IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
+ changed_flags, *total_flags);
+
+ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
+ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+ mutex_lock(&priv->mutex);
+
+ priv->staging_rxon.filter_flags &= ~filter_nand;
+ priv->staging_rxon.filter_flags |= filter_or;
+
+ iwlcore_commit_rxon(priv);
+
+ mutex_unlock(&priv->mutex);
+
+ /*
+ * Receiving all multicast frames is always enabled by the
+ * default flags setup in iwl_connection_init_rx_config()
+ * since we currently do not support programming multicast
+ * filters into the device.
+ */
+ *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+EXPORT_SYMBOL(iwl_configure_filter);
+
int iwl_set_hw_params(struct iwl_priv *priv)
{
priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
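
The hunk above moves the mac80211 filter callback into iwl-core.c as iwl_configure_filter(), where a small CHK macro turns each requested FIF_* flag into either a bit to set or a bit to clear in the RXON filter word. The following is a minimal user-space sketch of that same or/clear-mask technique; the MY_FIF_* and MY_HW_* constants and build_filter_masks() are stand-ins for illustration, not the mac80211 or iwlwifi definitions.

#include <stdio.h>

/* Stand-ins for mac80211 FIF_* flags and iwlwifi RXON_FILTER_* bits. */
#define MY_FIF_PROMISC		0x01
#define MY_FIF_CONTROL		0x02
#define MY_FIF_BCN_PROMISC	0x04

#define MY_HW_PROMISC		0x0100
#define MY_HW_CTL2HOST		0x0200
#define MY_HW_BCON_AWARE	0x0400

/*
 * Build two hardware masks from the requested stack-level flags: bits to
 * OR in and bits to clear, mirroring the CHK macro in the hunk above.
 */
static void build_filter_masks(unsigned int total_flags,
			       unsigned int *filter_or,
			       unsigned int *filter_nand)
{
	*filter_or = 0;
	*filter_nand = 0;

#define CHK(test, flag) do {			\
	if (total_flags & (test))		\
		*filter_or |= (flag);		\
	else					\
		*filter_nand |= (flag);		\
} while (0)

	CHK(MY_FIF_PROMISC, MY_HW_PROMISC);
	CHK(MY_FIF_CONTROL, MY_HW_CTL2HOST);
	CHK(MY_FIF_BCN_PROMISC, MY_HW_BCON_AWARE);

#undef CHK
}

int main(void)
{
	unsigned int set, clear;
	unsigned int hw_filter = MY_HW_BCON_AWARE;	/* pretend current device state */

	build_filter_masks(MY_FIF_PROMISC | MY_FIF_CONTROL, &set, &clear);
	hw_filter = (hw_filter & ~clear) | set;
	printf("hw filter now 0x%04x\n", hw_filter);
	return 0;
}

Computing both masks first and applying them under the driver mutex, as the real function does, keeps the read-modify-write of the staging filter word atomic with respect to other RXON updates.
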
diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-core.h b/trunk/drivers/net/wireless/iwlwifi/iwl-core.h
index 5e6ee3da6bbf..4a71dfb10a15 100644
--- a/trunk/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/trunk/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -372,6 +372,9 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
u32 decrypt_res,
struct ieee80211_rx_status *stats);
void iwl_irq_handle_error(struct iwl_priv *priv);
+void iwl_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast);
int iwl_set_hw_params(struct iwl_priv *priv);
void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif);
void iwl_bss_info_changed(struct ieee80211_hw *hw,
diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-dev.h b/trunk/drivers/net/wireless/iwlwifi/iwl-dev.h
index 2e97cd2fa98a..f35bcad56e36 100644
--- a/trunk/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/trunk/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -1049,8 +1049,7 @@ struct iwl_event_log {
#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
/* timer constants use to monitor and recover stuck tx queues in mSecs */
-#define IWL_DEF_MONITORING_PERIOD (1000)
-#define IWL_LONG_MONITORING_PERIOD (5000)
+#define IWL_MONITORING_PERIOD (1000)
#define IWL_ONE_HUNDRED_MSECS (100)
#define IWL_SIXTY_SECS (60000)
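
The iwl-dev.h hunk collapses the two monitoring constants into a single IWL_MONITORING_PERIOD of 1000 ms, which the driver uses to rearm the timer that checks for stuck TX queues. Below is a hedged kernel-style sketch of how such a periodic watchdog is typically wired up with the timer API of this era; struct my_priv, my_monitor_recover() and the commented my_check_stuck_queues() are hypothetical names, not iwlwifi symbols.

#include <linux/timer.h>
#include <linux/jiffies.h>

#define MY_MONITORING_PERIOD	1000	/* mSecs, mirroring IWL_MONITORING_PERIOD */

struct my_priv {
	struct timer_list monitor_recover;
	/* ... device state ... */
};

/* Timer callback: inspect the TX queues, then rearm for the next period. */
static void my_monitor_recover(unsigned long data)
{
	struct my_priv *priv = (struct my_priv *)data;

	/* my_check_stuck_queues(priv); -- hypothetical queue check */

	mod_timer(&priv->monitor_recover,
		  jiffies + msecs_to_jiffies(MY_MONITORING_PERIOD));
}

static void my_setup_monitor(struct my_priv *priv)
{
	setup_timer(&priv->monitor_recover, my_monitor_recover,
		    (unsigned long)priv);
	mod_timer(&priv->monitor_recover,
		  jiffies + msecs_to_jiffies(MY_MONITORING_PERIOD));
}

Teardown would pair this with del_timer_sync() so the callback cannot rearm itself while the device is going down.
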
diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl3945-base.c b/trunk/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 59a308b02f95..70c4b8fba0ee 100644
--- a/trunk/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/trunk/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -3391,55 +3391,6 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
return 0;
}
-
-static void iwl3945_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
-{
- struct iwl_priv *priv = hw->priv;
- __le32 filter_or = 0, filter_nand = 0;
-
-#define CHK(test, flag) do { \
- if (*total_flags & (test)) \
- filter_or |= (flag); \
- else \
- filter_nand |= (flag); \
- } while (0)
-
- IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
- changed_flags, *total_flags);
-
- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
- CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
- CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
- mutex_lock(&priv->mutex);
-
- priv->staging_rxon.filter_flags &= ~filter_nand;
- priv->staging_rxon.filter_flags |= filter_or;
-
- /*
- * Committing directly here breaks for some reason,
- * but we'll eventually commit the filter flags
- * change anyway.
- */
-
- mutex_unlock(&priv->mutex);
-
- /*
- * Receiving all multicast frames is always enabled by the
- * default flags setup in iwl_connection_init_rx_config()
- * since we currently do not support programming multicast
- * filters into the device.
- */
- *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
- FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
-
-
/*****************************************************************************
*
* sysfs attributes
@@ -3845,7 +3796,7 @@ static struct ieee80211_ops iwl3945_hw_ops = {
.add_interface = iwl_mac_add_interface,
.remove_interface = iwl_mac_remove_interface,
.config = iwl_mac_config,
- .configure_filter = iwl3945_configure_filter,
+ .configure_filter = iwl_configure_filter,
.set_key = iwl3945_mac_set_key,
.conf_tx = iwl_mac_conf_tx,
.reset_tsf = iwl_mac_reset_tsf,
diff --git a/trunk/drivers/net/wireless/libertas/if_sdio.c b/trunk/drivers/net/wireless/libertas/if_sdio.c
index 87b634978b35..ba854c70ab94 100644
--- a/trunk/drivers/net/wireless/libertas/if_sdio.c
+++ b/trunk/drivers/net/wireless/libertas/if_sdio.c
@@ -128,7 +128,7 @@ struct if_sdio_card {
bool helper_allocated;
bool firmware_allocated;
- u8 buffer[65536] __attribute__((aligned(4)));
+ u8 buffer[65536];
spinlock_t lock;
struct if_sdio_packet *packets;
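
The libertas hunk drops the __attribute__((aligned(4))) annotation from the 64 KiB SDIO transfer buffer. For reference, the small standalone C program below shows what the attribute does to a struct member's placement; it is plain user-space C and not tied to the driver.

#include <stdio.h>
#include <stddef.h>

struct unaligned_card {
	char flag;
	unsigned char buffer[64];	/* char arrays need no alignment, so offset 1 */
};

struct aligned_card {
	char flag;
	unsigned char buffer[64] __attribute__((aligned(4)));	/* offset rounded up to 4 */
};

int main(void)
{
	printf("unaligned offset: %zu\n", offsetof(struct unaligned_card, buffer));
	printf("aligned offset:   %zu\n", offsetof(struct aligned_card, buffer));
	return 0;
}

Whether the annotation matters for the real driver depends on what the SDIO/DMA path expects of the buffer's start address; the sketch only illustrates the mechanism being removed.
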
diff --git a/trunk/drivers/net/wireless/mac80211_hwsim.c b/trunk/drivers/net/wireless/mac80211_hwsim.c
index 86fa8abdd66f..01ad7f77383a 100644
--- a/trunk/drivers/net/wireless/mac80211_hwsim.c
+++ b/trunk/drivers/net/wireless/mac80211_hwsim.c
@@ -486,7 +486,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
struct ieee80211_rx_status rx_status;
if (data->idle) {
- wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
+ wiphy_debug(hw->wiphy, "trying to tx when idle - reject\n");
return false;
}
diff --git a/trunk/drivers/net/wireless/mwl8k.c b/trunk/drivers/net/wireless/mwl8k.c
index f152a25be59f..d761ed2d8af4 100644
--- a/trunk/drivers/net/wireless/mwl8k.c
+++ b/trunk/drivers/net/wireless/mwl8k.c
@@ -910,14 +910,14 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma);
if (rxq->rxd == NULL) {
- wiphy_err(hw->wiphy, "failed to alloc RX descriptors\n");
+ wiphy_err(hw->wiphy, "failed to alloc rx descriptors\n");
return -ENOMEM;
}
memset(rxq->rxd, 0, size);
rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL);
if (rxq->buf == NULL) {
- wiphy_err(hw->wiphy, "failed to alloc RX skbuff list\n");
+ wiphy_err(hw->wiphy, "failed to alloc rx skbuff list\n");
pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma);
return -ENOMEM;
}
@@ -1145,14 +1145,14 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
txq->txd = pci_alloc_consistent(priv->pdev, size, &txq->txd_dma);
if (txq->txd == NULL) {
- wiphy_err(hw->wiphy, "failed to alloc TX descriptors\n");
+ wiphy_err(hw->wiphy, "failed to alloc tx descriptors\n");
return -ENOMEM;
}
memset(txq->txd, 0, size);
txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL);
if (txq->skb == NULL) {
- wiphy_err(hw->wiphy, "failed to alloc TX skbuff list\n");
+ wiphy_err(hw->wiphy, "failed to alloc tx skbuff list\n");
pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma);
return -ENOMEM;
}
@@ -1573,7 +1573,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
PCI_DMA_BIDIRECTIONAL);
if (!timeout) {
- wiphy_err(hw->wiphy, "Command %s timeout after %u ms\n",
+ wiphy_err(hw->wiphy, "command %s timeout after %u ms\n",
mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
MWL8K_CMD_TIMEOUT_MS);
rc = -ETIMEDOUT;
@@ -1584,11 +1584,11 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
rc = cmd->result ? -EINVAL : 0;
if (rc)
- wiphy_err(hw->wiphy, "Command %s error 0x%x\n",
+ wiphy_err(hw->wiphy, "command %s error 0x%x\n",
mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
le16_to_cpu(cmd->result));
else if (ms > 2000)
- wiphy_notice(hw->wiphy, "Command %s took %d ms\n",
+ wiphy_notice(hw->wiphy, "command %s took %d ms\n",
mwl8k_cmd_name(cmd->code,
buf, sizeof(buf)),
ms);
@@ -3210,7 +3210,7 @@ static int mwl8k_start(struct ieee80211_hw *hw)
rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
IRQF_SHARED, MWL8K_NAME, hw);
if (rc) {
- wiphy_err(hw->wiphy, "failed to register IRQ handler\n");
+ wiphy_err(hw->wiphy, "failed to register irq handler\n");
return -EIO;
}
@@ -3926,7 +3926,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
priv->sram = pci_iomap(pdev, 0, 0x10000);
if (priv->sram == NULL) {
- wiphy_err(hw->wiphy, "Cannot map device SRAM\n");
+ wiphy_err(hw->wiphy, "cannot map device sram\n");
goto err_iounmap;
}
@@ -3938,7 +3938,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
if (priv->regs == NULL) {
priv->regs = pci_iomap(pdev, 2, 0x10000);
if (priv->regs == NULL) {
- wiphy_err(hw->wiphy, "Cannot map device registers\n");
+ wiphy_err(hw->wiphy, "cannot map device registers\n");
goto err_iounmap;
}
}
@@ -3950,14 +3950,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
/* Ask userland hotplug daemon for the device firmware */
rc = mwl8k_request_firmware(priv);
if (rc) {
- wiphy_err(hw->wiphy, "Firmware files not found\n");
+ wiphy_err(hw->wiphy, "firmware files not found\n");
goto err_stop_firmware;
}
/* Load firmware into hardware */
rc = mwl8k_load_firmware(hw);
if (rc) {
- wiphy_err(hw->wiphy, "Cannot start firmware\n");
+ wiphy_err(hw->wiphy, "cannot start firmware\n");
goto err_stop_firmware;
}
@@ -4047,7 +4047,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
IRQF_SHARED, MWL8K_NAME, hw);
if (rc) {
- wiphy_err(hw->wiphy, "failed to register IRQ handler\n");
+ wiphy_err(hw->wiphy, "failed to register irq handler\n");
goto err_free_queues;
}
@@ -4067,7 +4067,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
rc = mwl8k_cmd_get_hw_spec_sta(hw);
}
if (rc) {
- wiphy_err(hw->wiphy, "Cannot initialise firmware\n");
+ wiphy_err(hw->wiphy, "cannot initialise firmware\n");
goto err_free_irq;
}
@@ -4081,14 +4081,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
/* Turn radio off */
rc = mwl8k_cmd_radio_disable(hw);
if (rc) {
- wiphy_err(hw->wiphy, "Cannot disable\n");
+ wiphy_err(hw->wiphy, "cannot disable\n");
goto err_free_irq;
}
/* Clear MAC address */
rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00");
if (rc) {
- wiphy_err(hw->wiphy, "Cannot clear MAC address\n");
+ wiphy_err(hw->wiphy, "cannot clear mac address\n");
goto err_free_irq;
}
@@ -4098,7 +4098,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
rc = ieee80211_register_hw(hw);
if (rc) {
- wiphy_err(hw->wiphy, "Cannot register device\n");
+ wiphy_err(hw->wiphy, "cannot register device\n");
goto err_free_queues;
}
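
The mwl8k_rxq_init()/mwl8k_txq_init() hunks above follow a common allocation pattern: allocate a DMA-coherent descriptor ring, then a host-side bookkeeping array, and unwind the first allocation if the second fails. A condensed kernel-style sketch of that pattern with the legacy pci_alloc_consistent()/pci_free_consistent() API used here is below; struct my_rxq and my_rxq_init() are illustrative names rather than the mwl8k types.

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

struct my_rxq {
	void *rxd;		/* DMA descriptor ring (device visible) */
	dma_addr_t rxd_dma;
	void **buf;		/* host-side skb bookkeeping */
};

/* Allocate a ring of 'count' descriptors of 'desc_size' bytes each. */
static int my_rxq_init(struct pci_dev *pdev, struct my_rxq *rxq,
		       int count, size_t desc_size)
{
	size_t size = count * desc_size;

	rxq->rxd = pci_alloc_consistent(pdev, size, &rxq->rxd_dma);
	if (rxq->rxd == NULL)
		return -ENOMEM;
	memset(rxq->rxd, 0, size);

	rxq->buf = kcalloc(count, sizeof(*rxq->buf), GFP_KERNEL);
	if (rxq->buf == NULL) {
		/* Unwind the descriptor ring before reporting failure. */
		pci_free_consistent(pdev, size, rxq->rxd, rxq->rxd_dma);
		return -ENOMEM;
	}
	return 0;
}

The wiphy_err() calls in the real code simply annotate each of these failure points before the same unwind-and-return steps.
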
diff --git a/trunk/drivers/net/wireless/p54/eeprom.c b/trunk/drivers/net/wireless/p54/eeprom.c
index 78347041ec40..d687cb7f2a59 100644
--- a/trunk/drivers/net/wireless/p54/eeprom.c
+++ b/trunk/drivers/net/wireless/p54/eeprom.c
@@ -167,7 +167,7 @@ static int p54_generate_band(struct ieee80211_hw *dev,
}
if (j == 0) {
- wiphy_err(dev->wiphy, "Disabling totally damaged %d GHz band\n",
+ wiphy_err(dev->wiphy, "disabling totally damaged %d GHz band\n",
(band == IEEE80211_BAND_2GHZ) ? 2 : 5);
ret = -ENODATA;
@@ -695,12 +695,12 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
u8 perm_addr[ETH_ALEN];
wiphy_warn(dev->wiphy,
- "Invalid hwaddr! Using randomly generated MAC addr\n");
+ "invalid hwaddr! using randomly generated mac addr\n");
random_ether_addr(perm_addr);
SET_IEEE80211_PERM_ADDR(dev, perm_addr);
}
- wiphy_info(dev->wiphy, "hwaddr %pM, MAC:isl38%02x RF:%s\n",
+ wiphy_info(dev->wiphy, "hwaddr %pM, mac:isl38%02x rf:%s\n",
dev->wiphy->perm_addr, priv->version,
p54_rf_chips[priv->rxhw]);
diff --git a/trunk/drivers/net/wireless/p54/fwio.c b/trunk/drivers/net/wireless/p54/fwio.c
index 15b20c29a604..47006bca4852 100644
--- a/trunk/drivers/net/wireless/p54/fwio.c
+++ b/trunk/drivers/net/wireless/p54/fwio.c
@@ -125,7 +125,7 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
if (fw_version)
wiphy_info(priv->hw->wiphy,
- "FW rev %s - Softmac protocol %x.%x\n",
+ "fw rev %s - softmac protocol %x.%x\n",
fw_version, priv->fw_var >> 8, priv->fw_var & 0xff);
if (priv->fw_var < 0x500)
diff --git a/trunk/drivers/net/wireless/p54/led.c b/trunk/drivers/net/wireless/p54/led.c
index 3837e1eec5f4..ea91f5cce6b3 100644
--- a/trunk/drivers/net/wireless/p54/led.c
+++ b/trunk/drivers/net/wireless/p54/led.c
@@ -58,7 +58,7 @@ static void p54_update_leds(struct work_struct *work)
err = p54_set_leds(priv);
if (err && net_ratelimit())
wiphy_err(priv->hw->wiphy,
- "failed to update LEDs (%d).\n", err);
+ "failed to update leds (%d).\n", err);
if (rerun)
ieee80211_queue_delayed_work(priv->hw, &priv->led_work,
@@ -103,7 +103,7 @@ static int p54_register_led(struct p54_common *priv,
err = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_dev);
if (err)
wiphy_err(priv->hw->wiphy,
- "Failed to register %s LED.\n", name);
+ "failed to register %s led.\n", name);
else
led->registered = 1;
diff --git a/trunk/drivers/net/wireless/p54/p54pci.c b/trunk/drivers/net/wireless/p54/p54pci.c
index 1eacba4daa5b..822f8dc26e9c 100644
--- a/trunk/drivers/net/wireless/p54/p54pci.c
+++ b/trunk/drivers/net/wireless/p54/p54pci.c
@@ -466,7 +466,7 @@ static int p54p_open(struct ieee80211_hw *dev)
P54P_READ(dev_int);
if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) {
- wiphy_err(dev->wiphy, "Cannot boot firmware!\n");
+ wiphy_err(dev->wiphy, "cannot boot firmware!\n");
p54p_stop(dev);
return -ETIMEDOUT;
}
diff --git a/trunk/drivers/net/wireless/p54/txrx.c b/trunk/drivers/net/wireless/p54/txrx.c
index 0e937dc0c9c4..427b46f558ed 100644
--- a/trunk/drivers/net/wireless/p54/txrx.c
+++ b/trunk/drivers/net/wireless/p54/txrx.c
@@ -446,7 +446,7 @@ static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb)
}
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
- !(payload->status & P54_TX_FAILED))
+ (!payload->status))
info->flags |= IEEE80211_TX_STAT_ACK;
if (payload->status & P54_TX_PSM_CANCELLED)
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
@@ -540,7 +540,7 @@ static void p54_rx_trap(struct p54_common *priv, struct sk_buff *skb)
case P54_TRAP_BEACON_TX:
break;
case P54_TRAP_RADAR:
- wiphy_info(priv->hw->wiphy, "radar (freq:%d MHz)\n", freq);
+ wiphy_info(priv->hw->wiphy, "radar (freq:%d mhz)\n", freq);
break;
case P54_TRAP_NO_BEACON:
if (priv->vif)
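
The p54_rx_frame_sent() hunk changes how an acknowledged frame is detected: the removed test only looked at the P54_TX_FAILED bit, while the replacement requires the whole status byte to be zero, so any other reported condition also suppresses IEEE80211_TX_STAT_ACK. The short user-space sketch below shows where the two tests diverge; MY_TX_FAILED and MY_TX_PSM_CANCELLED are stand-in values, not the p54 firmware definitions.

#include <stdio.h>

/* Stand-ins for the p54 firmware status bits referenced in the hunk. */
#define MY_TX_FAILED		0x01
#define MY_TX_PSM_CANCELLED	0x04

static int acked_by_bit(unsigned int status)
{
	return !(status & MY_TX_FAILED);	/* removed test: only the failed bit */
}

static int acked_by_zero(unsigned int status)
{
	return !status;				/* replacement test: whole byte clear */
}

int main(void)
{
	unsigned int status = MY_TX_PSM_CANCELLED;	/* failed bit clear, another bit set */

	printf("bit test: %d, zero test: %d\n",
	       acked_by_bit(status), acked_by_zero(status));
	return 0;
}
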
diff --git a/trunk/drivers/net/wireless/rtl818x/rtl8180_dev.c b/trunk/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 30107ce78dfb..b50c39aaec05 100644
--- a/trunk/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/trunk/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -445,7 +445,7 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
&priv->rx_ring_dma);
if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) {
- wiphy_err(dev->wiphy, "Cannot allocate RX ring\n");
+ wiphy_err(dev->wiphy, "cannot allocate rx ring\n");
return -ENOMEM;
}
@@ -502,7 +502,7 @@ static int rtl8180_init_tx_ring(struct ieee80211_hw *dev,
ring = pci_alloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma);
if (!ring || (unsigned long)ring & 0xFF) {
- wiphy_err(dev->wiphy, "Cannot allocate TX ring (prio = %d)\n",
+ wiphy_err(dev->wiphy, "cannot allocate tx ring (prio = %d)\n",
prio);
return -ENOMEM;
}
@@ -568,7 +568,7 @@ static int rtl8180_start(struct ieee80211_hw *dev)
ret = request_irq(priv->pdev->irq, rtl8180_interrupt,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret) {
- wiphy_err(dev->wiphy, "failed to register IRQ handler\n");
+ wiphy_err(dev->wiphy, "failed to register irq handler\n");
goto err_free_rings;
}
diff --git a/trunk/drivers/net/wireless/rtl818x/rtl8187_dev.c b/trunk/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 98e0351c1dd6..5738a55c1b06 100644
--- a/trunk/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/trunk/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -573,7 +573,7 @@ static int rtl8187_cmd_reset(struct ieee80211_hw *dev)
} while (--i);
if (!i) {
- wiphy_err(dev->wiphy, "Reset timeout!\n");
+ wiphy_err(dev->wiphy, "reset timeout!\n");
return -ETIMEDOUT;
}
@@ -1526,7 +1526,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
mutex_init(&priv->conf_mutex);
skb_queue_head_init(&priv->b_tx_status.queue);
- wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
+ wiphy_info(dev->wiphy, "hwaddr %pM, %s v%d + %s, rfkill mask %d\n",
mac_addr, chip_name, priv->asic_rev, priv->rf->name,
priv->rfkill_mask);
diff --git a/trunk/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c b/trunk/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
index 97eebdcf7eb9..fd96f9112322 100644
--- a/trunk/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
+++ b/trunk/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
@@ -366,7 +366,7 @@ static void rtl8225_rf_init(struct ieee80211_hw *dev)
rtl8225_write(dev, 0x02, 0x044d);
msleep(100);
if (!(rtl8225_read(dev, 6) & (1 << 7)))
- wiphy_warn(dev->wiphy, "RF Calibration Failed! %x\n",
+ wiphy_warn(dev->wiphy, "rf calibration failed! %x\n",
rtl8225_read(dev, 6));
}
@@ -735,7 +735,7 @@ static void rtl8225z2_rf_init(struct ieee80211_hw *dev)
rtl8225_write(dev, 0x02, 0x044D);
msleep(100);
if (!(rtl8225_read(dev, 6) & (1 << 7)))
- wiphy_warn(dev->wiphy, "RF Calibration Failed! %x\n",
+ wiphy_warn(dev->wiphy, "rf calibration failed! %x\n",
rtl8225_read(dev, 6));
}
diff --git a/trunk/drivers/net/wireless/wl12xx/wl1251_cmd.c b/trunk/drivers/net/wireless/wl12xx/wl1251_cmd.c
index ce3722f4c3e3..a37b30cef489 100644
--- a/trunk/drivers/net/wireless/wl12xx/wl1251_cmd.c
+++ b/trunk/drivers/net/wireless/wl12xx/wl1251_cmd.c
@@ -484,7 +484,7 @@ int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout)
cmd->timeout = timeout;
- ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, cmd, sizeof(*cmd));
+ ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
if (ret < 0) {
wl1251_error("cmd trigger scan to failed: %d", ret);
goto out;
diff --git a/trunk/drivers/oprofile/buffer_sync.c b/trunk/drivers/oprofile/buffer_sync.c
index b7e755f4178a..a9352b2c7ac4 100644
--- a/trunk/drivers/oprofile/buffer_sync.c
+++ b/trunk/drivers/oprofile/buffer_sync.c
@@ -141,6 +141,16 @@ static struct notifier_block module_load_nb = {
.notifier_call = module_load_notify,
};
+
+static void end_sync(void)
+{
+ end_cpu_work();
+ /* make sure we don't leak task structs */
+ process_task_mortuary();
+ process_task_mortuary();
+}
+
+
int sync_start(void)
{
int err;
@@ -148,7 +158,7 @@ int sync_start(void)
if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
return -ENOMEM;
- mutex_lock(&buffer_mutex);
+ start_cpu_work();
err = task_handoff_register(&task_free_nb);
if (err)
@@ -163,10 +173,7 @@ int sync_start(void)
if (err)
goto out4;
- start_cpu_work();
-
out:
- mutex_unlock(&buffer_mutex);
return err;
out4:
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -175,6 +182,7 @@ int sync_start(void)
out2:
task_handoff_unregister(&task_free_nb);
out1:
+ end_sync();
free_cpumask_var(marked_cpus);
goto out;
}
@@ -182,20 +190,11 @@ int sync_start(void)
void sync_stop(void)
{
- /* flush buffers */
- mutex_lock(&buffer_mutex);
- end_cpu_work();
unregister_module_notifier(&module_load_nb);
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
task_handoff_unregister(&task_free_nb);
- mutex_unlock(&buffer_mutex);
- flush_scheduled_work();
-
- /* make sure we don't leak task structs */
- process_task_mortuary();
- process_task_mortuary();
-
+ end_sync();
free_cpumask_var(marked_cpus);
}
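
The buffer_sync.c hunks factor the work shutdown and the double process_task_mortuary() call into a shared end_sync() helper, which both the sync_start() error path and sync_stop() now use. A generic sketch of that "shared teardown for both the failure path and the normal stop path" structure is below; the step names are placeholders, not oprofile internals.

#include <stdio.h>

/* Placeholder steps standing in for start_cpu_work(), the notifiers, etc. */
static int register_notifiers(void) { return 0; }
static void unregister_notifiers(void) { }
static void start_work(void) { puts("work started"); }
static void end_work(void) { puts("work stopped"); }
static void drain_leftovers(void) { puts("leftovers drained"); }

/* Shared teardown: called on both the start() error path and stop(). */
static void end_sync(void)
{
	end_work();
	drain_leftovers();
}

static int sync_start(void)
{
	int err;

	start_work();
	err = register_notifiers();
	if (err)
		goto out_err;
	return 0;

out_err:
	end_sync();		/* undo start_work() before reporting failure */
	return err;
}

static void sync_stop(void)
{
	unregister_notifiers();
	end_sync();
}

int main(void)
{
	if (!sync_start())
		sync_stop();
	return 0;
}
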
diff --git a/trunk/drivers/oprofile/cpu_buffer.c b/trunk/drivers/oprofile/cpu_buffer.c
index f179ac2ea801..219f79e2210a 100644
--- a/trunk/drivers/oprofile/cpu_buffer.c
+++ b/trunk/drivers/oprofile/cpu_buffer.c
@@ -120,6 +120,8 @@ void end_cpu_work(void)
cancel_delayed_work(&b->work);
}
+
+ flush_scheduled_work();
}
/*
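
The cpu_buffer.c hunk moves flush_scheduled_work() into end_cpu_work(), so cancelling each per-CPU delayed work is immediately followed by a flush of the shared workqueue. A hedged kernel-style sketch of that cancel-then-flush teardown is below; struct my_cpu_buffer is an illustrative per-CPU type, not the oprofile buffer.

#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

struct my_cpu_buffer {
	struct delayed_work work;
	/* ... per-CPU state ... */
};

static DEFINE_PER_CPU(struct my_cpu_buffer, my_cpu_buffer);

static void my_end_cpu_work(void)
{
	int i;

	for_each_online_cpu(i) {
		struct my_cpu_buffer *b = &per_cpu(my_cpu_buffer, i);

		/* Stop further rearming; a callback may still be in flight. */
		cancel_delayed_work(&b->work);
	}

	/* Wait for anything already queued on the shared workqueue. */
	flush_scheduled_work();
}

cancel_delayed_work_sync() on each work item would be the per-item alternative; the single flush after the loop matches what the hunk above does.
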
diff --git a/trunk/drivers/pci/hotplug/acpi_pcihp.c b/trunk/drivers/pci/hotplug/acpi_pcihp.c
index 3bc72d18b121..45fcc1e96df9 100644
--- a/trunk/drivers/pci/hotplug/acpi_pcihp.c
+++ b/trunk/drivers/pci/hotplug/acpi_pcihp.c
@@ -338,7 +338,9 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
acpi_handle chandle, handle;
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
- flags &= OSC_SHPC_NATIVE_HP_CONTROL;
+ flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
+ OSC_SHPC_NATIVE_HP_CONTROL |
+ OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
if (!flags) {
err("Invalid flags %u specified!\n", flags);
return -EINVAL;
@@ -358,7 +360,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
dbg("Trying to get hotplug control for %s\n",
(char *)string.pointer);
- status = acpi_pci_osc_control_set(handle, &flags, flags);
+ status = acpi_pci_osc_control_set(handle, flags);
if (ACPI_SUCCESS(status))
goto got_one;
if (status == AE_SUPPORT)
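
acpi_get_hp_hw_control_from_firmware() first masks the caller's flags down to the _OSC control bits it is willing to request, rejects an empty mask, and only then asks the firmware for control. The small user-space sketch below covers just the mask-and-validate step; the MY_OSC_* values are stand-ins, and the actual root-bridge walk plus acpi_pci_osc_control_set() call are left to the kernel code shown above.

#include <stdio.h>
#include <errno.h>

/* Stand-in control bits; the kernel's OSC_* values differ. */
#define MY_OSC_EXPRESS_NATIVE_HP_CONTROL	0x01
#define MY_OSC_SHPC_NATIVE_HP_CONTROL		0x02
#define MY_OSC_EXPRESS_CAP_STRUCTURE_CONTROL	0x10

static int request_hp_control(unsigned int flags)
{
	/* Keep only the control bits this path knows how to request. */
	flags &= (MY_OSC_EXPRESS_NATIVE_HP_CONTROL |
		  MY_OSC_SHPC_NATIVE_HP_CONTROL |
		  MY_OSC_EXPRESS_CAP_STRUCTURE_CONTROL);
	if (!flags) {
		fprintf(stderr, "Invalid flags %u specified!\n", flags);
		return -EINVAL;
	}

	/* ... the kernel then walks up to the root bridge and runs _OSC ... */
	printf("requesting _OSC control mask 0x%x\n", flags);
	return 0;
}

int main(void)
{
	request_hp_control(0x04);	/* only an unknown bit: rejected */
	request_hp_control(MY_OSC_EXPRESS_NATIVE_HP_CONTROL);
	return 0;
}
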
diff --git a/trunk/drivers/pci/hotplug/pciehp.h b/trunk/drivers/pci/hotplug/pciehp.h
index 73d513989263..4ed76b47b6dc 100644
--- a/trunk/drivers/pci/hotplug/pciehp.h
+++ b/trunk/drivers/pci/hotplug/pciehp.h
@@ -176,11 +176,19 @@ static inline void pciehp_firmware_init(void)
{
pciehp_acpi_slot_detection_init();
}
-#else
-#define pciehp_firmware_init() do {} while (0)
-static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
+
+static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
{
- return 0;
+ int retval;
+ u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
+ OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+ retval = acpi_get_hp_hw_control_from_firmware(dev, flags);
+ if (retval)
+ return retval;
+ return pciehp_acpi_slot_detection_check(dev);
}
+#else
+#define pciehp_firmware_init() do {} while (0)
+#define pciehp_get_hp_hw_control_from_firmware(dev) 0
#endif /* CONFIG_ACPI */
#endif /* _PCIEHP_H */
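
The pciehp.h hunk is an instance of a common kernel header pattern: with CONFIG_ACPI enabled, pciehp_get_hp_hw_control_from_firmware() is a real static inline that asks the firmware for hotplug control, while without ACPI the same name collapses to a macro that always reports success. A minimal header sketch of that #ifdef stub pattern is below, with invented names rather than the pciehp ones.

/* my_feature.h -- illustrative header, not part of pciehp */
#ifndef _MY_FEATURE_H_
#define _MY_FEATURE_H_

#ifdef CONFIG_MY_FEATURE
int my_feature_query_firmware(void *dev);	/* provided by my_feature.c */

static inline int my_feature_check(void *dev)
{
	int retval = my_feature_query_firmware(dev);

	if (retval)
		return retval;
	return 0;			/* further checks could follow here */
}
#else /* !CONFIG_MY_FEATURE */
/* With the feature compiled out, callers see an always-successful no-op. */
#define my_feature_check(dev)	0
#endif

#endif /* _MY_FEATURE_H_ */

Keeping the stub as a macro (or an empty static inline) lets callers stay free of #ifdefs, which is exactly what pciehp_probe() relies on in the pciehp_core.c hunk below.
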
diff --git a/trunk/drivers/pci/hotplug/pciehp_acpi.c b/trunk/drivers/pci/hotplug/pciehp_acpi.c
index 2574700db461..1f4000a5a108 100644
--- a/trunk/drivers/pci/hotplug/pciehp_acpi.c
+++ b/trunk/drivers/pci/hotplug/pciehp_acpi.c
@@ -85,7 +85,9 @@ static int __init dummy_probe(struct pcie_device *dev)
acpi_handle handle;
struct dummy_slot *slot, *tmp;
struct pci_dev *pdev = dev->port;
-
+ /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
+ if (pciehp_get_hp_hw_control_from_firmware(pdev))
+ return -ENODEV;
pos = pci_pcie_cap(pdev);
if (!pos)
return -ENODEV;
diff --git a/trunk/drivers/pci/hotplug/pciehp_core.c b/trunk/drivers/pci/hotplug/pciehp_core.c
index aa5f3ff629ff..3588ea61b0dd 100644
--- a/trunk/drivers/pci/hotplug/pciehp_core.c
+++ b/trunk/drivers/pci/hotplug/pciehp_core.c
@@ -59,7 +59,7 @@ module_param(pciehp_force, bool, 0644);
MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
-MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if OSHP is missing");
+MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing");
#define PCIE_MODULE_NAME "pciehp"
@@ -235,7 +235,7 @@ static int pciehp_probe(struct pcie_device *dev)
dev_info(&dev->device,
"Bypassing BIOS check for pciehp use on %s\n",
pci_name(dev->port));
- else if (pciehp_acpi_slot_detection_check(dev->port))
+ else if (pciehp_get_hp_hw_control_from_firmware(dev->port))
goto err_out_none;
ctrl = pcie_init(dev);
diff --git a/trunk/drivers/pci/pci.h b/trunk/drivers/pci/pci.h
index 7754a678ab15..679c39de6a89 100644
--- a/trunk/drivers/pci/pci.h
+++ b/trunk/drivers/pci/pci.h
@@ -140,10 +140,8 @@ static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
#ifdef CONFIG_PCIEAER
void pci_no_aer(void);
-bool pci_aer_available(void);
#else
static inline void pci_no_aer(void) { }
-static inline bool pci_aer_available(void) { return false; }
#endif
static inline int pci_no_d1d2(struct pci_dev *dev)
diff --git a/trunk/drivers/pci/pcie/Makefile b/trunk/drivers/pci/pcie/Makefile
index 00c62df5a9fc..ea654545e7c4 100644
--- a/trunk/drivers/pci/pcie/Makefile
+++ b/trunk/drivers/pci/pcie/Makefile
@@ -6,11 +6,10 @@
obj-$(CONFIG_PCIEASPM) += aspm.o
pcieportdrv-y := portdrv_core.o portdrv_pci.o portdrv_bus.o
-pcieportdrv-$(CONFIG_ACPI) += portdrv_acpi.o
obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
# Build PCI Express AER if needed
obj-$(CONFIG_PCIEAER) += aer/
-obj-$(CONFIG_PCIE_PME) += pme.o
+obj-$(CONFIG_PCIE_PME) += pme/
diff --git a/trunk/drivers/pci/pcie/aer/aerdrv.c b/trunk/drivers/pci/pcie/aer/aerdrv.c
index f409948e1a9b..484cc55194b8 100644
--- a/trunk/drivers/pci/pcie/aer/aerdrv.c
+++ b/trunk/drivers/pci/pcie/aer/aerdrv.c
@@ -72,11 +72,6 @@ void pci_no_aer(void)
pcie_aer_disable = 1; /* has priority over 'forceload' */
}
-bool pci_aer_available(void)
-{
- return !pcie_aer_disable && pci_msi_enabled();
-}
-
static int set_device_error_reporting(struct pci_dev *dev, void *data)
{
bool enable = *((bool *)data);
@@ -416,7 +411,9 @@ static void aer_error_resume(struct pci_dev *dev)
*/
static int __init aer_service_init(void)
{
- if (!pci_aer_available())
+ if (pcie_aer_disable)
+ return -ENXIO;
+ if (!pci_msi_enabled())
return -ENXIO;
return pcie_port_service_register(&aerdriver);
}
diff --git a/trunk/drivers/pci/pcie/aer/aerdrv_acpi.c b/trunk/drivers/pci/pcie/aer/aerdrv_acpi.c
index 2bb9b8972211..f278d7b0d95d 100644
--- a/trunk/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/trunk/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -19,6 +19,42 @@
#include
#include "aerdrv.h"
+/**
+ * aer_osc_setup - run ACPI _OSC method
+ * @pciedev: pcie_device which AER is being enabled on
+ *
+ * @return: Zero on success. Nonzero otherwise.
+ *
+ * Invoked when the PCIe bus loads the AER service driver. To avoid conflict
+ * with BIOS AER handling, the BIOS is required to yield AER control to the
+ * OS native driver.
+ **/
+int aer_osc_setup(struct pcie_device *pciedev)
+{
+ acpi_status status = AE_NOT_FOUND;
+ struct pci_dev *pdev = pciedev->port;
+ acpi_handle handle = NULL;
+
+ if (acpi_pci_disabled)
+ return -1;
+
+ handle = acpi_find_root_bridge_handle(pdev);
+ if (handle) {
+ status = acpi_pci_osc_control_set(handle,
+ OSC_PCI_EXPRESS_AER_CONTROL |
+ OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+ }
+
+ if (ACPI_FAILURE(status)) {
+ dev_printk(KERN_DEBUG, &pciedev->device, "AER service couldn't "
+ "init device: %s\n",
+ (status == AE_SUPPORT || status == AE_NOT_FOUND) ?
+ "no _OSC support" : "_OSC failed");
+ return -1;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_ACPI_APEI
static inline int hest_match_pci(struct acpi_hest_aer_common *p,
struct pci_dev *pci)
diff --git a/trunk/drivers/pci/pcie/aer/aerdrv_core.c b/trunk/drivers/pci/pcie/aer/aerdrv_core.c
index 29e268fadf14..fc0b5a93e1de 100644
--- a/trunk/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/trunk/drivers/pci/pcie/aer/aerdrv_core.c
@@ -772,10 +772,22 @@ void aer_isr(struct work_struct *work)
*/
int aer_init(struct pcie_device *dev)
{
+ if (pcie_aer_get_firmware_first(dev->port)) {
+ dev_printk(KERN_DEBUG, &dev->device,
+ "PCIe errors handled by platform firmware.\n");
+ goto out;
+ }
+
+ if (aer_osc_setup(dev))
+ goto out;
+
+ return 0;
+out:
if (forceload) {
dev_printk(KERN_DEBUG, &dev->device,
"aerdrv forceload requested.\n");
pcie_aer_force_firmware_first(dev->port, 0);
+ return 0;
}
- return 0;
+ return -ENXIO;
}
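
The aer_init() hunk adds two "leave error handling to platform firmware" checks ahead of native AER setup, with the forceload module parameter acting as an override on the combined bail-out path. A condensed user-space sketch of that gate-then-override control flow is below; firmware_first, forceload and platform_handshake() are placeholders, not the AER driver's symbols.

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

static bool firmware_first = true;	/* pretend the platform owns error handling */
static bool forceload;			/* module-parameter-style override */

static int platform_handshake(void) { return 0; }

static int my_service_init(void)
{
	if (firmware_first) {
		puts("errors handled by platform firmware");
		goto out;
	}

	if (platform_handshake())
		goto out;

	return 0;			/* native handling enabled */
out:
	if (forceload) {
		puts("forceload requested, overriding firmware ownership");
		firmware_first = false;
		return 0;
	}
	return -ENXIO;
}

int main(void)
{
	printf("init: %d\n", my_service_init());
	forceload = true;
	printf("init with forceload: %d\n", my_service_init());
	return 0;
}
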
diff --git a/trunk/drivers/pci/pcie/pme/Makefile b/trunk/drivers/pci/pcie/pme/Makefile
new file mode 100644
index 000000000000..8b9238053080
--- /dev/null
+++ b/trunk/drivers/pci/pcie/pme/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for PCI-Express Root Port PME signaling driver
+#
+
+obj-$(CONFIG_PCIE_PME) += pmedriver.o
+
+pmedriver-objs := pcie_pme.o
+pmedriver-$(CONFIG_ACPI) += pcie_pme_acpi.o
diff --git a/trunk/drivers/pci/pcie/pme.c b/trunk/drivers/pci/pcie/pme/pcie_pme.c
similarity index 83%
rename from trunk/drivers/pci/pcie/pme.c
rename to trunk/drivers/pci/pcie/pme/pcie_pme.c
index 2f3c90407227..bbdea18693d9 100644
--- a/trunk/drivers/pci/pcie/pme.c
+++ b/trunk/drivers/pci/pcie/pme/pcie_pme.c
@@ -23,12 +23,37 @@
#include
#include
-#include "../pci.h"
-#include "portdrv.h"
+#include "../../pci.h"
+#include "pcie_pme.h"
#define PCI_EXP_RTSTA_PME 0x10000 /* PME status */
#define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */
+/*
+ * If set, this switch will prevent the PCIe root port PME service driver from
+ * being registered. Consequently, the interrupt-based PCIe PME signaling will
+ * not be used by any PCIe root ports in that case.
+ */
+static bool pcie_pme_disabled = true;
+
+/*
+ * The PCI Express Base Specification 2.0, Section 6.1.8, states the following:
+ * "In order to maintain compatibility with non-PCI Express-aware system
+ * software, system power management logic must be configured by firmware to use
+ * the legacy mechanism of signaling PME by default. PCI Express-aware system
+ * software must notify the firmware prior to enabling native, interrupt-based
+ * PME signaling." However, if the platform doesn't provide us with a suitable
+ * notification mechanism or the notification fails, it is not clear whether or
+ * not we are supposed to use the interrupt-based PCIe PME signaling. The
+ * switch below can be used to indicate the desired behaviour. When set, it
+ * will make the kernel use the interrupt-based PCIe PME signaling regardless of
+ * the platform notification status, although the kernel will attempt to notify
+ * the platform anyway. When unset, it will prevent the kernel from using
+ * the interrupt-based PCIe PME signaling if the platform notification fails,
+ * which is the default.
+ */
+static bool pcie_pme_force_enable;
+
/*
* If this switch is set, MSI will not be used for PCIe PME signaling. This
* causes the PCIe port driver to use INTx interrupts only, but it turns out
@@ -39,13 +64,38 @@ bool pcie_pme_msi_disabled;
static int __init pcie_pme_setup(char *str)
{
- if (!strncmp(str, "nomsi", 5))
- pcie_pme_msi_disabled = true;
+ if (!strncmp(str, "auto", 4))
+ pcie_pme_disabled = false;
+ else if (!strncmp(str, "force", 5))
+ pcie_pme_force_enable = true;
+
+ str = strchr(str, ',');
+ if (str) {
+ str++;
+ str += strspn(str, " \t");
+ if (*str && !strcmp(str, "nomsi"))
+ pcie_pme_msi_disabled = true;
+ }
return 1;
}
__setup("pcie_pme=", pcie_pme_setup);
+/**
+ * pcie_pme_platform_setup - Ensure that the kernel controls the PCIe PME.
+ * @srv: PCIe PME root port service to use for carrying out the check.
+ *
+ * Notify the platform that the native PCIe PME is going to be used and return
+ * 'true' if the control of the PCIe PME registers has been acquired from the
+ * platform.
+ */
+static bool pcie_pme_platform_setup(struct pcie_device *srv)
+{
+ if (!pcie_pme_platform_notify(srv))
+ return true;
+ return pcie_pme_force_enable;
+}
+
struct pcie_pme_service_data {
spinlock_t lock;
struct pcie_device *srv;
@@ -58,7 +108,7 @@ struct pcie_pme_service_data {
* @dev: PCIe root port or event collector.
* @enable: Enable or disable the interrupt.
*/
-void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
+static void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
{
int rtctl_pos;
u16 rtctl;
@@ -367,6 +417,9 @@ static int pcie_pme_probe(struct pcie_device *srv)
struct pcie_pme_service_data *data;
int ret;
+ if (!pcie_pme_platform_setup(srv))
+ return -EACCES;
+
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -456,7 +509,8 @@ static struct pcie_port_service_driver pcie_pme_driver = {
*/
static int __init pcie_pme_service_init(void)
{
- return pcie_port_service_register(&pcie_pme_driver);
+ return pcie_pme_disabled ?
+ -ENODEV : pcie_port_service_register(&pcie_pme_driver);
}
module_init(pcie_pme_service_init);
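
pcie_pme_setup() above accepts a compound boot option: a leading "auto" or "force" keyword, optionally followed by a comma, whitespace, and "nomsi". The standalone sketch below exercises the same parsing logic from user space; only the string handling mirrors the hunk, and the three flag names are local to the example rather than the kernel's.

#include <stdio.h>
#include <stdbool.h>
#include <string.h>

static bool pme_disabled = true;
static bool pme_force_enable;
static bool pme_msi_disabled;

/* Parse strings like "auto", "force,nomsi" or "auto, nomsi". */
static void parse_pme_option(char *str)
{
	if (!strncmp(str, "auto", 4))
		pme_disabled = false;
	else if (!strncmp(str, "force", 5))
		pme_force_enable = true;

	str = strchr(str, ',');
	if (str) {
		str++;
		str += strspn(str, " \t");
		if (*str && !strcmp(str, "nomsi"))
			pme_msi_disabled = true;
	}
}

int main(void)
{
	char opt[] = "auto, nomsi";

	parse_pme_option(opt);
	printf("disabled=%d force=%d nomsi=%d\n",
	       pme_disabled, pme_force_enable, pme_msi_disabled);
	return 0;
}

Because strncmp() only checks the keyword prefix, the comma scan afterwards is what actually separates the first token from the optional "nomsi" suffix.
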
diff --git a/trunk/drivers/pci/pcie/pme/pcie_pme.h b/trunk/drivers/pci/pcie/pme/pcie_pme.h
new file mode 100644
index 000000000000..b30d2b7c9775
--- /dev/null
+++ b/trunk/drivers/pci/pcie/pme/pcie_pme.h
@@ -0,0 +1,28 @@
+/*
+ * drivers/pci/pcie/pme/pcie_pme.h
+ *
+ * PCI Express Root Port PME signaling support
+ *
+ * Copyright (C) 2009 Rafael J. Wysocki , Novell Inc.
+ */
+
+#ifndef _PCIE_PME_H_
+#define _PCIE_PME_H_
+
+struct pcie_device;
+
+#ifdef CONFIG_ACPI
+extern int pcie_pme_acpi_setup(struct pcie_device *srv);
+
+static inline int pcie_pme_platform_notify(struct pcie_device *srv)
+{
+ return pcie_pme_acpi_setup(srv);
+}
+#else /* !CONFIG_ACPI */
+static inline int pcie_pme_platform_notify(struct pcie_device *srv)
+{
+ return 0;
+}
+#endif /* !CONFIG_ACPI */
+
+#endif
diff --git a/trunk/drivers/pci/pcie/pme/pcie_pme_acpi.c b/trunk/drivers/pci/pcie/pme/pcie_pme_acpi.c
new file mode 100644
index 000000000000..83ab2287ae3f
--- /dev/null
+++ b/trunk/drivers/pci/pcie/pme/pcie_pme_acpi.c
@@ -0,0 +1,54 @@
+/*
+ * PCIe Native PME support, ACPI-related part
+ *
+ * Copyright (C) 2009 Rafael J. Wysocki , Novell Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License V2. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include
+#include
+#include