diff --git a/[refs] b/[refs]
index d8870eaa808d..cc75f485d85a 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 59be2e04e50ac9947e4356c10099f49977f5f74d
+refs/heads/master: b568be627a7270eba575bc4406a606e1545f91bb
diff --git a/trunk/Documentation/Changes b/trunk/Documentation/Changes
index f08b313cd235..6d0f1efc5bf6 100644
--- a/trunk/Documentation/Changes
+++ b/trunk/Documentation/Changes
@@ -49,8 +49,6 @@ o oprofile 0.9 # oprofiled --version
o udev 081 # udevinfo -V
o grub 0.93 # grub --version
o mcelog 0.6
-o iptables 1.4.1 # iptables -V
-
Kernel compilation
==================
diff --git a/trunk/Documentation/DocBook/Makefile b/trunk/Documentation/DocBook/Makefile
index 325cfd1d6d99..ab8300f67182 100644
--- a/trunk/Documentation/DocBook/Makefile
+++ b/trunk/Documentation/DocBook/Makefile
@@ -8,7 +8,7 @@
DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \
kernel-hacking.xml kernel-locking.xml deviceiobook.xml \
- writing_usb_driver.xml networking.xml \
+ procfs-guide.xml writing_usb_driver.xml networking.xml \
kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
@@ -32,10 +32,10 @@ PS_METHOD = $(prefer-db2x)
###
# The targets that may be used.
-PHONY += xmldocs sgmldocs psdocs pdfdocs htmldocs mandocs installmandocs cleandocs xmldoclinks
+PHONY += xmldocs sgmldocs psdocs pdfdocs htmldocs mandocs installmandocs cleandocs media
BOOKS := $(addprefix $(obj)/,$(DOCBOOKS))
-xmldocs: $(BOOKS) xmldoclinks
+xmldocs: $(BOOKS)
sgmldocs: xmldocs
PS := $(patsubst %.xml, %.ps, $(BOOKS))
@@ -45,24 +45,15 @@ PDF := $(patsubst %.xml, %.pdf, $(BOOKS))
pdfdocs: $(PDF)
HTML := $(sort $(patsubst %.xml, %.html, $(BOOKS)))
-htmldocs: $(HTML)
+htmldocs: media $(HTML)
$(call build_main_index)
- $(call build_images)
MAN := $(patsubst %.xml, %.9, $(BOOKS))
mandocs: $(MAN)
-build_images = mkdir -p $(objtree)/Documentation/DocBook/media/ && \
- cp $(srctree)/Documentation/DocBook/dvb/*.png $(srctree)/Documentation/DocBook/v4l/*.gif $(objtree)/Documentation/DocBook/media/
-
-xmldoclinks:
-ifneq ($(objtree),$(srctree))
- for dep in dvb media-entities.tmpl media-indices.tmpl v4l; do \
- rm -f $(objtree)/Documentation/DocBook/$$dep \
- && ln -s $(srctree)/Documentation/DocBook/$$dep $(objtree)/Documentation/DocBook/ \
- || exit; \
- done
-endif
+media:
+ mkdir -p $(srctree)/Documentation/DocBook/media/
+ cp $(srctree)/Documentation/DocBook/dvb/*.png $(srctree)/Documentation/DocBook/v4l/*.gif $(srctree)/Documentation/DocBook/media/
installmandocs: mandocs
mkdir -p /usr/local/man/man9/
@@ -74,7 +65,7 @@ KERNELDOC = $(srctree)/scripts/kernel-doc
DOCPROC = $(objtree)/scripts/basic/docproc
XMLTOFLAGS = -m $(srctree)/Documentation/DocBook/stylesheet.xsl
-XMLTOFLAGS += --skip-validation
+#XMLTOFLAGS += --skip-validation
###
# DOCPROC is used for two purposes:
@@ -110,6 +101,17 @@ endif
# Changes in kernel-doc force a rebuild of all documentation
$(BOOKS): $(KERNELDOC)
+###
+# procfs guide uses a .c file as example code.
+# This requires an explicit dependency
+C-procfs-example = procfs_example.xml
+C-procfs-example2 = $(addprefix $(obj)/,$(C-procfs-example))
+$(obj)/procfs-guide.xml: $(C-procfs-example2)
+
+# List of programs to build
+##oops, this is a kernel module::hostprogs-y := procfs_example
+obj-m += procfs_example.o
+
# Tell kbuild to always build the programs
always := $(hostprogs-y)
@@ -236,7 +238,7 @@ clean-files := $(DOCBOOKS) \
$(patsubst %.xml, %.pdf, $(DOCBOOKS)) \
$(patsubst %.xml, %.html, $(DOCBOOKS)) \
$(patsubst %.xml, %.9, $(DOCBOOKS)) \
- $(index)
+ $(C-procfs-example) $(index)
clean-dirs := $(patsubst %.xml,%,$(DOCBOOKS)) man
diff --git a/trunk/Documentation/DocBook/media-entities.tmpl b/trunk/Documentation/DocBook/media-entities.tmpl
index c725cb852c54..bb5ab741220e 100644
--- a/trunk/Documentation/DocBook/media-entities.tmpl
+++ b/trunk/Documentation/DocBook/media-entities.tmpl
@@ -23,7 +23,6 @@
VIDIOC_ENUMINPUT ">
VIDIOC_ENUMOUTPUT ">
VIDIOC_ENUMSTD ">
-VIDIOC_ENUM_DV_PRESETS ">
VIDIOC_ENUM_FMT ">
VIDIOC_ENUM_FRAMEINTERVALS ">
VIDIOC_ENUM_FRAMESIZES ">
@@ -31,8 +30,6 @@
VIDIOC_G_AUDOUT ">
VIDIOC_G_CROP ">
VIDIOC_G_CTRL ">
-VIDIOC_G_DV_PRESET ">
-VIDIOC_G_DV_TIMINGS ">
VIDIOC_G_ENC_INDEX ">
VIDIOC_G_EXT_CTRLS ">
VIDIOC_G_FBUF ">
@@ -56,7 +53,6 @@
VIDIOC_QUERYCTRL ">
VIDIOC_QUERYMENU ">
VIDIOC_QUERYSTD ">
-VIDIOC_QUERY_DV_PRESET ">
VIDIOC_REQBUFS ">
VIDIOC_STREAMOFF ">
VIDIOC_STREAMON ">
@@ -64,8 +60,6 @@
VIDIOC_S_AUDOUT ">
VIDIOC_S_CROP ">
VIDIOC_S_CTRL ">
-VIDIOC_S_DV_PRESET ">
-VIDIOC_S_DV_TIMINGS ">
VIDIOC_S_EXT_CTRLS ">
VIDIOC_S_FBUF ">
VIDIOC_S_FMT ">
@@ -124,7 +118,6 @@
v4l2_audio">
v4l2_audioout">
-v4l2_bt_timings">
v4l2_buffer">
v4l2_capability">
v4l2_captureparm">
@@ -135,9 +128,6 @@
v4l2_dbg_chip_ident">
v4l2_dbg_match">
v4l2_dbg_register">
-v4l2_dv_enum_preset">
-v4l2_dv_preset">
-v4l2_dv_timings">
v4l2_enc_idx">
v4l2_enc_idx_entry">
v4l2_encoder_cmd">
@@ -253,10 +243,6 @@
-
-
-
-
@@ -347,10 +333,6 @@
-
-
-
-
diff --git a/trunk/Documentation/DocBook/media-indices.tmpl b/trunk/Documentation/DocBook/media-indices.tmpl
index 78d6031de001..9e30a236d74f 100644
--- a/trunk/Documentation/DocBook/media-indices.tmpl
+++ b/trunk/Documentation/DocBook/media-indices.tmpl
@@ -36,7 +36,6 @@
enum v4l2_preemphasis
struct v4l2_audio
struct v4l2_audioout
-struct v4l2_bt_timings
struct v4l2_buffer
struct v4l2_capability
struct v4l2_captureparm
@@ -47,9 +46,6 @@
struct v4l2_dbg_chip_ident
struct v4l2_dbg_match
struct v4l2_dbg_register
-struct v4l2_dv_enum_preset
-struct v4l2_dv_preset
-struct v4l2_dv_timings
struct v4l2_enc_idx
struct v4l2_enc_idx_entry
struct v4l2_encoder_cmd
diff --git a/trunk/Documentation/DocBook/procfs-guide.tmpl b/trunk/Documentation/DocBook/procfs-guide.tmpl
new file mode 100644
index 000000000000..9eba4b7af73d
--- /dev/null
+++ b/trunk/Documentation/DocBook/procfs-guide.tmpl
@@ -0,0 +1,626 @@
+
+
+]>
+
+
+
+ Linux Kernel Procfs Guide
+
+
+
+ Erik
+ (J.A.K.)
+ Mouw
+
+
+ mouw@nl.linux.org
+
+
+
+
+
+ This software and documentation were written while working on the
+ LART computing board
+ (http://www.lartmaker.nl/ ),
+ which was sponsored by the Delft University of Technology projects
+ Mobile Multi-media Communications and Ubiquitous Communications.
+
+
+
+
+
+
+ 1.0
+ May 30, 2001
+ Initial revision posted to linux-kernel
+
+
+ 1.1
+ June 3, 2001
+ Revised after comments from linux-kernel
+
+
+
+
+ 2001
+ Erik Mouw
+
+
+
+
+
+ This documentation is free software; you can redistribute it
+ and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+
+
+
+ This documentation is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more details.
+
+
+
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+
+
+
+ For more details see the file COPYING in the source
+ distribution of Linux.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Preface
+
+
+ This guide describes the use of the procfs file system from
+ within the Linux kernel. The idea to write this guide came up on
+ the #kernelnewbies IRC channel (see http://www.kernelnewbies.org/ ),
+ when Jeff Garzik explained the use of procfs and forwarded me a
+ message Alexander Viro wrote to the linux-kernel mailing list. I
+ agreed to write it up nicely, so here it is.
+
+
+
+ I'd like to thank Jeff Garzik
+ jgarzik@pobox.com and Alexander Viro
+ viro@parcelfarce.linux.theplanet.co.uk for their input,
+ Tim Waugh twaugh@redhat.com for his Selfdocbook ,
+ and Marc Joosen marcj@historia.et.tudelft.nl for
+ proofreading.
+
+
+
+ Erik
+
+
+
+
+
+
+
+ Introduction
+
+
+ The /proc file system
+ (procfs) is a special file system in the linux kernel. It's a
+ virtual file system: it is not associated with a block device
+ but exists only in memory. The files in the procfs are there to
+ allow userland programs access to certain information from the
+ kernel (like process information in /proc/[0-9]+/ ), but also for debug
+ purposes (like /proc/ksyms ).
+
+
+
+ This guide describes the use of the procfs file system from
+ within the Linux kernel. It starts by introducing all relevant
+ functions to manage the files within the file system. After that
+ it shows how to communicate with userland, and some tips and
+ tricks will be pointed out. Finally a complete example will be
+ shown.
+
+
+
+ Note that the files in /proc/sys are sysctl files: they
+ don't belong to procfs and are governed by a completely
+ different API described in the Kernel API book.
+
+
+
+
+
+
+
+ Managing procfs entries
+
+
+ This chapter describes the functions that various kernel
+ components use to populate the procfs with files, symlinks,
+ device nodes, and directories.
+
+
+
+ A minor note before we start: if you want to use any of the
+ procfs functions, be sure to include the correct header file!
+ This should be one of the first lines in your code:
+
+
+
+#include <linux/proc_fs.h>
+
+
+
+
+
+
+ Creating a regular file
+
+
+
+ struct proc_dir_entry* create_proc_entry
+ const char* name
+ mode_t mode
+ struct proc_dir_entry* parent
+
+
+
+
+ This function creates a regular file with the name
+ name , file mode
+ mode in the directory
+ parent . To create a file in the root of
+ the procfs, use NULL as
+ parent parameter. When successful, the
+ function will return a pointer to the freshly created
+ struct proc_dir_entry ; otherwise it
+ will return NULL . describes how to do something useful with
+ regular files.
+
+
+
+ Note that it is specifically supported that you can pass a
+ path that spans multiple directories. For example
+ create_proc_entry ("drivers/via0/info" )
+ will create the via0
+ directory if necessary, with standard
+ 0755 permissions.
+
+
+
+ If you only want to be able to read the file, the function
+ create_proc_read_entry described in may be used to create and initialise
+ the procfs entry in one single call.
+
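As an illustrative sketch (not taken from the patch itself; the entry name and mode are arbitrary), creating a file in the root of procfs and checking the result looks like this:

struct proc_dir_entry *entry;

/* creates /proc/driver/example_info; a missing intermediate
 * directory such as "driver" would be created automatically
 */
entry = create_proc_entry("driver/example_info", 0644, NULL);
if (entry == NULL)
        return -ENOMEM;        /* creation failed */

The returned pointer is what the later chapter on communicating with userland uses to attach the read_proc and write_proc callbacks.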
+
+
+
+
+
+
+ Creating a symlink
+
+
+
+ struct proc_dir_entry*
+ proc_symlink const
+ char* name
+ struct proc_dir_entry*
+ parent const
+ char* dest
+
+
+
+
+ This creates a symlink in the procfs directory
+ parent that points from
+ name to
+ dest . This translates in userland to
+ ln -s dest
+ name .
+
+
+
+
+ Creating a directory
+
+
+
+ struct proc_dir_entry* proc_mkdir
+ const char* name
+ struct proc_dir_entry* parent
+
+
+
+
+ Create a directory name in the procfs
+ directory parent .
+
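A short sketch combining proc_mkdir with the proc_symlink call from the previous section (the names are illustrative and error checking is omitted for brevity):

struct proc_dir_entry *example_dir;

example_dir = proc_mkdir("example", NULL);            /* /proc/example */
proc_symlink("jiffies_too", example_dir, "jiffies");  /* points to "jiffies" */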
+
+
+
+
+
+
+ Removing an entry
+
+
+
+ void remove_proc_entry
+ const char* name
+ struct proc_dir_entry* parent
+
+
+
+
+ Removes the entry name in the directory
+ parent from the procfs. Entries are
+ removed by their name , not by the
+ struct proc_dir_entry returned by the
+ various create functions. Note that this function doesn't
+ recursively remove entries.
+
+
+
+ Be sure to free the data entry from
+ the struct proc_dir_entry before
+ remove_proc_entry is called (that is: if
+ there was some data allocated, of
+ course). See for more information
+ on using the data entry.
+
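For example, tearing down a hypothetical /proc/example/info entry created earlier could look like this sketch:

/* free any private data attached to the entry first */
kfree(info_entry->data);

/* remove_proc_entry() is not recursive, so remove the file
 * before its parent directory
 */
remove_proc_entry("info", example_dir);
remove_proc_entry("example", NULL);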
+
+
+
+
+
+
+
+ Communicating with userland
+
+
+ Instead of reading (or writing) information directly from
+ kernel memory, procfs works with call back
+ functions for files: functions that are called when
+ a specific file is being read or written. Such functions have
+ to be initialised after the procfs file is created by setting
+ the read_proc and/or
+ write_proc fields in the
+ struct proc_dir_entry* that the
+ function create_proc_entry returned:
+
+
+
+struct proc_dir_entry* entry;
+
+entry->read_proc = read_proc_foo;
+entry->write_proc = write_proc_foo;
+
+
+
+ If you only want to use the
+ read_proc , the function
+ create_proc_read_entry described in may be used to create and initialise the
+ procfs entry in one single call.
+
+
+
+
+
+ Reading data
+
+
+ The read function is a call back function that allows userland
+ processes to read data from the kernel. The read function
+ should have the following format:
+
+
+
+
+ int read_func
+ char* buffer
+ char** start
+ off_t off
+ int count
+ int* peof
+ void* data
+
+
+
+
+ The read function should write its information into the
+ buffer , which will be exactly
+ PAGE_SIZE bytes long.
+
+
+
+ The parameter
+ peof should be used to signal that the
+ end of the file has been reached by writing
+ 1 to the memory location
+ peof points to.
+
+
+
+ The data
+ parameter can be used to create a single call back function for
+ several files, see .
+
+
+
+ The rest of the parameters and the return value are described
+ by a comment in fs/proc/generic.c as follows:
+
+
+
+
+ You have three ways to return data:
+
+
+
+
+ Leave *start = NULL . (This is the default.)
+ Put the data of the requested offset at that
+ offset within the buffer. Return the number (n )
+ of bytes there are from the beginning of the
+ buffer up to the last byte of data. If the
+ number of supplied bytes (= n - offset ) is
+ greater than zero and you didn't signal eof
+ and the reader is prepared to take more data
+ you will be called again with the requested
+ offset advanced by the number of bytes
+ absorbed. This interface is useful for files
+ no larger than the buffer.
+
+
+
+
+ Set *start to an unsigned long value less than
+ the buffer address but greater than zero.
+ Put the data of the requested offset at the
+ beginning of the buffer. Return the number of
+ bytes of data placed there. If this number is
+ greater than zero and you didn't signal eof
+ and the reader is prepared to take more data
+ you will be called again with the requested
+ offset advanced by *start . This interface is
+ useful when you have a large file consisting
+ of a series of blocks which you want to count
+ and return as wholes.
+ (Hack by Paul.Russell@rustcorp.com.au)
+
+
+
+
+ Set *start to an address within the buffer.
+ Put the data of the requested offset at *start .
+ Return the number of bytes of data placed there.
+ If this number is greater than zero and you
+ didn't signal eof and the reader is prepared to
+ take more data you will be called again with the
+ requested offset advanced by the number of bytes
+ absorbed.
+
+
+
+
+
+
+ shows how to use a read call back
+ function.
+
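For the simple case from the first item above (all data fits into the one-page buffer and *start is left NULL), a read callback can be as small as this sketch; foo_value is a made-up variable:

static int foo_read_proc(char *page, char **start, off_t off,
                         int count, int *eof, void *data)
{
        int len;

        /* everything fits into the PAGE_SIZE buffer, so the
         * offset and count can be ignored and eof set at once
         */
        len = sprintf(page, "foo = %d\n", foo_value);
        *eof = 1;

        return len;
}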
+
+
+
+
+
+
+ Writing data
+
+
+ The write call back function allows a userland process to write
+ data to the kernel, so it has some kind of control over the
+ kernel. The write function should have the following format:
+
+
+
+
+ int write_func
+ struct file* file
+ const char* buffer
+ unsigned long count
+ void* data
+
+
+
+
+ The write function should read count
+ bytes at maximum from the buffer . Note
+ that the buffer doesn't live in the
+ kernel's memory space, so it should first be copied to kernel
+ space with copy_from_user . The
+ file parameter is usually
+ ignored. shows how to use the
+ data parameter.
+
+
+
+ Again, shows how to use this call back
+ function.
+
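A sketch of such a write callback, along the lines of the full example at the end of this guide (the 16-byte buffer is an arbitrary choice):

static int foo_write_proc(struct file *file, const char *buffer,
                          unsigned long count, void *data)
{
        char kbuf[16];
        unsigned long len = count;

        if (len > sizeof(kbuf) - 1)
                len = sizeof(kbuf) - 1;

        /* the buffer is user-space memory, copy it in first */
        if (copy_from_user(kbuf, buffer, len))
                return -EFAULT;
        kbuf[len] = '\0';

        /* ...parse kbuf and update driver state here... */

        return len;
}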
+
+
+
+
+
+
+ A single call back for many files
+
+
+ When a large number of almost identical files is used, it's
+ quite inconvenient to use a separate call back function for
+ each file. A better approach is to have a single call back
+ function that distinguishes between the files by using the
+ data field in struct
+ proc_dir_entry . First of all, the
+ data field has to be initialised:
+
+
+
+struct proc_dir_entry* entry;
+struct my_file_data *file_data;
+
+file_data = kmalloc(sizeof(struct my_file_data), GFP_KERNEL);
+entry->data = file_data;
+
+
+
+ The data field is a void
+ * , so it can be initialised with anything.
+
+
+
+ Now that the data field is set, the
+ read_proc and
+ write_proc can use it to distinguish
+ between files because they get it passed into their
+ data parameter:
+
+
+
+int foo_read_func(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+
+ if(data == file_data) {
+ /* special case for this file */
+ } else {
+ /* normal processing */
+ }
+
+ return len;
+}
+
+
+
+ Be sure to free the data data field
+ when removing the procfs entry.
+
+
+
+
+
+
+
+
+ Tips and tricks
+
+
+
+
+
+ Convenience functions
+
+
+
+ struct proc_dir_entry* create_proc_read_entry
+ const char* name
+ mode_t mode
+ struct proc_dir_entry* parent
+ read_proc_t* read_proc
+ void* data
+
+
+
+
+ This function creates a regular file in exactly the same way
+ as create_proc_entry from does, but also allows to set the read
+ function read_proc in one call. This
+ function can set the data as well, like
+ explained in .
+
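For instance, the jiffies file from the example module at the end of this guide is created and wired up in a single call (sketch; see procfs_example.c below):

jiffies_file = create_proc_read_entry("jiffies", 0444, example_dir,
                                      proc_read_jiffies, NULL);
if (jiffies_file == NULL)
        return -ENOMEM;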
+
+
+
+
+
+ Modules
+
+
+ If procfs is being used from within a module, be sure to set
+ the owner field in the
+ struct proc_dir_entry to
+ THIS_MODULE .
+
+
+
+struct proc_dir_entry* entry;
+
+entry->owner = THIS_MODULE;
+
+
+
+
+
+
+
+ Mode and ownership
+
+
+ Sometimes it is useful to change the mode and/or ownership of
+ a procfs entry. Here is an example that shows how to achieve
+ that:
+
+
+
+struct proc_dir_entry* entry;
+
+entry->mode = S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH;
+entry->uid = 0;
+entry->gid = 100;
+
+
+
+
+
+
+
+
+
+ Example
+
+
+
+&procfsexample;
+
+
+
diff --git a/trunk/Documentation/DocBook/procfs_example.c b/trunk/Documentation/DocBook/procfs_example.c
new file mode 100644
index 000000000000..a5b11793b1e0
--- /dev/null
+++ b/trunk/Documentation/DocBook/procfs_example.c
@@ -0,0 +1,201 @@
+/*
+ * procfs_example.c: an example proc interface
+ *
+ * Copyright (C) 2001, Erik Mouw (mouw@nl.linux.org)
+ *
+ * This file accompanies the procfs-guide in the Linux kernel
+ * source. Its main use is to demonstrate the concepts and
+ * functions described in the guide.
+ *
+ * This software has been developed while working on the LART
+ * computing board (http://www.lartmaker.nl), which was sponsored
+ * by the Delft University of Technology projects Mobile Multi-media
+ * Communications and Ubiquitous Communications.
+ *
+ * This program is free software; you can redistribute
+ * it and/or modify it under the terms of the GNU General
+ * Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/jiffies.h>
+#include <asm/uaccess.h>
+
+
+#define MODULE_VERS "1.0"
+#define MODULE_NAME "procfs_example"
+
+#define FOOBAR_LEN 8
+
+struct fb_data_t {
+ char name[FOOBAR_LEN + 1];
+ char value[FOOBAR_LEN + 1];
+};
+
+
+static struct proc_dir_entry *example_dir, *foo_file,
+ *bar_file, *jiffies_file, *symlink;
+
+
+struct fb_data_t foo_data, bar_data;
+
+
+static int proc_read_jiffies(char *page, char **start,
+ off_t off, int count,
+ int *eof, void *data)
+{
+ int len;
+
+ len = sprintf(page, "jiffies = %ld\n",
+ jiffies);
+
+ return len;
+}
+
+
+static int proc_read_foobar(char *page, char **start,
+ off_t off, int count,
+ int *eof, void *data)
+{
+ int len;
+ struct fb_data_t *fb_data = (struct fb_data_t *)data;
+
+ /* DON'T DO THAT - buffer overruns are bad */
+ len = sprintf(page, "%s = '%s'\n",
+ fb_data->name, fb_data->value);
+
+ return len;
+}
+
+
+static int proc_write_foobar(struct file *file,
+ const char *buffer,
+ unsigned long count,
+ void *data)
+{
+ int len;
+ struct fb_data_t *fb_data = (struct fb_data_t *)data;
+
+ if(count > FOOBAR_LEN)
+ len = FOOBAR_LEN;
+ else
+ len = count;
+
+ if(copy_from_user(fb_data->value, buffer, len))
+ return -EFAULT;
+
+ fb_data->value[len] = '\0';
+
+ return len;
+}
+
+
+static int __init init_procfs_example(void)
+{
+ int rv = 0;
+
+ /* create directory */
+ example_dir = proc_mkdir(MODULE_NAME, NULL);
+ if(example_dir == NULL) {
+ rv = -ENOMEM;
+ goto out;
+ }
+ /* create jiffies using convenience function */
+ jiffies_file = create_proc_read_entry("jiffies",
+ 0444, example_dir,
+ proc_read_jiffies,
+ NULL);
+ if(jiffies_file == NULL) {
+ rv = -ENOMEM;
+ goto no_jiffies;
+ }
+
+ /* create foo and bar files using same callback
+ * functions
+ */
+ foo_file = create_proc_entry("foo", 0644, example_dir);
+ if(foo_file == NULL) {
+ rv = -ENOMEM;
+ goto no_foo;
+ }
+
+ strcpy(foo_data.name, "foo");
+ strcpy(foo_data.value, "foo");
+ foo_file->data = &foo_data;
+ foo_file->read_proc = proc_read_foobar;
+ foo_file->write_proc = proc_write_foobar;
+
+ bar_file = create_proc_entry("bar", 0644, example_dir);
+ if(bar_file == NULL) {
+ rv = -ENOMEM;
+ goto no_bar;
+ }
+
+ strcpy(bar_data.name, "bar");
+ strcpy(bar_data.value, "bar");
+ bar_file->data = &bar_data;
+ bar_file->read_proc = proc_read_foobar;
+ bar_file->write_proc = proc_write_foobar;
+
+ /* create symlink */
+ symlink = proc_symlink("jiffies_too", example_dir,
+ "jiffies");
+ if(symlink == NULL) {
+ rv = -ENOMEM;
+ goto no_symlink;
+ }
+
+ /* everything OK */
+ printk(KERN_INFO "%s %s initialised\n",
+ MODULE_NAME, MODULE_VERS);
+ return 0;
+
+no_symlink:
+ remove_proc_entry("bar", example_dir);
+no_bar:
+ remove_proc_entry("foo", example_dir);
+no_foo:
+ remove_proc_entry("jiffies", example_dir);
+no_jiffies:
+ remove_proc_entry(MODULE_NAME, NULL);
+out:
+ return rv;
+}
+
+
+static void __exit cleanup_procfs_example(void)
+{
+ remove_proc_entry("jiffies_too", example_dir);
+ remove_proc_entry("bar", example_dir);
+ remove_proc_entry("foo", example_dir);
+ remove_proc_entry("jiffies", example_dir);
+ remove_proc_entry(MODULE_NAME, NULL);
+
+ printk(KERN_INFO "%s %s removed\n",
+ MODULE_NAME, MODULE_VERS);
+}
+
+
+module_init(init_procfs_example);
+module_exit(cleanup_procfs_example);
+
+MODULE_AUTHOR("Erik Mouw");
+MODULE_DESCRIPTION("procfs examples");
+MODULE_LICENSE("GPL");
diff --git a/trunk/Documentation/DocBook/v4l/common.xml b/trunk/Documentation/DocBook/v4l/common.xml
index c65f0ac9b6ee..b1a81d246d58 100644
--- a/trunk/Documentation/DocBook/v4l/common.xml
+++ b/trunk/Documentation/DocBook/v4l/common.xml
@@ -716,41 +716,6 @@ if (-1 == ioctl (fd, &VIDIOC-S-STD;, &std_id)) {
}
-
- Digital Video (DV) Timings
-
- The video standards discussed so far has been dealing with Analog TV and the
-corresponding video timings. Today there are many more different hardware interfaces
-such as High Definition TV interfaces (HDMI), VGA, DVI connectors etc., that carry
-video signals and there is a need to extend the API to select the video timings
-for these interfaces. Since it is not possible to extend the &v4l2-std-id; due to
-the limited bits available, a new set of IOCTLs is added to set/get video timings at
-the input and output:
-
- DV Presets: Digital Video (DV) presets. These are IDs representing a
-video timing at the input/output. Presets are pre-defined timings implemented
-by the hardware according to video standards. A __u32 data type is used to represent
-a preset unlike the bit mask that is used in &v4l2-std-id; allowing future extensions
-to support as many different presets as needed.
-
-
- Custom DV Timings: This will allow applications to define more detailed
-custom video timings for the interface. This includes parameters such as width, height,
-polarities, frontporch, backporch etc.
-
-
-
- To enumerate and query the attributes of DV presets supported by a device,
-applications use the &VIDIOC-ENUM-DV-PRESETS; ioctl. To get the current DV preset,
-applications use the &VIDIOC-G-DV-PRESET; ioctl and to set a preset they use the
-&VIDIOC-S-DV-PRESET; ioctl.
- To set custom DV timings for the device, applications use the
-&VIDIOC-S-DV-TIMINGS; ioctl and to get current custom DV timings they use the
-&VIDIOC-G-DV-TIMINGS; ioctl.
- Applications can make use of the and
- flags to decide what ioctls are available to set the
-video timings for the device.
-
&sub-controls;
diff --git a/trunk/Documentation/DocBook/v4l/compat.xml b/trunk/Documentation/DocBook/v4l/compat.xml
index b9dbdf9e6d29..4d1902a54d61 100644
--- a/trunk/Documentation/DocBook/v4l/compat.xml
+++ b/trunk/Documentation/DocBook/v4l/compat.xml
@@ -2291,8 +2291,8 @@ was renamed to v4l2_chip_ident_old
New control V4L2_CID_COLORFX was added.
-
-
+
+
V4L2 in Linux 2.6.32
@@ -2322,16 +2322,8 @@ more information.
Added Remote Controller chapter, describing the default Remote Controller mapping for media devices.
-
-
-
- V4L2 in Linux 2.6.33
-
-
- Added support for Digital Video timings in order to support HDTV receivers and transmitters.
-
-
-
+
+
diff --git a/trunk/Documentation/DocBook/v4l/v4l2.xml b/trunk/Documentation/DocBook/v4l/v4l2.xml
index 060105af49e5..937b4157a5d0 100644
--- a/trunk/Documentation/DocBook/v4l/v4l2.xml
+++ b/trunk/Documentation/DocBook/v4l/v4l2.xml
@@ -74,17 +74,6 @@ Remote Controller chapter.
-
-
- Muralidharan
- Karicheri
- Documented the Digital Video timings API.
-
-
- m-karicheri2@ti.com
-
-
-
@@ -100,7 +89,7 @@ Remote Controller chapter.
2008
2009
Bill Dirks, Michael H. Schimek, Hans Verkuil, Martin
-Rubli, Andy Walls, Muralidharan Karicheri, Mauro Carvalho Chehab
+Rubli, Andy Walls, Mauro Carvalho Chehab
Except when explicitly stated as GPL, programming examples within
@@ -113,13 +102,6 @@ structs, ioctls) must be noted in more detail in the history chapter
(compat.sgml), along with the possible impact on existing drivers and
applications. -->
-
- 2.6.33
- 2009-12-03
- mk
- Added documentation for the Digital Video timings API.
-
-
2.6.32
2009-08-31
@@ -373,7 +355,7 @@ and discussions on the V4L mailing list.
Video for Linux Two API Specification
- Revision 2.6.33
+ Revision 2.6.32
&sub-common;
@@ -429,7 +411,6 @@ and discussions on the V4L mailing list.
&sub-encoder-cmd;
&sub-enumaudio;
&sub-enumaudioout;
- &sub-enum-dv-presets;
&sub-enum-fmt;
&sub-enum-framesizes;
&sub-enum-frameintervals;
@@ -440,8 +421,6 @@ and discussions on the V4L mailing list.
&sub-g-audioout;
&sub-g-crop;
&sub-g-ctrl;
- &sub-g-dv-preset;
- &sub-g-dv-timings;
&sub-g-enc-index;
&sub-g-ext-ctrls;
&sub-g-fbuf;
@@ -462,7 +441,6 @@ and discussions on the V4L mailing list.
&sub-querybuf;
&sub-querycap;
&sub-queryctrl;
- &sub-query-dv-preset;
&sub-querystd;
&sub-reqbufs;
&sub-s-hw-freq-seek;
diff --git a/trunk/Documentation/DocBook/v4l/videodev2.h.xml b/trunk/Documentation/DocBook/v4l/videodev2.h.xml
index 068325940658..3e282ed9f593 100644
--- a/trunk/Documentation/DocBook/v4l/videodev2.h.xml
+++ b/trunk/Documentation/DocBook/v4l/videodev2.h.xml
@@ -733,99 +733,6 @@ struct v4l2_standard {
__u32 reserved[4];
};
-/*
- * V I D E O T I M I N G S D V P R E S E T
- */
-struct v4l2_dv_preset {
- __u32 preset;
- __u32 reserved[4];
-};
-
-/*
- * D V P R E S E T S E N U M E R A T I O N
- */
-struct v4l2_dv_enum_preset {
- __u32 index;
- __u32 preset;
- __u8 name[32]; /* Name of the preset timing */
- __u32 width;
- __u32 height;
- __u32 reserved[4];
-};
-
-/*
- * D V P R E S E T V A L U E S
- */
-#define V4L2_DV_INVALID 0
-#define V4L2_DV_480P59_94 1 /* BT.1362 */
-#define V4L2_DV_576P50 2 /* BT.1362 */
-#define V4L2_DV_720P24 3 /* SMPTE 296M */
-#define V4L2_DV_720P25 4 /* SMPTE 296M */
-#define V4L2_DV_720P30 5 /* SMPTE 296M */
-#define V4L2_DV_720P50 6 /* SMPTE 296M */
-#define V4L2_DV_720P59_94 7 /* SMPTE 274M */
-#define V4L2_DV_720P60 8 /* SMPTE 274M/296M */
-#define V4L2_DV_1080I29_97 9 /* BT.1120/ SMPTE 274M */
-#define V4L2_DV_1080I30 10 /* BT.1120/ SMPTE 274M */
-#define V4L2_DV_1080I25 11 /* BT.1120 */
-#define V4L2_DV_1080I50 12 /* SMPTE 296M */
-#define V4L2_DV_1080I60 13 /* SMPTE 296M */
-#define V4L2_DV_1080P24 14 /* SMPTE 296M */
-#define V4L2_DV_1080P25 15 /* SMPTE 296M */
-#define V4L2_DV_1080P30 16 /* SMPTE 296M */
-#define V4L2_DV_1080P50 17 /* BT.1120 */
-#define V4L2_DV_1080P60 18 /* BT.1120 */
-
-/*
- * D V B T T I M I N G S
- */
-
-/* BT.656/BT.1120 timing data */
-struct v4l2_bt_timings {
- __u32 width; /* width in pixels */
- __u32 height; /* height in lines */
- __u32 interlaced; /* Interlaced or progressive */
- __u32 polarities; /* Positive or negative polarity */
- __u64 pixelclock; /* Pixel clock in HZ. Ex. 74.25MHz->74250000 */
- __u32 hfrontporch; /* Horizpontal front porch in pixels */
- __u32 hsync; /* Horizontal Sync length in pixels */
- __u32 hbackporch; /* Horizontal back porch in pixels */
- __u32 vfrontporch; /* Vertical front porch in pixels */
- __u32 vsync; /* Vertical Sync length in lines */
- __u32 vbackporch; /* Vertical back porch in lines */
- __u32 il_vfrontporch; /* Vertical front porch for bottom field of
- * interlaced field formats
- */
- __u32 il_vsync; /* Vertical sync length for bottom field of
- * interlaced field formats
- */
- __u32 il_vbackporch; /* Vertical back porch for bottom field of
- * interlaced field formats
- */
- __u32 reserved[16];
-} __attribute__ ((packed));
-
-/* Interlaced or progressive format */
-#define V4L2_DV_PROGRESSIVE 0
-#define V4L2_DV_INTERLACED 1
-
-/* Polarities. If bit is not set, it is assumed to be negative polarity */
-#define V4L2_DV_VSYNC_POS_POL 0x00000001
-#define V4L2_DV_HSYNC_POS_POL 0x00000002
-
-
-/* DV timings */
-struct v4l2_dv_timings {
- __u32 type;
- union {
- struct v4l2_bt_timings bt;
- __u32 reserved[32];
- };
-} __attribute__ ((packed));
-
-/* Values for the type field */
-#define V4L2_DV_BT_656_1120 0 /* BT.656/1120 timing type */
-
/*
* V I D E O I N P U T S
*/
@@ -837,8 +744,7 @@ struct v4l2_input {
__u32 tuner; /* Associated tuner */
v4l2_std_id std;
__u32 status;
- __u32 capabilities;
- __u32 reserved[3];
+ __u32 reserved[4];
};
/* Values for the 'type' field */
@@ -869,11 +775,6 @@ struct v4l2_input {
#define V4L2_IN_ST_NO_ACCESS 0x02000000 /* Conditional access denied */
#define V4L2_IN_ST_VTR 0x04000000 /* VTR time constant */
-/* capabilities flags */
-#define V4L2_IN_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */
-#define V4L2_IN_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
-#define V4L2_IN_CAP_STD 0x00000004 /* Supports S_STD */
-
/*
* V I D E O O U T P U T S
*/
@@ -884,19 +785,13 @@ struct v4l2_output {
__u32 audioset; /* Associated audios (bitfield) */
__u32 modulator; /* Associated modulator */
v4l2_std_id std;
- __u32 capabilities;
- __u32 reserved[3];
+ __u32 reserved[4];
};
/* Values for the 'type' field */
#define V4L2_OUTPUT_TYPE_MODULATOR 1
#define V4L2_OUTPUT_TYPE_ANALOG 2
#define V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY 3
-/* capabilities flags */
-#define V4L2_OUT_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */
-#define V4L2_OUT_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
-#define V4L2_OUT_CAP_STD 0x00000004 /* Supports S_STD */
-
/*
* C O N T R O L S
*/
@@ -1731,13 +1626,6 @@ struct v4l2_dbg_chip_ident {
#endif
#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek)
-#define VIDIOC_ENUM_DV_PRESETS _IOWR('V', 83, struct v4l2_dv_enum_preset)
-#define VIDIOC_S_DV_PRESET _IOWR('V', 84, struct v4l2_dv_preset)
-#define VIDIOC_G_DV_PRESET _IOWR('V', 85, struct v4l2_dv_preset)
-#define VIDIOC_QUERY_DV_PRESET _IOR('V', 86, struct v4l2_dv_preset)
-#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
-#define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings)
-
/* Reminder: when adding new ioctls please add support for them to
drivers/media/video/v4l2-compat-ioctl32.c as well! */
diff --git a/trunk/Documentation/DocBook/v4l/vidioc-enum-dv-presets.xml b/trunk/Documentation/DocBook/v4l/vidioc-enum-dv-presets.xml
deleted file mode 100644
index 1d31427edd1b..000000000000
--- a/trunk/Documentation/DocBook/v4l/vidioc-enum-dv-presets.xml
+++ /dev/null
@@ -1,238 +0,0 @@
-
-
- ioctl VIDIOC_ENUM_DV_PRESETS
- &manvol;
-
-
-
- VIDIOC_ENUM_DV_PRESETS
- Enumerate supported Digital Video presets
-
-
-
-
-
- int ioctl
- int fd
- int request
- struct v4l2_dv_enum_preset *argp
-
-
-
-
-
- Arguments
-
-
-
- fd
-
- &fd;
-
-
-
- request
-
- VIDIOC_ENUM_DV_PRESETS
-
-
-
- argp
-
-
-
-
-
-
-
-
- Description
-
- To query the attributes of a DV preset, applications initialize the
-index field and zero the reserved array of &v4l2-dv-enum-preset;
-and call the VIDIOC_ENUM_DV_PRESETS ioctl with a pointer to this
-structure. Drivers fill the rest of the structure or return an
-&EINVAL; when the index is out of bounds. To enumerate all DV Presets supported,
-applications shall begin at index zero, incrementing by one until the
-driver returns EINVAL . Drivers may enumerate a
-different set of DV presets after switching the video input or
-output.
-
-
- struct v4l2_dv_enum_presets
-
- &cs-str;
-
-
- __u32
- index
- Number of the DV preset, set by the
-application.
-
-
- __u32
- preset
- This field identifies one of the DV preset values listed in .
-
-
- __u8
- name [24]
- Name of the preset, a NUL-terminated ASCII string, for example: "720P-60", "1080I-60". This information is
-intended for the user.
-
-
- __u32
- width
- Width of the active video in pixels for the DV preset.
-
-
- __u32
- height
- Height of the active video in lines for the DV preset.
-
-
- __u32
- reserved [4]
- Reserved for future extensions. Drivers must set the array to zero.
-
-
-
-
-
-
- struct DV Presets
-
- &cs-str;
-
-
- Preset
- Preset value
- Description
-
-
-
-
-
-
-
- V4L2_DV_INVALID
- 0
- Invalid preset value.
-
-
- V4L2_DV_480P59_94
- 1
- 720x480 progressive video at 59.94 fps as per BT.1362.
-
-
- V4L2_DV_576P50
- 2
- 720x576 progressive video at 50 fps as per BT.1362.
-
-
- V4L2_DV_720P24
- 3
- 1280x720 progressive video at 24 fps as per SMPTE 296M.
-
-
- V4L2_DV_720P25
- 4
- 1280x720 progressive video at 25 fps as per SMPTE 296M.
-
-
- V4L2_DV_720P30
- 5
- 1280x720 progressive video at 30 fps as per SMPTE 296M.
-
-
- V4L2_DV_720P50
- 6
- 1280x720 progressive video at 50 fps as per SMPTE 296M.
-
-
- V4L2_DV_720P59_94
- 7
- 1280x720 progressive video at 59.94 fps as per SMPTE 274M.
-
-
- V4L2_DV_720P60
- 8
- 1280x720 progressive video at 60 fps as per SMPTE 274M/296M.
-
-
- V4L2_DV_1080I29_97
- 9
- 1920x1080 interlaced video at 29.97 fps as per BT.1120/SMPTE 274M.
-
-
- V4L2_DV_1080I30
- 10
- 1920x1080 interlaced video at 30 fps as per BT.1120/SMPTE 274M.
-
-
- V4L2_DV_1080I25
- 11
- 1920x1080 interlaced video at 25 fps as per BT.1120.
-
-
- V4L2_DV_1080I50
- 12
- 1920x1080 interlaced video at 50 fps as per SMPTE 296M.
-
-
- V4L2_DV_1080I60
- 13
- 1920x1080 interlaced video at 60 fps as per SMPTE 296M.
-
-
- V4L2_DV_1080P24
- 14
- 1920x1080 progressive video at 24 fps as per SMPTE 296M.
-
-
- V4L2_DV_1080P25
- 15
- 1920x1080 progressive video at 25 fps as per SMPTE 296M.
-
-
- V4L2_DV_1080P30
- 16
- 1920x1080 progressive video at 30 fps as per SMPTE 296M.
-
-
- V4L2_DV_1080P50
- 17
- 1920x1080 progressive video at 50 fps as per BT.1120.
-
-
- V4L2_DV_1080P60
- 18
- 1920x1080 progressive video at 60 fps as per BT.1120.
-
-
-
-
-
-
-
- &return-value;
-
-
-
- EINVAL
-
- The &v4l2-dv-enum-preset; index
-is out of bounds.
-
-
-
-
-
-
-
diff --git a/trunk/Documentation/DocBook/v4l/vidioc-enuminput.xml b/trunk/Documentation/DocBook/v4l/vidioc-enuminput.xml
index 71b868e2fb8f..414856b82473 100644
--- a/trunk/Documentation/DocBook/v4l/vidioc-enuminput.xml
+++ b/trunk/Documentation/DocBook/v4l/vidioc-enuminput.xml
@@ -124,13 +124,7 @@ current input.
__u32
- capabilities
- This field provides capabilities for the
-input. See for flags.
-
-
- __u32
- reserved [3]
+ reserved [4]
Reserved for future extensions. Drivers must set
the array to zero.
@@ -267,34 +261,6 @@ flag is set Macrovision has been detected.
-
-
-
diff --git a/trunk/Documentation/DocBook/v4l/vidioc-enumoutput.xml b/trunk/Documentation/DocBook/v4l/vidioc-enumoutput.xml
index a281d26a195f..e8d16dcd50cf 100644
--- a/trunk/Documentation/DocBook/v4l/vidioc-enumoutput.xml
+++ b/trunk/Documentation/DocBook/v4l/vidioc-enumoutput.xml
@@ -114,13 +114,7 @@ details on video standards and how to switch see
__u32
- capabilities
- This field provides capabilities for the
-output. See for flags.
-
-
- __u32
- reserved [3]
+ reserved [4]
Reserved for future extensions. Drivers must set
the array to zero.
@@ -153,34 +147,6 @@ CVBS, S-Video, RGB.
-
-
- Output capabilities
-
- &cs-def;
-
-
- V4L2_OUT_CAP_PRESETS
- 0x00000001
- This output supports setting DV presets by using VIDIOC_S_DV_PRESET.
-
-
- V4L2_OUT_CAP_CUSTOM_TIMINGS
- 0x00000002
- This output supports setting custom video timings by using VIDIOC_S_DV_TIMINGS.
-
-
- V4L2_OUT_CAP_STD
- 0x00000004
- This output supports setting the TV standard by using VIDIOC_S_STD.
-
-
-
-
-
&return-value;
diff --git a/trunk/Documentation/DocBook/v4l/vidioc-g-dv-preset.xml b/trunk/Documentation/DocBook/v4l/vidioc-g-dv-preset.xml
deleted file mode 100644
index 3c6784e132f3..000000000000
--- a/trunk/Documentation/DocBook/v4l/vidioc-g-dv-preset.xml
+++ /dev/null
@@ -1,111 +0,0 @@
-
-
- ioctl VIDIOC_G_DV_PRESET, VIDIOC_S_DV_PRESET
- &manvol;
-
-
-
- VIDIOC_G_DV_PRESET
- VIDIOC_S_DV_PRESET
- Query or select the DV preset of the current input or output
-
-
-
-
-
- int ioctl
- int fd
- int request
- &v4l2-dv-preset;
-*argp
-
-
-
-
-
- Arguments
-
-
-
- fd
-
- &fd;
-
-
-
- request
-
- VIDIOC_G_DV_PRESET, VIDIOC_S_DV_PRESET
-
-
-
- argp
-
-
-
-
-
-
-
-
- Description
- To query and select the current DV preset, applications
-use the VIDIOC_G_DV_PRESET and VIDIOC_S_DV_PRESET
-ioctls which take a pointer to a &v4l2-dv-preset; type as argument.
-Applications must zero the reserved array in &v4l2-dv-preset;.
-VIDIOC_G_DV_PRESET returns a dv preset in the field
-preset of &v4l2-dv-preset;.
-
- VIDIOC_S_DV_PRESET accepts a pointer to a &v4l2-dv-preset;
-that has the preset value to be set. Applications must zero the reserved array in &v4l2-dv-preset;.
-If the preset is not supported, it returns an &EINVAL;
-
-
-
- &return-value;
-
-
-
- EINVAL
-
- This ioctl is not supported, or the
-VIDIOC_S_DV_PRESET ,VIDIOC_S_DV_PRESET parameter was unsuitable.
-
-
-
- EBUSY
-
- The device is busy and therefore can not change the preset.
-
-
-
-
-
- struct v4l2_dv_preset
-
- &cs-str;
-
-
- __u32
- preset
- Preset value to represent the digital video timings
-
-
- __u32
- reserved[4]
- Reserved fields for future use
-
-
-
-
-
-
-
-
-
diff --git a/trunk/Documentation/DocBook/v4l/vidioc-g-dv-timings.xml b/trunk/Documentation/DocBook/v4l/vidioc-g-dv-timings.xml
deleted file mode 100644
index ecc19576bb8f..000000000000
--- a/trunk/Documentation/DocBook/v4l/vidioc-g-dv-timings.xml
+++ /dev/null
@@ -1,224 +0,0 @@
-
-
- ioctl VIDIOC_G_DV_TIMINGS, VIDIOC_S_DV_TIMINGS
- &manvol;
-
-
-
- VIDIOC_G_DV_TIMINGS
- VIDIOC_S_DV_TIMINGS
- Get or set custom DV timings for input or output
-
-
-
-
-
- int ioctl
- int fd
- int request
- &v4l2-dv-timings;
-*argp
-
-
-
-
-
- Arguments
-
-
-
- fd
-
- &fd;
-
-
-
- request
-
- VIDIOC_G_DV_TIMINGS, VIDIOC_S_DV_TIMINGS
-
-
-
- argp
-
-
-
-
-
-
-
-
- Description
- To set custom DV timings for the input or output, applications use the
-VIDIOC_S_DV_TIMINGS ioctl and to get the current custom timings,
-applications use the VIDIOC_G_DV_TIMINGS ioctl. The detailed timing
-information is filled in using the structure &v4l2-dv-timings;. These ioctls take
-a pointer to the &v4l2-dv-timings; structure as argument. If the ioctl is not supported
-or the timing values are not correct, the driver returns &EINVAL;.
-
-
-
- &return-value;
-
-
-
- EINVAL
-
- This ioctl is not supported, or the
-VIDIOC_S_DV_TIMINGS parameter was unsuitable.
-
-
-
- EBUSY
-
- The device is busy and therefore can not change the timings.
-
-
-
-
-
- struct v4l2_bt_timings
-
- &cs-str;
-
-
- __u32
- width
- Width of the active video in pixels
-
-
- __u32
- height
- Height of the active video in lines
-
-
- __u32
- interlaced
- Progressive (0) or interlaced (1)
-
-
- __u32
- polarities
- This is a bit mask that defines polarities of sync signals.
-bit 0 (V4L2_DV_VSYNC_POS_POL) is for vertical sync polarity and bit 1 (V4L2_DV_HSYNC_POS_POL) is for horizontal sync polarity. If the bit is set
-(1) it is positive polarity and if is cleared (0), it is negative polarity.
-
-
- __u64
- pixelclock
- Pixel clock in Hz. Ex. 74.25MHz->74250000
-
-
- __u32
- hfrontporch
- Horizontal front porch in pixels
-
-
- __u32
- hsync
- Horizontal sync length in pixels
-
-
- __u32
- hbackporch
- Horizontal back porch in pixels
-
-
- __u32
- vfrontporch
- Vertical front porch in lines
-
-
- __u32
- vsync
- Vertical sync length in lines
-
-
- __u32
- vbackporch
- Vertical back porch in lines
-
-
- __u32
- il_vfrontporch
- Vertical front porch in lines for bottom field of interlaced field formats
-
-
- __u32
- il_vsync
- Vertical sync length in lines for bottom field of interlaced field formats
-
-
- __u32
- il_vbackporch
- Vertical back porch in lines for bottom field of interlaced field formats
-
-
-
-
-
-
- struct v4l2_dv_timings
-
- &cs-str;
-
-
- __u32
- type
-
- Type of DV timings as listed in .
-
-
- union
-
-
-
-
-
- &v4l2-bt-timings;
- bt
- Timings defined by BT.656/1120 specifications
-
-
-
- __u32
- reserved [32]
-
-
-
-
-
-
-
- DV Timing types
-
- &cs-str;
-
-
- Timing type
- value
- Description
-
-
-
-
-
-
-
- V4L2_DV_BT_656_1120
- 0
- BT.656/1120 timings
-
-
-
-
-
-
-
-
diff --git a/trunk/Documentation/DocBook/v4l/vidioc-g-std.xml b/trunk/Documentation/DocBook/v4l/vidioc-g-std.xml
index 912f8513e5da..b6f5d267e856 100644
--- a/trunk/Documentation/DocBook/v4l/vidioc-g-std.xml
+++ b/trunk/Documentation/DocBook/v4l/vidioc-g-std.xml
@@ -86,12 +86,6 @@ standards.
VIDIOC_S_STD parameter was unsuitable.
-
- EBUSY
-
- The device is busy and therefore can not change the standard
-
-
diff --git a/trunk/Documentation/DocBook/v4l/vidioc-query-dv-preset.xml b/trunk/Documentation/DocBook/v4l/vidioc-query-dv-preset.xml
deleted file mode 100644
index 87e4f0f6151c..000000000000
--- a/trunk/Documentation/DocBook/v4l/vidioc-query-dv-preset.xml
+++ /dev/null
@@ -1,85 +0,0 @@
-
-
- ioctl VIDIOC_QUERY_DV_PRESET
- &manvol;
-
-
-
- VIDIOC_QUERY_DV_PRESET
- Sense the DV preset received by the current
-input
-
-
-
-
-
- int ioctl
- int fd
- int request
- &v4l2-dv-preset; *argp
-
-
-
-
-
- Arguments
-
-
-
- fd
-
- &fd;
-
-
-
- request
-
- VIDIOC_QUERY_DV_PRESET
-
-
-
- argp
-
-
-
-
-
-
-
-
- Description
-
- The hardware may be able to detect the current DV preset
-automatically, similar to sensing the video standard. To do so, applications
-call VIDIOC_QUERY_DV_PRESET with a pointer to a
-&v4l2-dv-preset; type. Once the hardware detects a preset, that preset is
-returned in the preset field of &v4l2-dv-preset;. When detection is not
-possible or fails, the value V4L2_DV_INVALID is returned.
-
-
-
- &return-value;
-
-
- EINVAL
-
- This ioctl is not supported.
-
-
-
- EBUSY
-
- The device is busy and therefore can not sense the preset
-
-
-
-
-
-
-
diff --git a/trunk/Documentation/DocBook/v4l/vidioc-querystd.xml b/trunk/Documentation/DocBook/v4l/vidioc-querystd.xml
index 1a9e60393091..b5a7ff934486 100644
--- a/trunk/Documentation/DocBook/v4l/vidioc-querystd.xml
+++ b/trunk/Documentation/DocBook/v4l/vidioc-querystd.xml
@@ -70,12 +70,6 @@ current video input or output.
This ioctl is not supported.
-
- EBUSY
-
- The device is busy and therefore can not detect the standard
-
-
diff --git a/trunk/Documentation/SubmitChecklist b/trunk/Documentation/SubmitChecklist
index 1053a56be3b1..78a9168ff377 100644
--- a/trunk/Documentation/SubmitChecklist
+++ b/trunk/Documentation/SubmitChecklist
@@ -15,7 +15,7 @@ kernel patches.
2: Passes allnoconfig, allmodconfig
3: Builds on multiple CPU architectures by using local cross-compile tools
- or some other build farm.
+ or something like PLM at OSDL.
4: ppc64 is a good architecture for cross-compilation checking because it
tends to use `unsigned long' for 64-bit quantities.
@@ -88,6 +88,3 @@ kernel patches.
24: All memory barriers {e.g., barrier(), rmb(), wmb()} need a comment in the
source code that explains the logic of what they are doing and why.
-
-25: If any ioctl's are added by the patch, then also update
- Documentation/ioctl/ioctl-number.txt.
diff --git a/trunk/Documentation/fb/viafb.txt b/trunk/Documentation/fb/viafb.txt
index f3e046a6a987..67dbf442b0b6 100644
--- a/trunk/Documentation/fb/viafb.txt
+++ b/trunk/Documentation/fb/viafb.txt
@@ -7,7 +7,7 @@
VIA UniChrome Family(CLE266, PM800 / CN400 / CN300,
P4M800CE / P4M800Pro / CN700 / VN800,
CX700 / VX700, K8M890, P4M890,
- CN896 / P4M900, VX800, VX855)
+ CN896 / P4M900, VX800)
[Driver features]
------------------------
@@ -154,6 +154,13 @@
0 : No Dual Edge Panel (default)
1 : Dual Edge Panel
+ viafb_video_dev:
+ This option is used to specify video output devices(CRT, DVI, LCD) for
+ duoview case.
+ For example:
+ To output video on DVI, we should use:
+ modprobe viafb viafb_video_dev=DVI...
+
viafb_lcd_port:
This option is used to specify LCD output port,
available values are "DVP0" "DVP1" "DFP_HIGHLOW" "DFP_HIGH" "DFP_LOW".
@@ -174,6 +181,9 @@ Notes:
and bpp, need to call VIAFB specified ioctl interface VIAFB_SET_DEVICE
instead of calling common ioctl function FBIOPUT_VSCREENINFO since
viafb doesn't support multi-head well, or it will cause screen crush.
+ 4. VX800 2D accelerator hasn't been supported in this driver yet. When
+ using driver on VX800, the driver will disable the acceleration
+ function as default.
[Configure viafb with "fbset" tool]
diff --git a/trunk/Documentation/filesystems/seq_file.txt b/trunk/Documentation/filesystems/seq_file.txt
index a1e2e0dda907..0d15ebccf5b0 100644
--- a/trunk/Documentation/filesystems/seq_file.txt
+++ b/trunk/Documentation/filesystems/seq_file.txt
@@ -248,7 +248,9 @@ code, that is done in the initialization code in the usual way:
{
struct proc_dir_entry *entry;
- proc_create("sequence", 0, NULL, &ct_file_ops);
+ entry = create_proc_entry("sequence", 0, NULL);
+ if (entry)
+ entry->proc_fops = &ct_file_ops;
return 0;
}
diff --git a/trunk/Documentation/gpio.txt b/trunk/Documentation/gpio.txt
index 1866c27eec69..e4e7daed2ba8 100644
--- a/trunk/Documentation/gpio.txt
+++ b/trunk/Documentation/gpio.txt
@@ -531,13 +531,6 @@ and have the following read/write attributes:
This file exists only if the pin can be configured as an
interrupt generating input pin.
- "active_low" ... reads as either 0 (false) or 1 (true). Write
- any nonzero value to invert the value attribute both
- for reading and writing. Existing and subsequent
- poll(2) support configuration via the edge attribute
- for "rising" and "falling" edges will follow this
- setting.
-
GPIO controllers have paths like /sys/class/gpio/gpiochip42/ (for the
controller implementing GPIOs starting at #42) and have the following
read-only attributes:
@@ -573,8 +566,6 @@ requested using gpio_request():
int gpio_export_link(struct device *dev, const char *name,
unsigned gpio)
- /* change the polarity of a GPIO node in sysfs */
- int gpio_sysfs_set_active_low(unsigned gpio, int value);
After a kernel driver requests a GPIO, it may only be made available in
the sysfs interface by gpio_export(). The driver can control whether the
@@ -589,9 +580,3 @@ After the GPIO has been exported, gpio_export_link() allows creating
symlinks from elsewhere in sysfs to the GPIO sysfs node. Drivers can
use this to provide the interface under their own device in sysfs with
a descriptive name.
-
-Drivers can use gpio_sysfs_set_active_low() to hide GPIO line polarity
-differences between boards from user space. This only affects the
-sysfs interface. Polarity change can be done both before and after
-gpio_export(), and previously enabled poll(2) support for either
-rising or falling edge will be reconfigured to follow this setting.
diff --git a/trunk/Documentation/infiniband/ipoib.txt b/trunk/Documentation/infiniband/ipoib.txt
index 64eeb55d0c09..6d40f00b358c 100644
--- a/trunk/Documentation/infiniband/ipoib.txt
+++ b/trunk/Documentation/infiniband/ipoib.txt
@@ -36,11 +36,11 @@ Datagram vs Connected modes
fabric with a 2K MTU, the IPoIB MTU will be 2048 - 4 = 2044 bytes.
In connected mode, the IB RC (Reliable Connected) transport is used.
- Connected mode takes advantage of the connected nature of the IB
- transport and allows an MTU up to the maximal IP packet size of 64K,
- which reduces the number of IP packets needed for handling large UDP
- datagrams, TCP segments, etc and increases the performance for large
- messages.
+ Connected mode is to takes advantage of the connected nature of the
+ IB transport and allows an MTU up to the maximal IP packet size of
+ 64K, which reduces the number of IP packets needed for handling
+ large UDP datagrams, TCP segments, etc and increases the performance
+ for large messages.
In connected mode, the interface's UD QP is still used for multicast
and communication with peers that don't support connected mode. In
diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt
index c309515ae959..ab95d3ada5c7 100644
--- a/trunk/Documentation/kernel-parameters.txt
+++ b/trunk/Documentation/kernel-parameters.txt
@@ -2729,11 +2729,6 @@ and is between 256 and 4096 characters. It is defined in the file
vmpoff= [KNL,S390] Perform z/VM CP command after power off.
Format:
- vt.cur_default= [VT] Default cursor shape.
- Format: 0xCCBBAA, where AA, BB, and CC are the same as
- the parameters of the [?A;B;Cc escape sequence;
- see VGA-softcursor.txt. Default: 2 = underline.
-
vt.default_blu= [VT]
Format: ,,,...,
Change the default blue palette of the console.
diff --git a/trunk/Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt b/trunk/Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt
deleted file mode 100644
index 515ebcf1b97d..000000000000
--- a/trunk/Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt
+++ /dev/null
@@ -1,93 +0,0 @@
-PPC440SPe DMA/XOR (DMA Controller and XOR Accelerator)
-
-Device nodes needed for operation of the ppc440spe-adma driver
-are specified hereby. These are I2O/DMA, DMA and XOR nodes
-for DMA engines and Memory Queue Module node. The latter is used
-by ADMA driver for configuration of RAID-6 H/W capabilities of
-the PPC440SPe. In addition to the nodes and properties described
-below, the ranges property of PLB node must specify ranges for
-DMA devices.
-
- i) The I2O node
-
- Required properties:
-
- - compatible : "ibm,i2o-440spe";
- - reg :
- - dcr-reg :
-
- Example:
-
- I2O: i2o@400100000 {
- compatible = "ibm,i2o-440spe";
- reg = <0x00000004 0x00100000 0x100>;
- dcr-reg = <0x060 0x020>;
- };
-
-
- ii) The DMA node
-
- Required properties:
-
- - compatible : "ibm,dma-440spe";
- - cell-index : 1 cell, hardware index of the DMA engine
- (typically 0x0 and 0x1 for DMA0 and DMA1)
- - reg :
- - dcr-reg :
- - interrupts : .
- - interrupt-parent : needed for interrupt mapping
-
- Example:
-
- DMA0: dma0@400100100 {
- compatible = "ibm,dma-440spe";
- cell-index = <0>;
- reg = <0x00000004 0x00100100 0x100>;
- dcr-reg = <0x060 0x020>;
- interrupt-parent = <&DMA0>;
- interrupts = <0 1>;
- #interrupt-cells = <1>;
- #address-cells = <0>;
- #size-cells = <0>;
- interrupt-map = <
- 0 &UIC0 0x14 4
- 1 &UIC1 0x16 4>;
- };
-
-
- iii) XOR Accelerator node
-
- Required properties:
-
- - compatible : "amcc,xor-accelerator";
- - reg :
- - interrupts :
- - interrupt-parent : for interrupt mapping
-
- Example:
-
- xor-accel@400200000 {
- compatible = "amcc,xor-accelerator";
- reg = <0x00000004 0x00200000 0x400>;
- interrupt-parent = <&UIC1>;
- interrupts = <0x1f 4>;
- };
-
-
- iv) Memory Queue Module node
-
- Required properties:
-
- - compatible : "ibm,mq-440spe";
- - dcr-reg :
-
- Example:
-
- MQ0: mq {
- compatible = "ibm,mq-440spe";
- dcr-reg = <0x040 0x020>;
- };
-
diff --git a/trunk/Documentation/video4linux/gspca.txt b/trunk/Documentation/video4linux/gspca.txt
index 1800a62cf135..319d9838e87e 100644
--- a/trunk/Documentation/video4linux/gspca.txt
+++ b/trunk/Documentation/video4linux/gspca.txt
@@ -12,7 +12,6 @@ m5602 0402:5602 ALi Video Camera Controller
spca501 040a:0002 Kodak DVC-325
spca500 040a:0300 Kodak EZ200
zc3xx 041e:041e Creative WebCam Live!
-ov519 041e:4003 Video Blaster WebCam Go Plus
spca500 041e:400a Creative PC-CAM 300
sunplus 041e:400b Creative PC-CAM 600
sunplus 041e:4012 PC-Cam350
@@ -169,14 +168,10 @@ sunplus 055f:c650 Mustek MDC5500Z
zc3xx 055f:d003 Mustek WCam300A
zc3xx 055f:d004 Mustek WCam300 AN
conex 0572:0041 Creative Notebook cx11646
-ov519 05a9:0511 Video Blaster WebCam 3/WebCam Plus, D-Link USB Digital Video Camera
-ov519 05a9:0518 Creative WebCam
ov519 05a9:0519 OV519 Microphone
ov519 05a9:0530 OmniVision
-ov519 05a9:2800 OmniVision SuperCAM
ov519 05a9:4519 Webcam Classic
ov519 05a9:8519 OmniVision
-ov519 05a9:a511 D-Link USB Digital Video Camera
ov519 05a9:a518 D-Link DSB-C310 Webcam
sunplus 05da:1018 Digital Dream Enigma 1.3
stk014 05e1:0893 Syntek DV4000
@@ -192,7 +187,7 @@ ov534 06f8:3002 Hercules Blog Webcam
ov534 06f8:3003 Hercules Dualpix HD Weblog
sonixj 06f8:3004 Hercules Classic Silver
sonixj 06f8:3008 Hercules Deluxe Optical Glass
-pac7302 06f8:3009 Hercules Classic Link
+pac7311 06f8:3009 Hercules Classic Link
spca508 0733:0110 ViewQuest VQ110
spca501 0733:0401 Intel Create and Share
spca501 0733:0402 ViewQuest M318B
@@ -204,7 +199,6 @@ sunplus 0733:2221 Mercury Digital Pro 3.1p
sunplus 0733:3261 Concord 3045 spca536a
sunplus 0733:3281 Cyberpix S550V
spca506 0734:043b 3DeMon USB Capture aka
-ov519 0813:0002 Dual Mode USB Camera Plus
spca500 084d:0003 D-Link DSC-350
spca500 08ca:0103 Aiptek PocketDV
sunplus 08ca:0104 Aiptek PocketDVII 1.3
@@ -242,15 +236,15 @@ pac7311 093a:2603 Philips SPC 500 NC
pac7311 093a:2608 Trust WB-3300p
pac7311 093a:260e Gigaware VGA PC Camera, Trust WB-3350p, SIGMA cam 2350
pac7311 093a:260f SnakeCam
-pac7302 093a:2620 Apollo AC-905
-pac7302 093a:2621 PAC731x
-pac7302 093a:2622 Genius Eye 312
-pac7302 093a:2624 PAC7302
-pac7302 093a:2626 Labtec 2200
-pac7302 093a:2628 Genius iLook 300
-pac7302 093a:2629 Genious iSlim 300
-pac7302 093a:262a Webcam 300k
-pac7302 093a:262c Philips SPC 230 NC
+pac7311 093a:2620 Apollo AC-905
+pac7311 093a:2621 PAC731x
+pac7311 093a:2622 Genius Eye 312
+pac7311 093a:2624 PAC7302
+pac7311 093a:2626 Labtec 2200
+pac7311 093a:2628 Genius iLook 300
+pac7311 093a:2629 Genious iSlim 300
+pac7311 093a:262a Webcam 300k
+pac7311 093a:262c Philips SPC 230 NC
jeilinj 0979:0280 Sakar 57379
zc3xx 0ac8:0302 Z-star Vimicro zc0302
vc032x 0ac8:0321 Vimicro generic vc0321
@@ -265,7 +259,6 @@ vc032x 0ac8:c002 Sony embedded vimicro
vc032x 0ac8:c301 Samsung Q1 Ultra Premium
spca508 0af9:0010 Hama USB Sightcam 100
spca508 0af9:0011 Hama USB Sightcam 100
-ov519 0b62:0059 iBOT2 Webcam
sonixb 0c45:6001 Genius VideoCAM NB
sonixb 0c45:6005 Microdia Sweex Mini Webcam
sonixb 0c45:6007 Sonix sn9c101 + Tas5110D
@@ -325,10 +318,8 @@ sn9c20x 0c45:62b3 PC Camera (SN9C202 + OV9655)
sn9c20x 0c45:62bb PC Camera (SN9C202 + OV7660)
sn9c20x 0c45:62bc PC Camera (SN9C202 + HV7131R)
sunplus 0d64:0303 Sunplus FashionCam DXG
-ov519 0e96:c001 TRUST 380 USB2 SPACEC@M
etoms 102c:6151 Qcam Sangha CIF
etoms 102c:6251 Qcam xxxxxx VGA
-ov519 1046:9967 W9967CF/W9968CF WebCam IC, Video Blaster WebCam Go
zc3xx 10fd:0128 Typhoon Webshot II USB 300k 0x0128
spca561 10fd:7e50 FlyCam Usb 100
zc3xx 10fd:8050 Typhoon Webshot II USB 300k
@@ -341,12 +332,7 @@ spca501 1776:501c Arowana 300K CMOS Camera
t613 17a1:0128 TASCORP JPEG Webcam, NGS Cyclops
vc032x 17ef:4802 Lenovo Vc0323+MI1310_SOC
pac207 2001:f115 D-Link DSB-C120
-sq905c 2770:9050 sq905c
-sq905c 2770:905c DualCamera
-sq905 2770:9120 Argus Digital Camera DC1512
-sq905c 2770:913d sq905c
spca500 2899:012c Toptro Industrial
-ov519 8020:ef04 ov519
spca508 8086:0110 Intel Easy PC Camera
spca500 8086:0630 Intel Pocket PC Camera
spca506 99fa:8988 Grandtec V.cap
diff --git a/trunk/Documentation/video4linux/sh_mobile_ceu_camera.txt b/trunk/Documentation/video4linux/sh_mobile_ceu_camera.txt
deleted file mode 100644
index 2ae16349a78d..000000000000
--- a/trunk/Documentation/video4linux/sh_mobile_ceu_camera.txt
+++ /dev/null
@@ -1,157 +0,0 @@
- Cropping and Scaling algorithm, used in the sh_mobile_ceu_camera driver
- =======================================================================
-
-Terminology
------------
-
-sensor scales: horizontal and vertical scales, configured by the sensor driver
-host scales: -"- host driver
-combined scales: sensor_scale * host_scale
-
-
-Generic scaling / cropping scheme
----------------------------------
-
--1--
-|
--2-- -\
-| --\
-| --\
-+-5-- -\ -- -3--
-| ---\
-| --- -4-- -\
-| -\
-| - -6--
-|
-| - -6'-
-| -/
-| --- -4'- -/
-| ---/
-+-5'- -/
-| -- -3'-
-| --/
-| --/
--2'- -/
-|
-|
--1'-
-
-Produced by user requests:
-
-S_CROP(left / top = (5) - (1), width / height = (5') - (5))
-S_FMT(width / height = (6') - (6))
-
-Here:
-
-(1) to (1') - whole max width or height
-(1) to (2) - sensor cropped left or top
-(2) to (2') - sensor cropped width or height
-(3) to (3') - sensor scale
-(3) to (4) - CEU cropped left or top
-(4) to (4') - CEU cropped width or height
-(5) to (5') - reverse sensor scale applied to CEU cropped width or height
-(2) to (5) - reverse sensor scale applied to CEU cropped left or top
-(6) to (6') - CEU scale - user window
-
-
-S_FMT
------
-
-Do not touch input rectangle - it is already optimal.
-
-1. Calculate current sensor scales:
-
- scale_s = ((3') - (3)) / ((2') - (2))
-
-2. Calculate "effective" input crop (sensor subwindow) - CEU crop scaled back at
-current sensor scales onto input window - this is user S_CROP:
-
- width_u = (5') - (5) = ((4') - (4)) * scale_s
-
-3. Calculate new combined scales from "effective" input window to requested user
-window:
-
- scale_comb = width_u / ((6') - (6))
-
-4. Calculate sensor output window by applying combined scales to real input
-window:
-
- width_s_out = ((2') - (2)) / scale_comb
-
-5. Apply iterative sensor S_FMT for sensor output window.
-
- subdev->video_ops->s_fmt(.width = width_s_out)
-
-6. Retrieve sensor output window (g_fmt)
-
-7. Calculate new sensor scales:
-
- scale_s_new = ((3')_new - (3)_new) / ((2') - (2))
-
-8. Calculate new CEU crop - apply sensor scales to previously calculated
-"effective" crop:
-
- width_ceu = (4')_new - (4)_new = width_u / scale_s_new
- left_ceu = (4)_new - (3)_new = ((5) - (2)) / scale_s_new
-
-9. Use CEU cropping to crop to the new window:
-
- ceu_crop(.width = width_ceu, .left = left_ceu)
-
-10. Use CEU scaling to scale to the requested user window:
-
- scale_ceu = width_ceu / width
-
-
-S_CROP
-------
-
-If old scale applied to new crop is invalid produce nearest new scale possible
-
-1. Calculate current combined scales.
-
- scale_comb = (((4') - (4)) / ((6') - (6))) * (((2') - (2)) / ((3') - (3)))
-
-2. Apply iterative sensor S_CROP for new input window.
-
-3. If old combined scales applied to new crop produce an impossible user window,
-adjust scales to produce nearest possible window.
-
- width_u_out = ((5') - (5)) / scale_comb
-
- if (width_u_out > max)
- scale_comb = ((5') - (5)) / max;
- else if (width_u_out < min)
- scale_comb = ((5') - (5)) / min;
-
-4. Issue G_CROP to retrieve actual input window.
-
-5. Using actual input window and calculated combined scales calculate sensor
-target output window.
-
- width_s_out = ((3') - (3)) = ((2') - (2)) / scale_comb
-
-6. Apply iterative S_FMT for new sensor target output window.
-
-7. Issue G_FMT to retrieve the actual sensor output window.
-
-8. Calculate sensor scales.
-
- scale_s = ((3') - (3)) / ((2') - (2))
-
-9. Calculate sensor output subwindow to be cropped on CEU by applying sensor
-scales to the requested window.
-
- width_ceu = ((5') - (5)) / scale_s
-
-10. Use CEU cropping for above calculated window.
-
-11. Calculate CEU scales from sensor scales from results of (10) and user window
-from (3)
-
- scale_ceu = calc_scale(((5') - (5)), &width_u_out)
-
-12. Apply CEU scales.
-
---
-Author: Guennadi Liakhovetski
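
For context, the S_CROP and S_FMT "user requests" described in the removed document above are the ordinary V4L2 ioctls issued from user space. A minimal user-space sketch, assuming a hypothetical capture node /dev/video0; the geometry and pixel format below are illustrative only and are not taken from the driver (error handling omitted for brevity):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
    	int fd = open("/dev/video0", O_RDWR);	/* hypothetical device node */
    	struct v4l2_crop crop;
    	struct v4l2_format fmt;

    	/* S_CROP(left/top = (5) - (1), width/height = (5') - (5)) */
    	memset(&crop, 0, sizeof(crop));
    	crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    	crop.c.left = 32;		/* illustrative values */
    	crop.c.top = 24;
    	crop.c.width = 640;
    	crop.c.height = 480;
    	ioctl(fd, VIDIOC_S_CROP, &crop);

    	/* S_FMT(width/height = (6') - (6)) */
    	memset(&fmt, 0, sizeof(fmt));
    	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    	fmt.fmt.pix.width = 320;
    	fmt.fmt.pix.height = 240;
    	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
    	ioctl(fd, VIDIOC_S_FMT, &fmt);

    	return 0;
    }
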
diff --git a/trunk/Documentation/video4linux/v4l2-framework.txt b/trunk/Documentation/video4linux/v4l2-framework.txt
index 74d677c8b036..b806edaf3e75 100644
--- a/trunk/Documentation/video4linux/v4l2-framework.txt
+++ b/trunk/Documentation/video4linux/v4l2-framework.txt
@@ -561,8 +561,6 @@ video_device helper functions
There are a few useful helper functions:
-- file/video_device private data
-
You can set/get driver private data in the video_device struct using:
void *video_get_drvdata(struct video_device *vdev);
@@ -577,7 +575,8 @@ struct video_device *video_devdata(struct file *file);
returns the video_device belonging to the file struct.
-The video_drvdata function combines video_get_drvdata with video_devdata:
+The final helper function combines video_get_drvdata with
+video_devdata:
void *video_drvdata(struct file *file);
@@ -585,17 +584,6 @@ You can go from a video_device struct to the v4l2_device struct using:
struct v4l2_device *v4l2_dev = vdev->v4l2_dev;
-- Device node name
-
-The video_device node kernel name can be retrieved using
-
-const char *video_device_node_name(struct video_device *vdev);
-
-The name is used as a hint by userspace tools such as udev. The function
-should be used where possible instead of accessing the video_device::num and
-video_device::minor fields.
-
-
video buffer helper functions
-----------------------------
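
As an illustration of the private-data helpers discussed in the hunk above: a driver stores its state pointer once before registration and retrieves it from the file handle in its fops, where video_drvdata() is shorthand for video_get_drvdata(video_devdata(file)). A minimal sketch; the my_device structure and my_* names are hypothetical and not part of this patch:

    #include <linux/fs.h>
    #include <linux/module.h>
    #include <media/v4l2-dev.h>

    struct my_device {				/* hypothetical driver state */
    	struct video_device vdev;
    	/* ... */
    };

    static int my_open(struct file *file)
    {
    	/* video_drvdata(file) == video_get_drvdata(video_devdata(file)) */
    	struct my_device *dev = video_drvdata(file);

    	(void)dev;				/* driver state is now at hand */
    	return 0;
    }

    static const struct v4l2_file_operations my_fops = {
    	.owner	= THIS_MODULE,
    	.open	= my_open,
    };

    static int my_register(struct my_device *dev)
    {
    	dev->vdev.fops = &my_fops;
    	video_set_drvdata(&dev->vdev, dev);	/* stash the private pointer */
    	return video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1);
    }
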
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index d6a27110a747..0a32c3ec6b1c 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -5991,9 +5991,9 @@ F: sound/soc/codecs/wm8350.*
F: sound/soc/codecs/wm8400.*
X.25 NETWORK LAYER
-M: Andrew Hendry
+M: Henner Eisen
L: linux-x25@vger.kernel.org
-S: Odd Fixes
+S: Maintained
F: Documentation/networking/x25*
F: include/net/x25*
F: net/x25/
diff --git a/trunk/arch/alpha/include/asm/elf.h b/trunk/arch/alpha/include/asm/elf.h
index 9baae8afe8a3..5c75c1b2352a 100644
--- a/trunk/arch/alpha/include/asm/elf.h
+++ b/trunk/arch/alpha/include/asm/elf.h
@@ -81,6 +81,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_ALPHA
+#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/trunk/arch/arm/include/asm/elf.h b/trunk/arch/arm/include/asm/elf.h
index a399bb5730f1..6aac3f5bb2f3 100644
--- a/trunk/arch/arm/include/asm/elf.h
+++ b/trunk/arch/arm/include/asm/elf.h
@@ -101,6 +101,7 @@ extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
#define ELF_CORE_COPY_TASK_REGS dump_task_regs
+#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/trunk/arch/arm/mach-bcmring/arch.c b/trunk/arch/arm/mach-bcmring/arch.c
index 53dd2a9eecf9..fbe6fa02c882 100644
--- a/trunk/arch/arm/mach-bcmring/arch.c
+++ b/trunk/arch/arm/mach-bcmring/arch.c
@@ -70,19 +70,9 @@ static struct ctl_table bcmring_sysctl_reboot[] = {
{}
};
-static struct resource nand_resource[] = {
- [0] = {
- .start = MM_ADDR_IO_NAND,
- .end = MM_ADDR_IO_NAND + 0x1000 - 1,
- .flags = IORESOURCE_MEM,
- },
-};
-
static struct platform_device nand_device = {
.name = "bcm-nand",
.id = -1,
- .resource = nand_resource,
- .num_resources = ARRAY_SIZE(nand_resource),
};
static struct platform_device *devices[] __initdata = {
diff --git a/trunk/arch/arm/mach-bcmring/include/mach/reg_nand.h b/trunk/arch/arm/mach-bcmring/include/mach/reg_nand.h
deleted file mode 100644
index 387376ffb56b..000000000000
--- a/trunk/arch/arm/mach-bcmring/include/mach/reg_nand.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*****************************************************************************
-* Copyright 2001 - 2008 Broadcom Corporation. All rights reserved.
-*
-* Unless you and Broadcom execute a separate written software license
-* agreement governing use of this software, this software is licensed to you
-* under the terms of the GNU General Public License version 2, available at
-* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
-*
-* Notwithstanding the above, under no circumstances may you combine this
-* software in any way with any other Broadcom software provided under a
-* license other than the GPL, without Broadcom's express prior written
-* consent.
-*****************************************************************************/
-
-/*
-*
-*****************************************************************************
-*
-* REG_NAND.h
-*
-* PURPOSE:
-*
-* This file contains definitions for the nand registers:
-*
-* NOTES:
-*
-*****************************************************************************/
-
-#if !defined(__ASM_ARCH_REG_NAND_H)
-#define __ASM_ARCH_REG_NAND_H
-
-/* ---- Include Files ---------------------------------------------------- */
-#include
-#include
-
-/* ---- Constants and Types ---------------------------------------------- */
-
-#define HW_NAND_BASE MM_IO_BASE_NAND /* NAND Flash */
-
-/* DMA accesses by the bootstrap need hard nonvirtual addresses */
-#define REG_NAND_CMD __REG16(HW_NAND_BASE + 0)
-#define REG_NAND_ADDR __REG16(HW_NAND_BASE + 4)
-
-#define REG_NAND_PHYS_DATA16 (HW_NAND_BASE + 8)
-#define REG_NAND_PHYS_DATA8 (HW_NAND_BASE + 8)
-#define REG_NAND_DATA16 __REG16(REG_NAND_PHYS_DATA16)
-#define REG_NAND_DATA8 __REG8(REG_NAND_PHYS_DATA8)
-
-/* use appropriate offset to make sure it start at the 1K boundary */
-#define REG_NAND_PHYS_DATA_DMA (HW_NAND_BASE + 0x400)
-#define REG_NAND_DATA_DMA __REG32(REG_NAND_PHYS_DATA_DMA)
-
-/* Linux DMA requires physical address of the data register */
-#define REG_NAND_DATA16_PADDR HW_IO_VIRT_TO_PHYS(REG_NAND_PHYS_DATA16)
-#define REG_NAND_DATA8_PADDR HW_IO_VIRT_TO_PHYS(REG_NAND_PHYS_DATA8)
-#define REG_NAND_DATA_PADDR HW_IO_VIRT_TO_PHYS(REG_NAND_PHYS_DATA_DMA)
-
-#define NAND_BUS_16BIT() (0)
-#define NAND_BUS_8BIT() (!NAND_BUS_16BIT())
-
-/* Register offsets */
-#define REG_NAND_CMD_OFFSET (0)
-#define REG_NAND_ADDR_OFFSET (4)
-#define REG_NAND_DATA8_OFFSET (8)
-
-#endif
diff --git a/trunk/arch/arm/mach-bcmring/include/mach/reg_umi.h b/trunk/arch/arm/mach-bcmring/include/mach/reg_umi.h
deleted file mode 100644
index 06a355481ea6..000000000000
--- a/trunk/arch/arm/mach-bcmring/include/mach/reg_umi.h
+++ /dev/null
@@ -1,237 +0,0 @@
-/*****************************************************************************
-* Copyright 2005 - 2008 Broadcom Corporation. All rights reserved.
-*
-* Unless you and Broadcom execute a separate written software license
-* agreement governing use of this software, this software is licensed to you
-* under the terms of the GNU General Public License version 2, available at
-* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
-*
-* Notwithstanding the above, under no circumstances may you combine this
-* software in any way with any other Broadcom software provided under a
-* license other than the GPL, without Broadcom's express prior written
-* consent.
-*****************************************************************************/
-
-/*
-*
-*****************************************************************************
-*
-* REG_UMI.h
-*
-* PURPOSE:
-*
-* This file contains definitions for the nand registers:
-*
-* NOTES:
-*
-*****************************************************************************/
-
-#if !defined(__ASM_ARCH_REG_UMI_H)
-#define __ASM_ARCH_REG_UMI_H
-
-/* ---- Include Files ---------------------------------------------------- */
-#include
-#include
-
-/* ---- Constants and Types ---------------------------------------------- */
-
-/* Unified Memory Interface Ctrl Register */
-#define HW_UMI_BASE MM_IO_BASE_UMI
-
-/* Flash bank 0 timing and control register */
-#define REG_UMI_FLASH0_TCR __REG32(HW_UMI_BASE + 0x00)
-/* Flash bank 1 timing and control register */
-#define REG_UMI_FLASH1_TCR __REG32(HW_UMI_BASE + 0x04)
-/* Flash bank 2 timing and control register */
-#define REG_UMI_FLASH2_TCR __REG32(HW_UMI_BASE + 0x08)
-/* MMD interface and control register */
-#define REG_UMI_MMD_ICR __REG32(HW_UMI_BASE + 0x0c)
-/* NAND timing and control register */
-#define REG_UMI_NAND_TCR __REG32(HW_UMI_BASE + 0x18)
-/* NAND ready/chip select register */
-#define REG_UMI_NAND_RCSR __REG32(HW_UMI_BASE + 0x1c)
-/* NAND ECC control & status register */
-#define REG_UMI_NAND_ECC_CSR __REG32(HW_UMI_BASE + 0x20)
-/* NAND ECC data register XXB2B1B0 */
-#define REG_UMI_NAND_ECC_DATA __REG32(HW_UMI_BASE + 0x24)
-/* BCH ECC Parameter N */
-#define REG_UMI_BCH_N __REG32(HW_UMI_BASE + 0x40)
-/* BCH ECC Parameter T */
-#define REG_UMI_BCH_K __REG32(HW_UMI_BASE + 0x44)
-/* BCH ECC Parameter K */
-#define REG_UMI_BCH_T __REG32(HW_UMI_BASE + 0x48)
-/* BCH ECC Contro Status */
-#define REG_UMI_BCH_CTRL_STATUS __REG32(HW_UMI_BASE + 0x4C)
-/* BCH WR ECC 31:0 */
-#define REG_UMI_BCH_WR_ECC_0 __REG32(HW_UMI_BASE + 0x50)
-/* BCH WR ECC 63:32 */
-#define REG_UMI_BCH_WR_ECC_1 __REG32(HW_UMI_BASE + 0x54)
-/* BCH WR ECC 95:64 */
-#define REG_UMI_BCH_WR_ECC_2 __REG32(HW_UMI_BASE + 0x58)
-/* BCH WR ECC 127:96 */
-#define REG_UMI_BCH_WR_ECC_3 __REG32(HW_UMI_BASE + 0x5c)
-/* BCH WR ECC 155:128 */
-#define REG_UMI_BCH_WR_ECC_4 __REG32(HW_UMI_BASE + 0x60)
-/* BCH Read Error Location 1,0 */
-#define REG_UMI_BCH_RD_ERR_LOC_1_0 __REG32(HW_UMI_BASE + 0x64)
-/* BCH Read Error Location 3,2 */
-#define REG_UMI_BCH_RD_ERR_LOC_3_2 __REG32(HW_UMI_BASE + 0x68)
-/* BCH Read Error Location 5,4 */
-#define REG_UMI_BCH_RD_ERR_LOC_5_4 __REG32(HW_UMI_BASE + 0x6c)
-/* BCH Read Error Location 7,6 */
-#define REG_UMI_BCH_RD_ERR_LOC_7_6 __REG32(HW_UMI_BASE + 0x70)
-/* BCH Read Error Location 9,8 */
-#define REG_UMI_BCH_RD_ERR_LOC_9_8 __REG32(HW_UMI_BASE + 0x74)
-/* BCH Read Error Location 11,10 */
-#define REG_UMI_BCH_RD_ERR_LOC_B_A __REG32(HW_UMI_BASE + 0x78)
-
-/* REG_UMI_FLASH0/1/2_TCR, REG_UMI_SRAM0/1_TCR bits */
-/* Enable wait pin during burst write or read */
-#define REG_UMI_TCR_WAITEN 0x80000000
-/* Enable mem ctrlr to work iwth ext mem of lower freq than AHB clk */
-#define REG_UMI_TCR_LOWFREQ 0x40000000
-/* 1=synch write, 0=async write */
-#define REG_UMI_TCR_MEMTYPE_SYNCWRITE 0x20000000
-/* 1=synch read, 0=async read */
-#define REG_UMI_TCR_MEMTYPE_SYNCREAD 0x10000000
-/* 1=page mode read, 0=normal mode read */
-#define REG_UMI_TCR_MEMTYPE_PAGEREAD 0x08000000
-/* page size/burst size (wrap only) */
-#define REG_UMI_TCR_MEMTYPE_PGSZ_MASK 0x07000000
-/* 4 word */
-#define REG_UMI_TCR_MEMTYPE_PGSZ_4 0x00000000
-/* 8 word */
-#define REG_UMI_TCR_MEMTYPE_PGSZ_8 0x01000000
-/* 16 word */
-#define REG_UMI_TCR_MEMTYPE_PGSZ_16 0x02000000
-/* 32 word */
-#define REG_UMI_TCR_MEMTYPE_PGSZ_32 0x03000000
-/* 64 word */
-#define REG_UMI_TCR_MEMTYPE_PGSZ_64 0x04000000
-/* 128 word */
-#define REG_UMI_TCR_MEMTYPE_PGSZ_128 0x05000000
-/* 256 word */
-#define REG_UMI_TCR_MEMTYPE_PGSZ_256 0x06000000
-/* 512 word */
-#define REG_UMI_TCR_MEMTYPE_PGSZ_512 0x07000000
-/* Page read access cycle / Burst write latency (n+2 / n+1) */
-#define REG_UMI_TCR_TPRC_TWLC_MASK 0x00f80000
-/* Bus turnaround cycle (n) */
-#define REG_UMI_TCR_TBTA_MASK 0x00070000
-/* Write pulse width cycle (n+1) */
-#define REG_UMI_TCR_TWP_MASK 0x0000f800
-/* Write recovery cycle (n+1) */
-#define REG_UMI_TCR_TWR_MASK 0x00000600
-/* Write address setup cycle (n+1) */
-#define REG_UMI_TCR_TAS_MASK 0x00000180
-/* Output enable delay cycle (n) */
-#define REG_UMI_TCR_TOE_MASK 0x00000060
-/* Read access cycle / Burst read latency (n+2 / n+1) */
-#define REG_UMI_TCR_TRC_TLC_MASK 0x0000001f
-
-/* REG_UMI_MMD_ICR bits */
-/* Flash write protection pin control */
-#define REG_UMI_MMD_ICR_FLASH_WP 0x8000
-/* Extend hold time for sram0, sram1 csn (39 MHz operation) */
-#define REG_UMI_MMD_ICR_XHCS 0x4000
-/* Enable SDRAM 2 interface control */
-#define REG_UMI_MMD_ICR_SDRAM2EN 0x2000
-/* Enable merge of flash banks 0/1 to 512 MBit bank */
-#define REG_UMI_MMD_ICR_INST512 0x1000
-/* Enable merge of flash banks 1/2 to 512 MBit bank */
-#define REG_UMI_MMD_ICR_DATA512 0x0800
-/* Enable SDRAM interface control */
-#define REG_UMI_MMD_ICR_SDRAMEN 0x0400
-/* Polarity of busy state of Burst Wait Signal */
-#define REG_UMI_MMD_ICR_WAITPOL 0x0200
-/* Enable burst clock stopped when not accessing external burst flash/sram */
-#define REG_UMI_MMD_ICR_BCLKSTOP 0x0100
-/* Enable the peri1_csn to replace flash1_csn in 512 Mb flash mode */
-#define REG_UMI_MMD_ICR_PERI1EN 0x0080
-/* Enable the peri2_csn to replace sdram_csn */
-#define REG_UMI_MMD_ICR_PERI2EN 0x0040
-/* Enable the peri3_csn to replace sdram2_csn */
-#define REG_UMI_MMD_ICR_PERI3EN 0x0020
-/* Enable sram bank1 for H/W controlled MRS */
-#define REG_UMI_MMD_ICR_MRSB1 0x0010
-/* Enable sram bank0 for H/W controlled MRS */
-#define REG_UMI_MMD_ICR_MRSB0 0x0008
-/* Polarity for assert3ed state of H/W controlled MRS */
-#define REG_UMI_MMD_ICR_MRSPOL 0x0004
-/* 0: S/W controllable ZZ/MRS/CRE/P-Mode pin */
-/* 1: H/W controlled ZZ/MRS/CRE/P-Mode, same timing as CS */
-#define REG_UMI_MMD_ICR_MRSMODE 0x0002
-/* MRS state for S/W controlled mode */
-#define REG_UMI_MMD_ICR_MRSSTATE 0x0001
-
-/* REG_UMI_NAND_TCR bits */
-/* Enable software to control CS */
-#define REG_UMI_NAND_TCR_CS_SWCTRL 0x80000000
-/* 16-bit nand wordsize if set */
-#define REG_UMI_NAND_TCR_WORD16 0x40000000
-/* Bus turnaround cycle (n) */
-#define REG_UMI_NAND_TCR_TBTA_MASK 0x00070000
-/* Write pulse width cycle (n+1) */
-#define REG_UMI_NAND_TCR_TWP_MASK 0x0000f800
-/* Write recovery cycle (n+1) */
-#define REG_UMI_NAND_TCR_TWR_MASK 0x00000600
-/* Write address setup cycle (n+1) */
-#define REG_UMI_NAND_TCR_TAS_MASK 0x00000180
-/* Output enable delay cycle (n) */
-#define REG_UMI_NAND_TCR_TOE_MASK 0x00000060
-/* Read access cycle (n+2) */
-#define REG_UMI_NAND_TCR_TRC_TLC_MASK 0x0000001f
-
-/* REG_UMI_NAND_RCSR bits */
-/* Status: Ready=1, Busy=0 */
-#define REG_UMI_NAND_RCSR_RDY 0x02
-/* Keep CS asserted during operation */
-#define REG_UMI_NAND_RCSR_CS_ASSERTED 0x01
-
-/* REG_UMI_NAND_ECC_CSR bits */
-/* Interrupt status - read-only */
-#define REG_UMI_NAND_ECC_CSR_NANDINT 0x80000000
-/* Read: Status of ECC done, Write: clear ECC interrupt */
-#define REG_UMI_NAND_ECC_CSR_ECCINT_RAW 0x00800000
-/* Read: Status of R/B, Write: clear R/B interrupt */
-#define REG_UMI_NAND_ECC_CSR_RBINT_RAW 0x00400000
-/* 1 = Enable ECC Interrupt */
-#define REG_UMI_NAND_ECC_CSR_ECCINT_ENABLE 0x00008000
-/* 1 = Assert interrupt at rising edge of R/B_ */
-#define REG_UMI_NAND_ECC_CSR_RBINT_ENABLE 0x00004000
-/* Calculate ECC by 0=512 bytes, 1=256 bytes */
-#define REG_UMI_NAND_ECC_CSR_256BYTE 0x00000080
-/* Enable ECC in hardware */
-#define REG_UMI_NAND_ECC_CSR_ECC_ENABLE 0x00000001
-
-/* REG_UMI_BCH_CTRL_STATUS bits */
-/* Shift to Indicate Number of correctable errors detected */
-#define REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR_SHIFT 20
-/* Indicate Number of correctable errors detected */
-#define REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR 0x00F00000
-/* Indicate Errors detected during read but uncorrectable */
-#define REG_UMI_BCH_CTRL_STATUS_UNCORR_ERR 0x00080000
-/* Indicate Errors detected during read and are correctable */
-#define REG_UMI_BCH_CTRL_STATUS_CORR_ERR 0x00040000
-/* Flag indicates BCH's ECC status of read process are valid */
-#define REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID 0x00020000
-/* Flag indicates BCH's ECC status of write process are valid */
-#define REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID 0x00010000
-/* Pause ECC calculation */
-#define REG_UMI_BCH_CTRL_STATUS_PAUSE_ECC_DEC 0x00000010
-/* Enable Interrupt */
-#define REG_UMI_BCH_CTRL_STATUS_INT_EN 0x00000004
-/* Enable ECC during read */
-#define REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN 0x00000002
-/* Enable ECC during write */
-#define REG_UMI_BCH_CTRL_STATUS_ECC_WR_EN 0x00000001
-/* Mask for location */
-#define REG_UMI_BCH_ERR_LOC_MASK 0x00001FFF
-/* location within a byte */
-#define REG_UMI_BCH_ERR_LOC_BYTE 0x00000007
-/* location within a word */
-#define REG_UMI_BCH_ERR_LOC_WORD 0x00000018
-/* location within a page (512 byte) */
-#define REG_UMI_BCH_ERR_LOC_PAGE 0x00001FE0
-#define REG_UMI_BCH_ERR_LOC_ADDR(index) (__REG32(HW_UMI_BASE + 0x64 + (index / 2)*4) >> ((index % 2) * 16))
-#endif
diff --git a/trunk/arch/arm/mach-davinci/board-da850-evm.c b/trunk/arch/arm/mach-davinci/board-da850-evm.c
index 07de8db14581..62b98bffc158 100644
--- a/trunk/arch/arm/mach-davinci/board-da850-evm.c
+++ b/trunk/arch/arm/mach-davinci/board-da850-evm.c
@@ -339,15 +339,6 @@ static struct davinci_mmc_config da850_mmc_config = {
.version = MMC_CTLR_VERSION_2,
};
-static void da850_panel_power_ctrl(int val)
-{
- /* lcd backlight */
- gpio_set_value(DA850_LCD_BL_PIN, val);
-
- /* lcd power */
- gpio_set_value(DA850_LCD_PWR_PIN, val);
-}
-
static int da850_lcd_hw_init(void)
{
int status;
@@ -365,11 +356,17 @@ static int da850_lcd_hw_init(void)
gpio_direction_output(DA850_LCD_BL_PIN, 0);
gpio_direction_output(DA850_LCD_PWR_PIN, 0);
- /* Switch off panel power and backlight */
- da850_panel_power_ctrl(0);
+ /* disable lcd backlight */
+ gpio_set_value(DA850_LCD_BL_PIN, 0);
+
+ /* disable lcd power */
+ gpio_set_value(DA850_LCD_PWR_PIN, 0);
+
+ /* enable lcd power */
+ gpio_set_value(DA850_LCD_PWR_PIN, 1);
- /* Switch on panel power and backlight */
- da850_panel_power_ctrl(1);
+ /* enable lcd backlight */
+ gpio_set_value(DA850_LCD_BL_PIN, 1);
return 0;
}
@@ -677,7 +674,6 @@ static __init void da850_evm_init(void)
pr_warning("da850_evm_init: lcd initialization failed: %d\n",
ret);
- sharp_lk043t1dg01_pdata.panel_power_ctrl = da850_panel_power_ctrl,
ret = da8xx_register_lcdc(&sharp_lk043t1dg01_pdata);
if (ret)
pr_warning("da850_evm_init: lcdc registration failed: %d\n",
diff --git a/trunk/arch/arm/mach-davinci/include/mach/nand.h b/trunk/arch/arm/mach-davinci/include/mach/nand.h
index b2ad8090bd10..b520c4b5678a 100644
--- a/trunk/arch/arm/mach-davinci/include/mach/nand.h
+++ b/trunk/arch/arm/mach-davinci/include/mach/nand.h
@@ -79,10 +79,6 @@ struct davinci_nand_pdata { /* platform_data */
/* e.g. NAND_BUSWIDTH_16 or NAND_USE_FLASH_BBT */
unsigned options;
-
- /* Main and mirror bbt descriptor overrides */
- struct nand_bbt_descr *bbt_td;
- struct nand_bbt_descr *bbt_md;
};
#endif /* __ARCH_ARM_DAVINCI_NAND_H */
diff --git a/trunk/arch/arm/mach-ep93xx/include/mach/ep93xx_keypad.h b/trunk/arch/arm/mach-ep93xx/include/mach/ep93xx_keypad.h
index 62d17421e48c..83f31cd0a274 100644
--- a/trunk/arch/arm/mach-ep93xx/include/mach/ep93xx_keypad.h
+++ b/trunk/arch/arm/mach-ep93xx/include/mach/ep93xx_keypad.h
@@ -5,6 +5,9 @@
#ifndef __ASM_ARCH_EP93XX_KEYPAD_H
#define __ASM_ARCH_EP93XX_KEYPAD_H
+#define MAX_MATRIX_KEY_ROWS (8)
+#define MAX_MATRIX_KEY_COLS (8)
+
/* flags for the ep93xx_keypad driver */
#define EP93XX_KEYPAD_DISABLE_3_KEY (1<<0) /* disable 3-key reset */
#define EP93XX_KEYPAD_DIAG_MODE (1<<1) /* diagnostic mode */
@@ -15,6 +18,8 @@
/**
* struct ep93xx_keypad_platform_data - platform specific device structure
+ * @matrix_key_rows: number of rows in the keypad matrix
+ * @matrix_key_cols: number of columns in the keypad matrix
* @matrix_key_map: array of keycodes defining the keypad matrix
* @matrix_key_map_size: ARRAY_SIZE(matrix_key_map)
* @debounce: debounce start count; terminal count is 0xff
@@ -22,6 +27,8 @@
* @flags: see above
*/
struct ep93xx_keypad_platform_data {
+ unsigned int matrix_key_rows;
+ unsigned int matrix_key_cols;
unsigned int *matrix_key_map;
int matrix_key_map_size;
unsigned int debounce;
@@ -29,7 +36,7 @@ struct ep93xx_keypad_platform_data {
unsigned int flags;
};
-#define EP93XX_MATRIX_ROWS (8)
-#define EP93XX_MATRIX_COLS (8)
+/* macro for creating the matrix_key_map table */
+#define KEY(row, col, val) (((row) << 28) | ((col) << 24) | (val))
#endif /* __ASM_ARCH_EP93XX_KEYPAD_H */
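
To make the keypad platform data above concrete: with the version of the header that carries matrix_key_rows/matrix_key_cols and the KEY(row, col, val) macro, a board file describes its matrix roughly as follows. This is a hedged sketch only; the 2x2 map, key codes and debounce value are invented for illustration (a real board can use up to the 8x8 maximum):

    #include <linux/kernel.h>
    #include <linux/input.h>
    #include <mach/ep93xx_keypad.h>

    static unsigned int my_keymap[] = {	/* hypothetical 2x2 matrix */
    	KEY(0, 0, KEY_UP),
    	KEY(0, 1, KEY_DOWN),
    	KEY(1, 0, KEY_LEFT),
    	KEY(1, 1, KEY_RIGHT),
    };

    static struct ep93xx_keypad_platform_data my_keypad_data = {
    	.matrix_key_rows	= 2,
    	.matrix_key_cols	= 2,
    	.matrix_key_map		= my_keymap,
    	.matrix_key_map_size	= ARRAY_SIZE(my_keymap),
    	.debounce		= 0xa0,		/* illustrative */
    };
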
diff --git a/trunk/arch/arm/mach-nomadik/board-nhk8815.c b/trunk/arch/arm/mach-nomadik/board-nhk8815.c
index 9438bf6613a3..116394484e71 100644
--- a/trunk/arch/arm/mach-nomadik/board-nhk8815.c
+++ b/trunk/arch/arm/mach-nomadik/board-nhk8815.c
@@ -18,7 +18,6 @@
#include
#include
#include
-#include
#include
#include
#include
@@ -150,7 +149,7 @@ static struct mtd_partition nhk8815_onenand_partitions[] = {
}
};
-static struct onenand_platform_data nhk8815_onenand_data = {
+static struct flash_platform_data nhk8815_onenand_data = {
.parts = nhk8815_onenand_partitions,
.nr_parts = ARRAY_SIZE(nhk8815_onenand_partitions),
};
@@ -164,7 +163,7 @@ static struct resource nhk8815_onenand_resource[] = {
};
static struct platform_device nhk8815_onenand_device = {
- .name = "onenand-flash",
+ .name = "onenand",
.id = -1,
.dev = {
.platform_data = &nhk8815_onenand_data,
@@ -175,10 +174,10 @@ static struct platform_device nhk8815_onenand_device = {
static void __init nhk8815_onenand_init(void)
{
-#ifdef CONFIG_MTD_ONENAND
+#ifdef CONFIG_ONENAND
/* Set up SMCS0 for OneNand */
- writel(0x000030db, FSMC_BCR(0));
- writel(0x02100551, FSMC_BTR(0));
+ writel(0x000030db, FSMC_BCR0);
+ writel(0x02100551, FSMC_BTR0);
#endif
}
diff --git a/trunk/arch/arm/mach-u300/include/mach/coh901318.h b/trunk/arch/arm/mach-u300/include/mach/coh901318.h
deleted file mode 100644
index f4cfee9c7d28..000000000000
--- a/trunk/arch/arm/mach-u300/include/mach/coh901318.h
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- *
- * include/linux/coh901318.h
- *
- *
- * Copyright (C) 2007-2009 ST-Ericsson
- * License terms: GNU General Public License (GPL) version 2
- * DMA driver for COH 901 318
- * Author: Per Friden
- */
-
-#ifndef COH901318_H
-#define COH901318_H
-
-#include
-#include
-
-#define MAX_DMA_PACKET_SIZE_SHIFT 11
-#define MAX_DMA_PACKET_SIZE (1 << MAX_DMA_PACKET_SIZE_SHIFT)
-
-/**
- * struct coh901318_lli - linked list item for DMAC
- * @control: control settings for DMAC
- * @src_addr: transfer source address
- * @dst_addr: transfer destination address
- * @link_addr: physical address to next lli
- * @virt_link_addr: virtual addres of next lli (only used by pool_free)
- * @phy_this: physical address of current lli (only used by pool_free)
- */
-struct coh901318_lli {
- u32 control;
- dma_addr_t src_addr;
- dma_addr_t dst_addr;
- dma_addr_t link_addr;
-
- void *virt_link_addr;
- dma_addr_t phy_this;
-};
-/**
- * struct coh901318_params - parameters for DMAC configuration
- * @config: DMA config register
- * @ctrl_lli_last: DMA control register for the last lli in the list
- * @ctrl_lli: DMA control register for an lli
- * @ctrl_lli_chained: DMA control register for a chained lli
- */
-struct coh901318_params {
- u32 config;
- u32 ctrl_lli_last;
- u32 ctrl_lli;
- u32 ctrl_lli_chained;
-};
-/**
- * struct coh_dma_channel - dma channel base
- * @name: ascii name of dma channel
- * @number: channel id number
- * @desc_nbr_max: number of preallocated descriptortors
- * @priority_high: prio of channel, 0 low otherwise high.
- * @param: configuration parameters
- * @dev_addr: physical address of periphal connected to channel
- */
-struct coh_dma_channel {
- const char name[32];
- const int number;
- const int desc_nbr_max;
- const int priority_high;
- const struct coh901318_params param;
- const dma_addr_t dev_addr;
-};
-
-/**
- * dma_access_memory_state_t - register dma for memory access
- *
- * @dev: The dma device
- * @active: 1 means dma intends to access memory
- * 0 means dma wont access memory
- */
-typedef void (*dma_access_memory_state_t)(struct device *dev,
- bool active);
-
-/**
- * struct powersave - DMA power save structure
- * @lock: lock protecting data in this struct
- * @started_channels: bit mask indicating active dma channels
- */
-struct powersave {
- spinlock_t lock;
- u64 started_channels;
-};
-/**
- * struct coh901318_platform - platform arch structure
- * @chans_slave: specifying dma slave channels
- * @chans_memcpy: specifying dma memcpy channels
- * @access_memory_state: requesting DMA memeory access (on / off)
- * @chan_conf: dma channel configurations
- * @max_channels: max number of dma chanenls
- */
-struct coh901318_platform {
- const int *chans_slave;
- const int *chans_memcpy;
- const dma_access_memory_state_t access_memory_state;
- const struct coh_dma_channel *chan_conf;
- const int max_channels;
-};
-
-/**
- * coh901318_get_bytes_left() - Get number of bytes left on a current transfer
- * @chan: dma channel handle
- * return number of bytes left, or negative on error
- */
-u32 coh901318_get_bytes_left(struct dma_chan *chan);
-
-/**
- * coh901318_stop() - Stops dma transfer
- * @chan: dma channel handle
- * return 0 on success otherwise negative value
- */
-void coh901318_stop(struct dma_chan *chan);
-
-/**
- * coh901318_continue() - Resumes a stopped dma transfer
- * @chan: dma channel handle
- * return 0 on success otherwise negative value
- */
-void coh901318_continue(struct dma_chan *chan);
-
-/**
- * coh901318_filter_id() - DMA channel filter function
- * @chan: dma channel handle
- * @chan_id: id of dma channel to be filter out
- *
- * In dma_request_channel() it specifies what channel id to be requested
- */
-bool coh901318_filter_id(struct dma_chan *chan, void *chan_id);
-
-/*
- * DMA Controller - this access the static mappings of the coh901318 dma.
- *
- */
-
-#define COH901318_MOD32_MASK (0x1F)
-#define COH901318_WORD_MASK (0xFFFFFFFF)
-/* INT_STATUS - Interrupt Status Registers 32bit (R/-) */
-#define COH901318_INT_STATUS1 (0x0000)
-#define COH901318_INT_STATUS2 (0x0004)
-/* TC_INT_STATUS - Terminal Count Interrupt Status Registers 32bit (R/-) */
-#define COH901318_TC_INT_STATUS1 (0x0008)
-#define COH901318_TC_INT_STATUS2 (0x000C)
-/* TC_INT_CLEAR - Terminal Count Interrupt Clear Registers 32bit (-/W) */
-#define COH901318_TC_INT_CLEAR1 (0x0010)
-#define COH901318_TC_INT_CLEAR2 (0x0014)
-/* RAW_TC_INT_STATUS - Raw Term Count Interrupt Status Registers 32bit (R/-) */
-#define COH901318_RAW_TC_INT_STATUS1 (0x0018)
-#define COH901318_RAW_TC_INT_STATUS2 (0x001C)
-/* BE_INT_STATUS - Bus Error Interrupt Status Registers 32bit (R/-) */
-#define COH901318_BE_INT_STATUS1 (0x0020)
-#define COH901318_BE_INT_STATUS2 (0x0024)
-/* BE_INT_CLEAR - Bus Error Interrupt Clear Registers 32bit (-/W) */
-#define COH901318_BE_INT_CLEAR1 (0x0028)
-#define COH901318_BE_INT_CLEAR2 (0x002C)
-/* RAW_BE_INT_STATUS - Raw Term Count Interrupt Status Registers 32bit (R/-) */
-#define COH901318_RAW_BE_INT_STATUS1 (0x0030)
-#define COH901318_RAW_BE_INT_STATUS2 (0x0034)
-
-/*
- * CX_CFG - Channel Configuration Registers 32bit (R/W)
- */
-#define COH901318_CX_CFG (0x0100)
-#define COH901318_CX_CFG_SPACING (0x04)
-/* Channel enable activates tha dma job */
-#define COH901318_CX_CFG_CH_ENABLE (0x00000001)
-#define COH901318_CX_CFG_CH_DISABLE (0x00000000)
-/* Request Mode */
-#define COH901318_CX_CFG_RM_MASK (0x00000006)
-#define COH901318_CX_CFG_RM_MEMORY_TO_MEMORY (0x0 << 1)
-#define COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY (0x1 << 1)
-#define COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY (0x1 << 1)
-#define COH901318_CX_CFG_RM_PRIMARY_TO_SECONDARY (0x3 << 1)
-#define COH901318_CX_CFG_RM_SECONDARY_TO_PRIMARY (0x3 << 1)
-/* Linked channel request field. RM must == 11 */
-#define COH901318_CX_CFG_LCRF_SHIFT 3
-#define COH901318_CX_CFG_LCRF_MASK (0x000001F8)
-#define COH901318_CX_CFG_LCR_DISABLE (0x00000000)
-/* Terminal Counter Interrupt Request Mask */
-#define COH901318_CX_CFG_TC_IRQ_ENABLE (0x00000200)
-#define COH901318_CX_CFG_TC_IRQ_DISABLE (0x00000000)
-/* Bus Error interrupt Mask */
-#define COH901318_CX_CFG_BE_IRQ_ENABLE (0x00000400)
-#define COH901318_CX_CFG_BE_IRQ_DISABLE (0x00000000)
-
-/*
- * CX_STAT - Channel Status Registers 32bit (R/-)
- */
-#define COH901318_CX_STAT (0x0200)
-#define COH901318_CX_STAT_SPACING (0x04)
-#define COH901318_CX_STAT_RBE_IRQ_IND (0x00000008)
-#define COH901318_CX_STAT_RTC_IRQ_IND (0x00000004)
-#define COH901318_CX_STAT_ACTIVE (0x00000002)
-#define COH901318_CX_STAT_ENABLED (0x00000001)
-
-/*
- * CX_CTRL - Channel Control Registers 32bit (R/W)
- */
-#define COH901318_CX_CTRL (0x0400)
-#define COH901318_CX_CTRL_SPACING (0x10)
-/* Transfer Count Enable */
-#define COH901318_CX_CTRL_TC_ENABLE (0x00001000)
-#define COH901318_CX_CTRL_TC_DISABLE (0x00000000)
-/* Transfer Count Value 0 - 4095 */
-#define COH901318_CX_CTRL_TC_VALUE_MASK (0x00000FFF)
-/* Burst count */
-#define COH901318_CX_CTRL_BURST_COUNT_MASK (0x0000E000)
-#define COH901318_CX_CTRL_BURST_COUNT_64_BYTES (0x7 << 13)
-#define COH901318_CX_CTRL_BURST_COUNT_48_BYTES (0x6 << 13)
-#define COH901318_CX_CTRL_BURST_COUNT_32_BYTES (0x5 << 13)
-#define COH901318_CX_CTRL_BURST_COUNT_16_BYTES (0x4 << 13)
-#define COH901318_CX_CTRL_BURST_COUNT_8_BYTES (0x3 << 13)
-#define COH901318_CX_CTRL_BURST_COUNT_4_BYTES (0x2 << 13)
-#define COH901318_CX_CTRL_BURST_COUNT_2_BYTES (0x1 << 13)
-#define COH901318_CX_CTRL_BURST_COUNT_1_BYTE (0x0 << 13)
-/* Source bus size */
-#define COH901318_CX_CTRL_SRC_BUS_SIZE_MASK (0x00030000)
-#define COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS (0x2 << 16)
-#define COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS (0x1 << 16)
-#define COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS (0x0 << 16)
-/* Source address increment */
-#define COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE (0x00040000)
-#define COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE (0x00000000)
-/* Destination Bus Size */
-#define COH901318_CX_CTRL_DST_BUS_SIZE_MASK (0x00180000)
-#define COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS (0x2 << 19)
-#define COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS (0x1 << 19)
-#define COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS (0x0 << 19)
-/* Destination address increment */
-#define COH901318_CX_CTRL_DST_ADDR_INC_ENABLE (0x00200000)
-#define COH901318_CX_CTRL_DST_ADDR_INC_DISABLE (0x00000000)
-/* Master Mode (Master2 is only connected to MSL) */
-#define COH901318_CX_CTRL_MASTER_MODE_MASK (0x00C00000)
-#define COH901318_CX_CTRL_MASTER_MODE_M2R_M1W (0x3 << 22)
-#define COH901318_CX_CTRL_MASTER_MODE_M1R_M2W (0x2 << 22)
-#define COH901318_CX_CTRL_MASTER_MODE_M2RW (0x1 << 22)
-#define COH901318_CX_CTRL_MASTER_MODE_M1RW (0x0 << 22)
-/* Terminal Count flag to PER enable */
-#define COH901318_CX_CTRL_TCP_ENABLE (0x01000000)
-#define COH901318_CX_CTRL_TCP_DISABLE (0x00000000)
-/* Terminal Count flags to CPU enable */
-#define COH901318_CX_CTRL_TC_IRQ_ENABLE (0x02000000)
-#define COH901318_CX_CTRL_TC_IRQ_DISABLE (0x00000000)
-/* Hand shake to peripheral */
-#define COH901318_CX_CTRL_HSP_ENABLE (0x04000000)
-#define COH901318_CX_CTRL_HSP_DISABLE (0x00000000)
-#define COH901318_CX_CTRL_HSS_ENABLE (0x08000000)
-#define COH901318_CX_CTRL_HSS_DISABLE (0x00000000)
-/* DMA mode */
-#define COH901318_CX_CTRL_DDMA_MASK (0x30000000)
-#define COH901318_CX_CTRL_DDMA_LEGACY (0x0 << 28)
-#define COH901318_CX_CTRL_DDMA_DEMAND_DMA1 (0x1 << 28)
-#define COH901318_CX_CTRL_DDMA_DEMAND_DMA2 (0x2 << 28)
-/* Primary Request Data Destination */
-#define COH901318_CX_CTRL_PRDD_MASK (0x40000000)
-#define COH901318_CX_CTRL_PRDD_DEST (0x1 << 30)
-#define COH901318_CX_CTRL_PRDD_SOURCE (0x0 << 30)
-
-/*
- * CX_SRC_ADDR - Channel Source Address Registers 32bit (R/W)
- */
-#define COH901318_CX_SRC_ADDR (0x0404)
-#define COH901318_CX_SRC_ADDR_SPACING (0x10)
-
-/*
- * CX_DST_ADDR - Channel Destination Address Registers 32bit R/W
- */
-#define COH901318_CX_DST_ADDR (0x0408)
-#define COH901318_CX_DST_ADDR_SPACING (0x10)
-
-/*
- * CX_LNK_ADDR - Channel Link Address Registers 32bit (R/W)
- */
-#define COH901318_CX_LNK_ADDR (0x040C)
-#define COH901318_CX_LNK_ADDR_SPACING (0x10)
-#define COH901318_CX_LNK_LINK_IMMEDIATE (0x00000001)
-#endif /* COH901318_H */
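
The kernel-doc for coh901318_filter_id() in the removed header above says the filter is meant to be passed to dma_request_channel() to pick out a specific channel id. A minimal client-side sketch of that usage, assuming a slave-capable channel and a caller-supplied id; the helper name request_u300_channel is hypothetical:

    #include <linux/dmaengine.h>
    #include <mach/coh901318.h>

    static struct dma_chan *request_u300_channel(int chan_id)
    {
    	dma_cap_mask_t mask;

    	dma_cap_zero(mask);
    	dma_cap_set(DMA_SLAVE, mask);

    	/* coh901318_filter_id() accepts only the channel matching chan_id */
    	return dma_request_channel(mask, coh901318_filter_id,
    				   (void *)(unsigned long)chan_id);
    }
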
diff --git a/trunk/arch/arm/plat-mxc/include/mach/mxc_nand.h b/trunk/arch/arm/plat-mxc/include/mach/mxc_nand.h
index 5d2d21d414e0..2b972df22d12 100644
--- a/trunk/arch/arm/plat-mxc/include/mach/mxc_nand.h
+++ b/trunk/arch/arm/plat-mxc/include/mach/mxc_nand.h
@@ -22,7 +22,6 @@
struct mxc_nand_platform_data {
int width; /* data bus width in bytes */
- int hw_ecc:1; /* 0 if supress hardware ECC */
- int flash_bbt:1; /* set to 1 to use a flash based bbt */
+ int hw_ecc; /* 0 if supress hardware ECC */
};
#endif /* __ASM_ARCH_NAND_H */
diff --git a/trunk/arch/arm/plat-s3c/include/plat/nand.h b/trunk/arch/arm/plat-s3c/include/plat/nand.h
index 226147b7e026..065985978413 100644
--- a/trunk/arch/arm/plat-s3c/include/plat/nand.h
+++ b/trunk/arch/arm/plat-s3c/include/plat/nand.h
@@ -17,7 +17,6 @@
* Setting this flag will allow the kernel to
* look for it at boot time and also skip the NAND
* scan.
- * @options: Default value to set into 'struct nand_chip' options.
* @nr_chips: Number of chips in this set
* @nr_partitions: Number of partitions pointed to by @partitions
* @name: Name of set (optional)
@@ -32,7 +31,6 @@ struct s3c2410_nand_set {
unsigned int disable_ecc:1;
unsigned int flash_bbt:1;
- unsigned int options;
int nr_chips;
int nr_partitions;
char *name;
diff --git a/trunk/arch/avr32/include/asm/elf.h b/trunk/arch/avr32/include/asm/elf.h
index 3b3159b710d4..d5d1d41c600a 100644
--- a/trunk/arch/avr32/include/asm/elf.h
+++ b/trunk/arch/avr32/include/asm/elf.h
@@ -77,6 +77,7 @@ typedef struct user_fpu_struct elf_fpregset_t;
#endif
#define ELF_ARCH EM_AVR32
+#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/trunk/arch/blackfin/include/asm/bfin-lq035q1.h b/trunk/arch/blackfin/include/asm/bfin-lq035q1.h
deleted file mode 100644
index 57bc21ac2296..000000000000
--- a/trunk/arch/blackfin/include/asm/bfin-lq035q1.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Blackfin LCD Framebuffer driver SHARP LQ035Q1DH02
- *
- * Copyright 2008-2009 Analog Devices Inc.
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef BFIN_LQ035Q1_H
-#define BFIN_LQ035Q1_H
-
-#define LQ035_RL (0 << 8) /* Right -> Left Scan */
-#define LQ035_LR (1 << 8) /* Left -> Right Scan */
-#define LQ035_TB (1 << 9) /* Top -> Botton Scan */
-#define LQ035_BT (0 << 9) /* Botton -> Top Scan */
-#define LQ035_BGR (1 << 11) /* Use BGR format */
-#define LQ035_RGB (0 << 11) /* Use RGB format */
-#define LQ035_NORM (1 << 13) /* Reversal */
-#define LQ035_REV (0 << 13) /* Reversal */
-
-struct bfin_lq035q1fb_disp_info {
-
- unsigned mode;
- /* GPIOs */
- int use_bl;
- unsigned gpio_bl;
-};
-
-#endif /* BFIN_LQ035Q1_H */
diff --git a/trunk/arch/blackfin/include/asm/elf.h b/trunk/arch/blackfin/include/asm/elf.h
index 5b50f0ecacf8..8e0764c81eaf 100644
--- a/trunk/arch/blackfin/include/asm/elf.h
+++ b/trunk/arch/blackfin/include/asm/elf.h
@@ -55,6 +55,7 @@ do { \
_regs->p2 = _dynamic_addr; \
} while(0)
+#define USE_ELF_CORE_DUMP
#define ELF_FDPIC_CORE_EFLAGS EF_BFIN_FDPIC
#define ELF_EXEC_PAGESIZE 4096
diff --git a/trunk/arch/cris/include/asm/elf.h b/trunk/arch/cris/include/asm/elf.h
index 8a3d8e2b33c1..0f51b10b9f4f 100644
--- a/trunk/arch/cris/include/asm/elf.h
+++ b/trunk/arch/cris/include/asm/elf.h
@@ -64,6 +64,8 @@ typedef unsigned long elf_fpregset_t;
#define EF_CRIS_VARIANT_COMMON_V10_V32 0x00000004
/* End of excerpt from {binutils}/include/elf/cris.h. */
+#define USE_ELF_CORE_DUMP
+
#define ELF_EXEC_PAGESIZE 8192
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/trunk/arch/frv/include/asm/elf.h b/trunk/arch/frv/include/asm/elf.h
index c3819804a74b..7bbf6e47f8c8 100644
--- a/trunk/arch/frv/include/asm/elf.h
+++ b/trunk/arch/frv/include/asm/elf.h
@@ -115,6 +115,7 @@ do { \
__kernel_frame0_ptr->gr29 = 0; \
} while(0)
+#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
#define ELF_FDPIC_CORE_EFLAGS EF_FRV_FDPIC
#define ELF_EXEC_PAGESIZE 16384
diff --git a/trunk/arch/h8300/include/asm/elf.h b/trunk/arch/h8300/include/asm/elf.h
index c24fa250d653..94e2284c8816 100644
--- a/trunk/arch/h8300/include/asm/elf.h
+++ b/trunk/arch/h8300/include/asm/elf.h
@@ -34,6 +34,7 @@ typedef unsigned long elf_fpregset_t;
#define ELF_PLAT_INIT(_r) _r->er1 = 0
+#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/trunk/arch/ia64/ia32/elfcore32.h b/trunk/arch/ia64/ia32/elfcore32.h
index 657725742617..9a3abf58cea3 100644
--- a/trunk/arch/ia64/ia32/elfcore32.h
+++ b/trunk/arch/ia64/ia32/elfcore32.h
@@ -11,6 +11,8 @@
#include
#include
+#define USE_ELF_CORE_DUMP 1
+
/* Override elfcore.h */
#define _LINUX_ELFCORE_H 1
typedef unsigned int elf_greg_t;
diff --git a/trunk/arch/ia64/include/asm/dma-mapping.h b/trunk/arch/ia64/include/asm/dma-mapping.h
index 7d09a09cdaad..8d3c79cd81e7 100644
--- a/trunk/arch/ia64/include/asm/dma-mapping.h
+++ b/trunk/arch/ia64/include/asm/dma-mapping.h
@@ -73,7 +73,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
if (!dev->dma_mask)
return 0;
- return addr + size - 1 <= *dev->dma_mask;
+ return addr + size <= *dev->dma_mask;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
diff --git a/trunk/arch/ia64/include/asm/elf.h b/trunk/arch/ia64/include/asm/elf.h
index e14108b19c09..86eddee029cb 100644
--- a/trunk/arch/ia64/include/asm/elf.h
+++ b/trunk/arch/ia64/include/asm/elf.h
@@ -25,6 +25,7 @@
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_IA_64
+#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
/* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are
diff --git a/trunk/arch/ia64/include/asm/io.h b/trunk/arch/ia64/include/asm/io.h
index cc8335eb3110..0d9d16e2d949 100644
--- a/trunk/arch/ia64/include/asm/io.h
+++ b/trunk/arch/ia64/include/asm/io.h
@@ -424,8 +424,6 @@ __writeq (unsigned long val, volatile void __iomem *addr)
extern void __iomem * ioremap(unsigned long offset, unsigned long size);
extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
extern void iounmap (volatile void __iomem *addr);
-extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
-extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
/*
* String version of IO memory access ops:
diff --git a/trunk/arch/ia64/mm/ioremap.c b/trunk/arch/ia64/mm/ioremap.c
index 3dccdd8eb275..2a140627dfd6 100644
--- a/trunk/arch/ia64/mm/ioremap.c
+++ b/trunk/arch/ia64/mm/ioremap.c
@@ -21,12 +21,6 @@ __ioremap (unsigned long phys_addr)
return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
}
-void __iomem *
-early_ioremap (unsigned long phys_addr, unsigned long size)
-{
- return __ioremap(phys_addr);
-}
-
void __iomem *
ioremap (unsigned long phys_addr, unsigned long size)
{
@@ -107,11 +101,6 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size)
}
EXPORT_SYMBOL(ioremap_nocache);
-void
-early_iounmap (volatile void __iomem *addr, unsigned long size)
-{
-}
-
void
iounmap (volatile void __iomem *addr)
{
diff --git a/trunk/arch/ia64/sn/pci/tioca_provider.c b/trunk/arch/ia64/sn/pci/tioca_provider.c
index efb454534e52..35b2a27d2e77 100644
--- a/trunk/arch/ia64/sn/pci/tioca_provider.c
+++ b/trunk/arch/ia64/sn/pci/tioca_provider.c
@@ -9,7 +9,6 @@
#include
#include
#include
-#include
#include
#include
#include
@@ -370,7 +369,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
static dma_addr_t
tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
{
- int ps, ps_shift, entry, entries, mapsize;
+ int i, ps, ps_shift, entry, entries, mapsize, last_entry;
u64 xio_addr, end_xio_addr;
struct tioca_common *tioca_common;
struct tioca_kernel *tioca_kern;
@@ -411,13 +410,23 @@ tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
map = tioca_kern->ca_pcigart_pagemap;
mapsize = tioca_kern->ca_pcigart_entries;
- entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0);
- if (entry >= mapsize) {
+ entry = find_first_zero_bit(map, mapsize);
+ while (entry < mapsize) {
+ last_entry = find_next_bit(map, mapsize, entry);
+
+ if (last_entry - entry >= entries)
+ break;
+
+ entry = find_next_zero_bit(map, mapsize, last_entry);
+ }
+
+ if (entry > mapsize) {
kfree(ca_dmamap);
goto map_return;
}
- bitmap_set(map, entry, entries);
+ for (i = 0; i < entries; i++)
+ set_bit(entry + i, map);
bus_addr = tioca_kern->ca_pciap_base + (entry * ps);
diff --git a/trunk/arch/m32r/include/asm/elf.h b/trunk/arch/m32r/include/asm/elf.h
index 2f85412ef730..0cc34c94bf2b 100644
--- a/trunk/arch/m32r/include/asm/elf.h
+++ b/trunk/arch/m32r/include/asm/elf.h
@@ -102,6 +102,7 @@ typedef elf_fpreg_t elf_fpregset_t;
*/
#define ELF_PLAT_INIT(_r, load_addr) (_r)->r0 = 0
+#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*
diff --git a/trunk/arch/m68k/include/asm/elf.h b/trunk/arch/m68k/include/asm/elf.h
index 01c193d91412..0b0f49eb876b 100644
--- a/trunk/arch/m68k/include/asm/elf.h
+++ b/trunk/arch/m68k/include/asm/elf.h
@@ -59,6 +59,7 @@ typedef struct user_m68kfp_struct elf_fpregset_t;
is actually used on ASV. */
#define ELF_PLAT_INIT(_r, load_addr) _r->a1 = 0
+#define USE_ELF_CORE_DUMP
#ifndef CONFIG_SUN3
#define ELF_EXEC_PAGESIZE 4096
#else
diff --git a/trunk/arch/microblaze/include/asm/elf.h b/trunk/arch/microblaze/include/asm/elf.h
index 7d4acf2b278e..f92fc0dda006 100644
--- a/trunk/arch/microblaze/include/asm/elf.h
+++ b/trunk/arch/microblaze/include/asm/elf.h
@@ -77,6 +77,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define ELF_DATA ELFDATA2MSB
#endif
+#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
diff --git a/trunk/arch/mips/include/asm/elf.h b/trunk/arch/mips/include/asm/elf.h
index 7a6a35dbe529..7990694cda22 100644
--- a/trunk/arch/mips/include/asm/elf.h
+++ b/trunk/arch/mips/include/asm/elf.h
@@ -326,6 +326,7 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) \
dump_task_fpu(tsk, elf_fpregs)
+#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This yields a mask that user programs can use to figure out what
diff --git a/trunk/arch/mn10300/include/asm/elf.h b/trunk/arch/mn10300/include/asm/elf.h
index e5fa97cd9a14..75a70aa9fd6f 100644
--- a/trunk/arch/mn10300/include/asm/elf.h
+++ b/trunk/arch/mn10300/include/asm/elf.h
@@ -77,6 +77,7 @@ do { \
_ur->a1 = 0; _ur->a0 = 0; _ur->d1 = 0; _ur->d0 = 0; \
} while (0)
+#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
diff --git a/trunk/arch/parisc/include/asm/bug.h b/trunk/arch/parisc/include/asm/bug.h
index 75e46c557a16..8cfc553fc837 100644
--- a/trunk/arch/parisc/include/asm/bug.h
+++ b/trunk/arch/parisc/include/asm/bug.h
@@ -32,14 +32,14 @@
"\t.popsection" \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (0), "i" (sizeof(struct bug_entry)) ); \
- unreachable(); \
+ for(;;) ; \
} while(0)
#else
#define BUG() \
do { \
asm volatile(PARISC_BUG_BREAK_ASM : : ); \
- unreachable(); \
+ for(;;) ; \
} while(0)
#endif
diff --git a/trunk/arch/parisc/include/asm/elf.h b/trunk/arch/parisc/include/asm/elf.h
index 19f6cb1a4a1c..9c802eb4be84 100644
--- a/trunk/arch/parisc/include/asm/elf.h
+++ b/trunk/arch/parisc/include/asm/elf.h
@@ -328,6 +328,7 @@ struct pt_regs; /* forward declaration... */
such function. */
#define ELF_PLAT_INIT(_r, load_addr) _r->gr[23] = 0
+#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/trunk/arch/parisc/include/asm/ftrace.h b/trunk/arch/parisc/include/asm/ftrace.h
index 72c0fafaa039..2fa05dd6aeee 100644
--- a/trunk/arch/parisc/include/asm/ftrace.h
+++ b/trunk/arch/parisc/include/asm/ftrace.h
@@ -20,20 +20,6 @@ struct ftrace_ret_stack {
* Defined in entry.S
*/
extern void return_to_handler(void);
-
-
-extern unsigned long return_address(unsigned int);
-
-#define HAVE_ARCH_CALLER_ADDR
-
-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-#define CALLER_ADDR1 return_address(1)
-#define CALLER_ADDR2 return_address(2)
-#define CALLER_ADDR3 return_address(3)
-#define CALLER_ADDR4 return_address(4)
-#define CALLER_ADDR5 return_address(5)
-#define CALLER_ADDR6 return_address(6)
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_PARISC_FTRACE_H */
diff --git a/trunk/arch/parisc/kernel/asm-offsets.c b/trunk/arch/parisc/kernel/asm-offsets.c
index ec787b411e9a..fcd3c707bf12 100644
--- a/trunk/arch/parisc/kernel/asm-offsets.c
+++ b/trunk/arch/parisc/kernel/asm-offsets.c
@@ -244,6 +244,9 @@ int main(void)
DEFINE(THREAD_SZ, sizeof(struct thread_info));
DEFINE(THREAD_SZ_ALGN, align(sizeof(struct thread_info), 64));
BLANK();
+ DEFINE(IRQSTAT_SIRQ_PEND, offsetof(irq_cpustat_t, __softirq_pending));
+ DEFINE(IRQSTAT_SZ, sizeof(irq_cpustat_t));
+ BLANK();
DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
DEFINE(ICACHE_COUNT, offsetof(struct pdc_cache_info, ic_count));
diff --git a/trunk/arch/parisc/kernel/irq.c b/trunk/arch/parisc/kernel/irq.c
index efbcee5d2220..f47465e8d040 100644
--- a/trunk/arch/parisc/kernel/irq.c
+++ b/trunk/arch/parisc/kernel/irq.c
@@ -145,7 +145,7 @@ static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
#endif
static struct irq_chip cpu_interrupt_type = {
- .name = "CPU",
+ .typename = "CPU",
.startup = cpu_startup_irq,
.shutdown = cpu_disable_irq,
.enable = cpu_enable_irq,
@@ -192,7 +192,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%10u ", kstat_irqs(i));
#endif
- seq_printf(p, " %14s", irq_desc[i].chip->name);
+ seq_printf(p, " %14s", irq_desc[i].chip->typename);
#ifndef PARISC_IRQ_CR16_COUNTS
seq_printf(p, " %s", action->name);
diff --git a/trunk/arch/parisc/kernel/signal.c b/trunk/arch/parisc/kernel/signal.c
index fb37ac52e46c..e8467e4aa8d1 100644
--- a/trunk/arch/parisc/kernel/signal.c
+++ b/trunk/arch/parisc/kernel/signal.c
@@ -26,6 +26,7 @@
#include
#include
#include
+#include
#include
#include
#include
diff --git a/trunk/arch/parisc/kernel/smp.c b/trunk/arch/parisc/kernel/smp.c
index 3f2fce8ce6b6..1fd0f0cec037 100644
--- a/trunk/arch/parisc/kernel/smp.c
+++ b/trunk/arch/parisc/kernel/smp.c
@@ -60,6 +60,8 @@ static int smp_debug_lvl = 0;
#define smp_debug(lvl, ...) do { } while(0)
#endif /* DEBUG_SMP */
+DEFINE_SPINLOCK(smp_lock);
+
volatile struct task_struct *smp_init_current_idle_task;
/* track which CPU is booting */
@@ -67,7 +69,7 @@ static volatile int cpu_now_booting __cpuinitdata;
static int parisc_max_cpus __cpuinitdata = 1;
-static DEFINE_PER_CPU(spinlock_t, ipi_lock);
+DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
enum ipi_message_type {
IPI_NOP=0,
@@ -436,11 +438,6 @@ void __init smp_prepare_boot_cpu(void)
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- int cpu;
-
- for_each_possible_cpu(cpu)
- spin_lock_init(&per_cpu(ipi_lock, cpu));
-
init_cpu_present(cpumask_of(0));
parisc_max_cpus = max_cpus;
diff --git a/trunk/arch/parisc/kernel/unwind.c b/trunk/arch/parisc/kernel/unwind.c
index d58eac1a8288..a36799e85693 100644
--- a/trunk/arch/parisc/kernel/unwind.c
+++ b/trunk/arch/parisc/kernel/unwind.c
@@ -13,7 +13,6 @@
#include
#include
#include
-#include
#include
#include
@@ -116,18 +115,24 @@ unwind_table_init(struct unwind_table *table, const char *name,
}
}
-static int cmp_unwind_table_entry(const void *a, const void *b)
-{
- return ((const struct unwind_table_entry *)a)->region_start
- - ((const struct unwind_table_entry *)b)->region_start;
-}
-
static void
unwind_table_sort(struct unwind_table_entry *start,
struct unwind_table_entry *finish)
{
- sort(start, finish - start, sizeof(struct unwind_table_entry),
- cmp_unwind_table_entry, NULL);
+ struct unwind_table_entry el, *p, *q;
+
+ for (p = start + 1; p < finish; ++p) {
+ if (p[0].region_start < p[-1].region_start) {
+ el = *p;
+ q = p;
+ do {
+ q[0] = q[-1];
+ --q;
+ } while (q > start &&
+ el.region_start < q[-1].region_start);
+ *q = el;
+ }
+ }
}
struct unwind_table *
@@ -412,30 +417,3 @@ int unwind_to_user(struct unwind_frame_info *info)
return ret;
}
-
-unsigned long return_address(unsigned int level)
-{
- struct unwind_frame_info info;
- struct pt_regs r;
- unsigned long sp;
-
- /* initialize unwind info */
- asm volatile ("copy %%r30, %0" : "=r"(sp));
- memset(&r, 0, sizeof(struct pt_regs));
- r.iaoq[0] = (unsigned long) current_text_addr();
- r.gr[2] = (unsigned long) __builtin_return_address(0);
- r.gr[30] = sp;
- unwind_frame_init(&info, current, &r);
-
- /* unwind stack */
- ++level;
- do {
- if (unwind_once(&info) < 0 || info.ip == 0)
- return 0;
- if (!__kernel_text_address(info.ip)) {
- return 0;
- }
- } while (info.ip && level--);
-
- return info.ip;
-}
diff --git a/trunk/arch/powerpc/include/asm/async_tx.h b/trunk/arch/powerpc/include/asm/async_tx.h
deleted file mode 100644
index 8b2dc55d01ab..000000000000
--- a/trunk/arch/powerpc/include/asm/async_tx.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2008-2009 DENX Software Engineering.
- *
- * Author: Yuri Tikhonov
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-#ifndef _ASM_POWERPC_ASYNC_TX_H_
-#define _ASM_POWERPC_ASYNC_TX_H_
-
-#if defined(CONFIG_440SPe) || defined(CONFIG_440SP)
-extern struct dma_chan *
-ppc440spe_async_tx_find_best_channel(enum dma_transaction_type cap,
- struct page **dst_lst, int dst_cnt, struct page **src_lst,
- int src_cnt, size_t src_sz);
-
-#define async_tx_find_channel(dep, cap, dst_lst, dst_cnt, src_lst, \
- src_cnt, src_sz) \
- ppc440spe_async_tx_find_best_channel(cap, dst_lst, dst_cnt, src_lst, \
- src_cnt, src_sz)
-#else
-
-#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
- __async_tx_find_channel(dep, type)
-
-struct dma_chan *
-__async_tx_find_channel(struct async_submit_ctl *submit,
- enum dma_transaction_type tx_type);
-
-#endif
-
-#endif
diff --git a/trunk/arch/powerpc/include/asm/dcr-regs.h b/trunk/arch/powerpc/include/asm/dcr-regs.h
index 380274de429f..828e3aa1f2fc 100644
--- a/trunk/arch/powerpc/include/asm/dcr-regs.h
+++ b/trunk/arch/powerpc/include/asm/dcr-regs.h
@@ -157,27 +157,4 @@
#define L2C_SNP_SSR_32G 0x0000f000
#define L2C_SNP_ESR 0x00000800
-/*
- * DCR register offsets for 440SP/440SPe I2O/DMA controller.
- * The base address is configured in the device tree.
- */
-#define DCRN_I2O0_IBAL 0x006
-#define DCRN_I2O0_IBAH 0x007
-#define I2O_REG_ENABLE 0x00000001 /* Enable I2O/DMA access */
-
-/* 440SP/440SPe Software Reset DCR */
-#define DCRN_SDR0_SRST 0x0200
-#define DCRN_SDR0_SRST_I2ODMA (0x80000000 >> 15) /* Reset I2O/DMA */
-
-/* 440SP/440SPe Memory Queue DCR offsets */
-#define DCRN_MQ0_XORBA 0x04
-#define DCRN_MQ0_CF2H 0x06
-#define DCRN_MQ0_CFBHL 0x0f
-#define DCRN_MQ0_BAUH 0x10
-
-/* HB/LL Paths Configuration Register */
-#define MQ0_CFBHL_TPLM 28
-#define MQ0_CFBHL_HBCL 23
-#define MQ0_CFBHL_POLY 15
-
#endif /* __DCR_REGS_H__ */
diff --git a/trunk/arch/powerpc/include/asm/dma-mapping.h b/trunk/arch/powerpc/include/asm/dma-mapping.h
index 80a973bb9e71..e281daebddca 100644
--- a/trunk/arch/powerpc/include/asm/dma-mapping.h
+++ b/trunk/arch/powerpc/include/asm/dma-mapping.h
@@ -197,7 +197,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
if (!dev->dma_mask)
return 0;
- return addr + size - 1 <= *dev->dma_mask;
+ return addr + size <= *dev->dma_mask;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
diff --git a/trunk/arch/powerpc/include/asm/elf.h b/trunk/arch/powerpc/include/asm/elf.h
index 17828ad411eb..014a624f4c8e 100644
--- a/trunk/arch/powerpc/include/asm/elf.h
+++ b/trunk/arch/powerpc/include/asm/elf.h
@@ -170,6 +170,7 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
#define compat_elf_check_arch(x) ((x)->e_machine == EM_PPC)
+#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE
diff --git a/trunk/arch/powerpc/include/asm/ptrace.h b/trunk/arch/powerpc/include/asm/ptrace.h
index cbd759e3cd78..8c341490cfc5 100644
--- a/trunk/arch/powerpc/include/asm/ptrace.h
+++ b/trunk/arch/powerpc/include/asm/ptrace.h
@@ -140,8 +140,6 @@ extern void user_enable_single_step(struct task_struct *);
extern void user_enable_block_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
-#define ARCH_HAS_USER_SINGLE_STEP_INFO
-
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/trunk/arch/powerpc/kernel/iommu.c b/trunk/arch/powerpc/kernel/iommu.c
index 5547ae6e6b0b..fd51578e29dd 100644
--- a/trunk/arch/powerpc/kernel/iommu.c
+++ b/trunk/arch/powerpc/kernel/iommu.c
@@ -30,7 +30,7 @@
#include
#include
#include
-#include
+#include
#include
#include
#include
@@ -251,7 +251,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
}
ppc_md.tce_free(tbl, entry, npages);
- bitmap_clear(tbl->it_map, free_entry, npages);
+ iommu_area_free(tbl->it_map, free_entry, npages);
}
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
diff --git a/trunk/arch/powerpc/kernel/traps.c b/trunk/arch/powerpc/kernel/traps.c
index d069ff8a7e03..804f0f30f227 100644
--- a/trunk/arch/powerpc/kernel/traps.c
+++ b/trunk/arch/powerpc/kernel/traps.c
@@ -174,15 +174,6 @@ int die(const char *str, struct pt_regs *regs, long err)
return 0;
}
-void user_single_step_siginfo(struct task_struct *tsk,
- struct pt_regs *regs, siginfo_t *info)
-{
- memset(info, 0, sizeof(*info));
- info->si_signo = SIGTRAP;
- info->si_code = TRAP_TRACE;
- info->si_addr = (void __user *)regs->nip;
-}
-
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
siginfo_t info;
diff --git a/trunk/arch/s390/include/asm/elf.h b/trunk/arch/s390/include/asm/elf.h
index 354d42616c7e..e885442c1dfe 100644
--- a/trunk/arch/s390/include/asm/elf.h
+++ b/trunk/arch/s390/include/asm/elf.h
@@ -155,6 +155,7 @@ extern unsigned int vdso_enabled;
} while (0)
#define CORE_DUMP_USE_REGSET
+#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/trunk/arch/score/include/asm/elf.h b/trunk/arch/score/include/asm/elf.h
index f478ce94181f..43526d9fda93 100644
--- a/trunk/arch/score/include/asm/elf.h
+++ b/trunk/arch/score/include/asm/elf.h
@@ -61,6 +61,7 @@ struct task_struct;
struct pt_regs;
#define CORE_DUMP_USE_REGSET
+#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This yields a mask that user programs can use to figure out what
diff --git a/trunk/arch/sh/Kconfig.debug b/trunk/arch/sh/Kconfig.debug
index 12fec72fec5f..55907af1dc25 100644
--- a/trunk/arch/sh/Kconfig.debug
+++ b/trunk/arch/sh/Kconfig.debug
@@ -19,6 +19,50 @@ config SH_STANDARD_BIOS
mask ROM and no flash (WindowsCE machines fall in this category).
If unsure, say N.
+config EARLY_SCIF_CONSOLE
+ bool "Use early SCIF console"
+ help
+ This enables an early console using a fixed SCIF port. This can
+	  be used by platforms that either are not running the SH
+	  standard BIOS or do not wish to use the BIOS callbacks for the
+ serial I/O.
+
+config EARLY_SCIF_CONSOLE_PORT
+ hex
+ depends on EARLY_SCIF_CONSOLE
+ default "0xa4400000" if CPU_SUBTYPE_SH7712 || CPU_SUBTYPE_SH7705
+ default "0xa4430000" if CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721
+ default "0xf8420000" if CPU_SUBTYPE_SH7619
+ default "0xff804000" if CPU_SUBTYPE_MXG
+ default "0xffc30000" if CPU_SUBTYPE_SHX3
+ default "0xffe00000" if CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7763 || \
+ CPU_SUBTYPE_SH7722 || CPU_SUBTYPE_SH7366 || \
+ CPU_SUBTYPE_SH7343
+ default "0xfe4c0000" if CPU_SUBTYPE_SH7757
+ default "0xffeb0000" if CPU_SUBTYPE_SH7785
+ default "0xffeb0000" if CPU_SUBTYPE_SH7786
+ default "0xfffe8000" if CPU_SUBTYPE_SH7203
+ default "0xfffe9800" if CPU_SUBTYPE_SH7206 || CPU_SUBTYPE_SH7263
+ default "0xffe80000" if CPU_SH4
+ default "0xa4000150" if CPU_SH3
+ default "0x00000000"
+
+config EARLY_PRINTK
+ bool "Early printk support"
+ depends on SH_STANDARD_BIOS || EARLY_SCIF_CONSOLE
+ help
+ Say Y here to redirect kernel printk messages to the serial port
+ used by the SH-IPL bootloader, starting very early in the boot
+ process and ending when the kernel's serial console is initialised.
+	  This option is only useful when porting the kernel to a new machine,
+ when the kernel may crash or hang before the serial console is
+ initialised. If unsure, say N.
+
+	  On boards running SH-IPL that should keep the port
+	  initialisation consistent while not using the BIOS callbacks,
+	  select both EARLY_SCIF_CONSOLE and SH_STANDARD_BIOS, and use
+	  the kernel command line option to toggle between the two.
+
config STACK_DEBUG
bool "Check for stack overflows"
depends on DEBUG_KERNEL && SUPERH32
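
The EARLY_PRINTK entry added above is gated on either SH_STANDARD_BIOS or the
fixed-port EARLY_SCIF_CONSOLE. As a rough illustration only, a board without
SH-IPL could combine the SH-4 default from EARLY_SCIF_CONSOLE_PORT with the
earlyprintk=serial argument that the rts7751r2d defconfig hunks later in this
diff add to their command line; the fragment below is pieced together from
those hunks and is not itself part of this patch:

    # .config fragment (illustrative)
    CONFIG_EARLY_SCIF_CONSOLE=y
    CONFIG_EARLY_SCIF_CONSOLE_PORT=0xffe80000
    CONFIG_EARLY_PRINTK=y

    # kernel command line (as in the rts7751r2d defconfig change below)
    console=tty0 console=ttySC0,115200 root=/dev/sda1 earlyprintk=serial
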
diff --git a/trunk/arch/sh/boards/mach-ap325rxa/setup.c b/trunk/arch/sh/boards/mach-ap325rxa/setup.c
index 1f5fa5c44f6d..cf9dc12dfeb1 100644
--- a/trunk/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/trunk/arch/sh/boards/mach-ap325rxa/setup.c
@@ -316,24 +316,20 @@ static struct soc_camera_platform_info camera_info = {
.format_name = "UYVY",
.format_depth = 16,
.format = {
- .code = V4L2_MBUS_FMT_YUYV8_2X8_BE,
+ .pixelformat = V4L2_PIX_FMT_UYVY,
.colorspace = V4L2_COLORSPACE_SMPTE170M,
- .field = V4L2_FIELD_NONE,
.width = 640,
.height = 480,
},
.bus_param = SOCAM_PCLK_SAMPLE_RISING | SOCAM_HSYNC_ACTIVE_HIGH |
- SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_MASTER | SOCAM_DATAWIDTH_8 |
- SOCAM_DATA_ACTIVE_HIGH,
+ SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_MASTER | SOCAM_DATAWIDTH_8,
.set_capture = camera_set_capture,
-};
-
-struct soc_camera_link camera_link = {
- .bus_id = 0,
- .add_device = ap325rxa_camera_add,
- .del_device = ap325rxa_camera_del,
- .module_name = "soc_camera_platform",
- .priv = &camera_info,
+ .link = {
+ .bus_id = 0,
+ .add_device = ap325rxa_camera_add,
+ .del_device = ap325rxa_camera_del,
+ .module_name = "soc_camera_platform",
+ },
};
static void dummy_release(struct device *dev)
@@ -351,7 +347,7 @@ static struct platform_device camera_device = {
static int ap325rxa_camera_add(struct soc_camera_link *icl,
struct device *dev)
{
- if (icl != &camera_link || camera_probe() <= 0)
+ if (icl != &camera_info.link || camera_probe() <= 0)
return -ENODEV;
camera_info.dev = dev;
@@ -361,7 +357,7 @@ static int ap325rxa_camera_add(struct soc_camera_link *icl,
static void ap325rxa_camera_del(struct soc_camera_link *icl)
{
- if (icl != &camera_link)
+ if (icl != &camera_info.link)
return;
platform_device_unregister(&camera_device);
@@ -474,15 +470,13 @@ static struct ov772x_camera_info ov7725_info = {
.buswidth = SOCAM_DATAWIDTH_8,
.flags = OV772X_FLAG_VFLIP | OV772X_FLAG_HFLIP,
.edgectrl = OV772X_AUTO_EDGECTRL(0xf, 0),
-};
-
-static struct soc_camera_link ov7725_link = {
- .bus_id = 0,
- .power = ov7725_power,
- .board_info = &ap325rxa_i2c_camera[0],
- .i2c_adapter_id = 0,
- .module_name = "ov772x",
- .priv = &ov7725_info,
+ .link = {
+ .bus_id = 0,
+ .power = ov7725_power,
+ .board_info = &ap325rxa_i2c_camera[0],
+ .i2c_adapter_id = 0,
+ .module_name = "ov772x",
+ },
};
static struct platform_device ap325rxa_camera[] = {
@@ -490,13 +484,13 @@ static struct platform_device ap325rxa_camera[] = {
.name = "soc-camera-pdrv",
.id = 0,
.dev = {
- .platform_data = &ov7725_link,
+ .platform_data = &ov7725_info.link,
},
}, {
.name = "soc-camera-pdrv",
.id = 1,
.dev = {
- .platform_data = &camera_link,
+ .platform_data = &camera_info.link,
},
},
};
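
The ap325rxa hunks above drop the standalone soc_camera_link objects and embed
the link as a member of each board-specific info structure, so the platform
devices now point at &camera_info.link and &ov7725_info.link. A consumer
holding only the embedded link can get back to the surrounding info structure
with container_of(); the self-contained sketch below shows that pattern with
stand-in types (it does not reproduce the real soc_camera definitions or the
ov772x driver internals):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct camera_link {                    /* stands in for soc_camera_link */
            int bus_id;
            const char *module_name;
    };

    struct camera_info {                    /* stands in for ov772x_camera_info */
            int buswidth;
            struct camera_link link;        /* embedded generic descriptor */
    };

    static struct camera_info info = {
            .buswidth = 8,
            .link = { .bus_id = 0, .module_name = "ov772x" },
    };

    int main(void)
    {
            /* a driver sees only the embedded link (e.g. via platform_data)... */
            struct camera_link *icl = &info.link;
            /* ...and recovers the surrounding info with container_of() */
            struct camera_info *priv = container_of(icl, struct camera_info, link);

            printf("buswidth=%d module=%s\n", priv->buswidth, icl->module_name);
            return 0;
    }
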
diff --git a/trunk/arch/sh/boards/mach-ecovec24/setup.c b/trunk/arch/sh/boards/mach-ecovec24/setup.c
index 194aaca22d47..826e62326d51 100644
--- a/trunk/arch/sh/boards/mach-ecovec24/setup.c
+++ b/trunk/arch/sh/boards/mach-ecovec24/setup.c
@@ -19,18 +19,11 @@
#include
#include
#include
-#include
-#include
-#include
-#include
#include
#include
#include
#include
-#include
#include
-#include
-#include
#include
#include
#include
@@ -345,12 +338,6 @@ static struct platform_device ceu1_device = {
};
/* I2C device */
-static struct i2c_board_info i2c0_devices[] = {
- {
- I2C_BOARD_INFO("da7210", 0x1a),
- },
-};
-
static struct i2c_board_info i2c1_devices[] = {
{
I2C_BOARD_INFO("r2025sd", 0x32),
@@ -434,7 +421,6 @@ static struct i2c_board_info ts_i2c_clients = {
.irq = IRQ0,
};
-#ifdef CONFIG_MFD_SH_MOBILE_SDHI
/* SHDI0 */
static void sdhi0_set_pwr(struct platform_device *pdev, int state)
{
@@ -507,248 +493,6 @@ static struct platform_device sdhi1_device = {
},
};
-#else
-
-static int mmc_spi_get_ro(struct device *dev)
-{
- return gpio_get_value(GPIO_PTY6);
-}
-
-static int mmc_spi_get_cd(struct device *dev)
-{
- return !gpio_get_value(GPIO_PTY7);
-}
-
-static void mmc_spi_setpower(struct device *dev, unsigned int maskval)
-{
- gpio_set_value(GPIO_PTB6, maskval ? 1 : 0);
-}
-
-static struct mmc_spi_platform_data mmc_spi_info = {
- .get_ro = mmc_spi_get_ro,
- .get_cd = mmc_spi_get_cd,
- .caps = MMC_CAP_NEEDS_POLL,
- .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, /* 3.3V only */
- .setpower = mmc_spi_setpower,
-};
-
-static struct spi_board_info spi_bus[] = {
- {
- .modalias = "mmc_spi",
- .platform_data = &mmc_spi_info,
- .max_speed_hz = 5000000,
- .mode = SPI_MODE_0,
- .controller_data = (void *) GPIO_PTM4,
- },
-};
-
-static struct sh_msiof_spi_info msiof0_data = {
- .num_chipselect = 1,
-};
-
-static struct resource msiof0_resources[] = {
- [0] = {
- .name = "MSIOF0",
- .start = 0xa4c40000,
- .end = 0xa4c40063,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = 84,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device msiof0_device = {
- .name = "spi_sh_msiof",
- .id = 0, /* MSIOF0 */
- .dev = {
- .platform_data = &msiof0_data,
- },
- .num_resources = ARRAY_SIZE(msiof0_resources),
- .resource = msiof0_resources,
- .archdata = {
- .hwblk_id = HWBLK_MSIOF0,
- },
-};
-
-#endif
-
-/* I2C Video/Camera */
-static struct i2c_board_info i2c_camera[] = {
- {
- I2C_BOARD_INFO("tw9910", 0x45),
- },
- {
- /* 1st camera */
- I2C_BOARD_INFO("mt9t112", 0x3c),
- },
- {
- /* 2nd camera */
- I2C_BOARD_INFO("mt9t112", 0x3c),
- },
-};
-
-/* tw9910 */
-static int tw9910_power(struct device *dev, int mode)
-{
- int val = mode ? 0 : 1;
-
- gpio_set_value(GPIO_PTU2, val);
- if (mode)
- mdelay(100);
-
- return 0;
-}
-
-static struct tw9910_video_info tw9910_info = {
- .buswidth = SOCAM_DATAWIDTH_8,
- .mpout = TW9910_MPO_FIELD,
-};
-
-static struct soc_camera_link tw9910_link = {
- .i2c_adapter_id = 0,
- .bus_id = 1,
- .power = tw9910_power,
- .board_info = &i2c_camera[0],
- .module_name = "tw9910",
- .priv = &tw9910_info,
-};
-
-/* mt9t112 */
-static int mt9t112_power1(struct device *dev, int mode)
-{
- gpio_set_value(GPIO_PTA3, mode);
- if (mode)
- mdelay(100);
-
- return 0;
-}
-
-static struct mt9t112_camera_info mt9t112_info1 = {
- .flags = MT9T112_FLAG_PCLK_RISING_EDGE | MT9T112_FLAG_DATAWIDTH_8,
- .divider = { 0x49, 0x6, 0, 6, 0, 9, 9, 6, 0 }, /* for 24MHz */
-};
-
-static struct soc_camera_link mt9t112_link1 = {
- .i2c_adapter_id = 0,
- .power = mt9t112_power1,
- .bus_id = 0,
- .board_info = &i2c_camera[1],
- .module_name = "mt9t112",
- .priv = &mt9t112_info1,
-};
-
-static int mt9t112_power2(struct device *dev, int mode)
-{
- gpio_set_value(GPIO_PTA4, mode);
- if (mode)
- mdelay(100);
-
- return 0;
-}
-
-static struct mt9t112_camera_info mt9t112_info2 = {
- .flags = MT9T112_FLAG_PCLK_RISING_EDGE | MT9T112_FLAG_DATAWIDTH_8,
- .divider = { 0x49, 0x6, 0, 6, 0, 9, 9, 6, 0 }, /* for 24MHz */
-};
-
-static struct soc_camera_link mt9t112_link2 = {
- .i2c_adapter_id = 1,
- .power = mt9t112_power2,
- .bus_id = 1,
- .board_info = &i2c_camera[2],
- .module_name = "mt9t112",
- .priv = &mt9t112_info2,
-};
-
-static struct platform_device camera_devices[] = {
- {
- .name = "soc-camera-pdrv",
- .id = 0,
- .dev = {
- .platform_data = &tw9910_link,
- },
- },
- {
- .name = "soc-camera-pdrv",
- .id = 1,
- .dev = {
- .platform_data = &mt9t112_link1,
- },
- },
- {
- .name = "soc-camera-pdrv",
- .id = 2,
- .dev = {
- .platform_data = &mt9t112_link2,
- },
- },
-};
-
-/* FSI */
-/*
- * FSI-B use external clock which came from da7210.
- * So, we should change parent of fsi
- */
-#define FCLKBCR 0xa415000c
-static void fsimck_init(struct clk *clk)
-{
- u32 status = ctrl_inl(clk->enable_reg);
-
- /* use external clock */
- status &= ~0x000000ff;
- status |= 0x00000080;
-
- ctrl_outl(status, clk->enable_reg);
-}
-
-static struct clk_ops fsimck_clk_ops = {
- .init = fsimck_init,
-};
-
-static struct clk fsimckb_clk = {
- .name = "fsimckb_clk",
- .id = -1,
- .ops = &fsimck_clk_ops,
- .enable_reg = (void __iomem *)FCLKBCR,
- .rate = 0, /* unknown */
-};
-
-struct sh_fsi_platform_info fsi_info = {
- .portb_flags = SH_FSI_BRS_INV |
- SH_FSI_OUT_SLAVE_MODE |
- SH_FSI_IN_SLAVE_MODE |
- SH_FSI_OFMT(I2S) |
- SH_FSI_IFMT(I2S),
-};
-
-static struct resource fsi_resources[] = {
- [0] = {
- .name = "FSI",
- .start = 0xFE3C0000,
- .end = 0xFE3C021d,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = 108,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device fsi_device = {
- .name = "sh_fsi",
- .id = 0,
- .num_resources = ARRAY_SIZE(fsi_resources),
- .resource = fsi_resources,
- .dev = {
- .platform_data = &fsi_info,
- },
- .archdata = {
- .hwblk_id = HWBLK_SPU, /* FSI needs SPU hwblk */
- },
-};
-
static struct platform_device *ecovec_devices[] __initdata = {
&heartbeat_device,
&nor_flash_device,
@@ -759,16 +503,8 @@ static struct platform_device *ecovec_devices[] __initdata = {
&ceu0_device,
&ceu1_device,
&keysc_device,
-#ifdef CONFIG_MFD_SH_MOBILE_SDHI
&sdhi0_device,
&sdhi1_device,
-#else
- &msiof0_device,
-#endif
- &camera_devices[0],
- &camera_devices[1],
- &camera_devices[2],
- &fsi_device,
};
#define EEPROM_ADDR 0x50
@@ -824,8 +560,6 @@ extern char ecovec24_sdram_leave_end;
static int __init arch_setup(void)
{
- struct clk *clk;
-
/* register board specific self-refresh code */
sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF,
&ecovec24_sdram_enter_start,
@@ -1039,8 +773,7 @@ static int __init arch_setup(void)
gpio_direction_input(GPIO_PTR5);
gpio_direction_input(GPIO_PTR6);
-#ifdef CONFIG_MFD_SH_MOBILE_SDHI
- /* enable SDHI0 on CN11 (needs DS2.4 set to ON) */
+ /* enable SDHI0 (needs DS2.4 set to ON) */
gpio_request(GPIO_FN_SDHI0CD, NULL);
gpio_request(GPIO_FN_SDHI0WP, NULL);
gpio_request(GPIO_FN_SDHI0CMD, NULL);
@@ -1052,7 +785,7 @@ static int __init arch_setup(void)
gpio_request(GPIO_PTB6, NULL);
gpio_direction_output(GPIO_PTB6, 0);
- /* enable SDHI1 on CN12 (needs DS2.6,7 set to ON,OFF) */
+ /* enable SDHI1 (needs DS2.6,7 set to ON,OFF) */
gpio_request(GPIO_FN_SDHI1CD, NULL);
gpio_request(GPIO_FN_SDHI1WP, NULL);
gpio_request(GPIO_FN_SDHI1CMD, NULL);
@@ -1066,59 +799,8 @@ static int __init arch_setup(void)
/* I/O buffer drive ability is high for SDHI1 */
ctrl_outw((ctrl_inw(IODRIVEA) & ~0x3000) | 0x2000 , IODRIVEA);
-#else
- /* enable MSIOF0 on CN11 (needs DS2.4 set to OFF) */
- gpio_request(GPIO_FN_MSIOF0_TXD, NULL);
- gpio_request(GPIO_FN_MSIOF0_RXD, NULL);
- gpio_request(GPIO_FN_MSIOF0_TSCK, NULL);
- gpio_request(GPIO_PTM4, NULL); /* software CS control of TSYNC pin */
- gpio_direction_output(GPIO_PTM4, 1); /* active low CS */
- gpio_request(GPIO_PTB6, NULL); /* 3.3V power control */
- gpio_direction_output(GPIO_PTB6, 0); /* disable power by default */
- gpio_request(GPIO_PTY6, NULL); /* write protect */
- gpio_direction_input(GPIO_PTY6);
- gpio_request(GPIO_PTY7, NULL); /* card detect */
- gpio_direction_input(GPIO_PTY7);
-
- spi_register_board_info(spi_bus, ARRAY_SIZE(spi_bus));
-#endif
-
- /* enable Video */
- gpio_request(GPIO_PTU2, NULL);
- gpio_direction_output(GPIO_PTU2, 1);
-
- /* enable Camera */
- gpio_request(GPIO_PTA3, NULL);
- gpio_request(GPIO_PTA4, NULL);
- gpio_direction_output(GPIO_PTA3, 0);
- gpio_direction_output(GPIO_PTA4, 0);
-
- /* enable FSI */
- gpio_request(GPIO_FN_FSIMCKB, NULL);
- gpio_request(GPIO_FN_FSIIBSD, NULL);
- gpio_request(GPIO_FN_FSIOBSD, NULL);
- gpio_request(GPIO_FN_FSIIBBCK, NULL);
- gpio_request(GPIO_FN_FSIIBLRCK, NULL);
- gpio_request(GPIO_FN_FSIOBBCK, NULL);
- gpio_request(GPIO_FN_FSIOBLRCK, NULL);
- gpio_request(GPIO_FN_CLKAUDIOBO, NULL);
-
- /* change parent of FSI B */
- clk = clk_get(NULL, "fsib_clk");
- clk_register(&fsimckb_clk);
- clk_set_parent(clk, &fsimckb_clk);
- clk_set_rate(clk, 11000);
- clk_set_rate(&fsimckb_clk, 11000);
- clk_put(clk);
-
- gpio_request(GPIO_PTU0, NULL);
- gpio_direction_output(GPIO_PTU0, 0);
- mdelay(20);
/* enable I2C device */
- i2c_register_board_info(0, i2c0_devices,
- ARRAY_SIZE(i2c0_devices));
-
i2c_register_board_info(1, i2c1_devices,
ARRAY_SIZE(i2c1_devices));
diff --git a/trunk/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c b/trunk/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c
index e9b970846c41..8ccb1cc8b589 100644
--- a/trunk/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c
+++ b/trunk/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c
@@ -273,12 +273,6 @@ int kfr2r09_lcd_setup(void *board_data, void *sohandle,
return 0;
}
-void kfr2r09_lcd_start(void *board_data, void *sohandle,
- struct sh_mobile_lcdc_sys_bus_ops *so)
-{
- write_memory_start(sohandle, so);
-}
-
#define CTRL_CKSW 0x10
#define CTRL_C10 0x20
#define CTRL_CPSW 0x80
diff --git a/trunk/arch/sh/boards/mach-kfr2r09/setup.c b/trunk/arch/sh/boards/mach-kfr2r09/setup.c
index 5d7b5d92475e..87438d6603d6 100644
--- a/trunk/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/trunk/arch/sh/boards/mach-kfr2r09/setup.c
@@ -19,7 +19,6 @@
#include
#include
#include
-#include
#include
#include
#include
@@ -150,7 +149,6 @@ static struct sh_mobile_lcdc_info kfr2r09_sh_lcdc_info = {
},
.board_cfg = {
.setup_sys = kfr2r09_lcd_setup,
- .start_transfer = kfr2r09_lcd_start,
.display_on = kfr2r09_lcd_on,
.display_off = kfr2r09_lcd_off,
},
@@ -257,9 +255,6 @@ static struct i2c_board_info kfr2r09_i2c_camera = {
static struct clk *camera_clk;
-/* set VIO_CKO clock to 25MHz */
-#define CEU_MCLK_FREQ 25000000
-
#define DRVCRB 0xA405018C
static int camera_power(struct device *dev, int mode)
{
@@ -272,7 +267,8 @@ static int camera_power(struct device *dev, int mode)
if (IS_ERR(camera_clk))
return PTR_ERR(camera_clk);
- rate = clk_round_rate(camera_clk, CEU_MCLK_FREQ);
+ /* set VIO_CKO clock to 25MHz */
+ rate = clk_round_rate(camera_clk, 25000000);
ret = clk_set_rate(camera_clk, rate);
if (ret < 0)
goto eclkrate;
@@ -322,17 +318,11 @@ static int camera_power(struct device *dev, int mode)
return ret;
}
-static struct rj54n1_pdata rj54n1_priv = {
- .mclk_freq = CEU_MCLK_FREQ,
- .ioctl_high = false,
-};
-
static struct soc_camera_link rj54n1_link = {
.power = camera_power,
.board_info = &kfr2r09_i2c_camera,
.i2c_adapter_id = 1,
.module_name = "rj54n1cb0c",
- .priv = &rj54n1_priv,
};
static struct platform_device kfr2r09_camera = {
diff --git a/trunk/arch/sh/boards/mach-migor/setup.c b/trunk/arch/sh/boards/mach-migor/setup.c
index 507c77be476d..9099b6da9957 100644
--- a/trunk/arch/sh/boards/mach-migor/setup.c
+++ b/trunk/arch/sh/boards/mach-migor/setup.c
@@ -432,27 +432,23 @@ static struct i2c_board_info migor_i2c_camera[] = {
static struct ov772x_camera_info ov7725_info = {
.buswidth = SOCAM_DATAWIDTH_8,
-};
-
-static struct soc_camera_link ov7725_link = {
- .power = ov7725_power,
- .board_info = &migor_i2c_camera[0],
- .i2c_adapter_id = 0,
- .module_name = "ov772x",
- .priv = &ov7725_info,
+ .link = {
+ .power = ov7725_power,
+ .board_info = &migor_i2c_camera[0],
+ .i2c_adapter_id = 0,
+ .module_name = "ov772x",
+ },
};
static struct tw9910_video_info tw9910_info = {
.buswidth = SOCAM_DATAWIDTH_8,
.mpout = TW9910_MPO_FIELD,
-};
-
-static struct soc_camera_link tw9910_link = {
- .power = tw9910_power,
- .board_info = &migor_i2c_camera[1],
- .i2c_adapter_id = 0,
- .module_name = "tw9910",
- .priv = &tw9910_info,
+ .link = {
+ .power = tw9910_power,
+ .board_info = &migor_i2c_camera[1],
+ .i2c_adapter_id = 0,
+ .module_name = "tw9910",
+ }
};
static struct platform_device migor_camera[] = {
@@ -460,13 +456,13 @@ static struct platform_device migor_camera[] = {
.name = "soc-camera-pdrv",
.id = 0,
.dev = {
- .platform_data = &ov7725_link,
+ .platform_data = &ov7725_info.link,
},
}, {
.name = "soc-camera-pdrv",
.id = 1,
.dev = {
- .platform_data = &tw9910_link,
+ .platform_data = &tw9910_info.link,
},
},
};
diff --git a/trunk/arch/sh/boards/mach-se/7722/irq.c b/trunk/arch/sh/boards/mach-se/7722/irq.c
index b221b6842b0d..4eb31acfafef 100644
--- a/trunk/arch/sh/boards/mach-se/7722/irq.c
+++ b/trunk/arch/sh/boards/mach-se/7722/irq.c
@@ -57,16 +57,15 @@ static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc)
*/
void __init init_se7722_IRQ(void)
{
- int i, irq;
+ int i;
ctrl_outw(0, IRQ01_MASK); /* disable all irqs */
ctrl_outw(0x2000, 0xb03fffec); /* mrshpc irq enable */
for (i = 0; i < SE7722_FPGA_IRQ_NR; i++) {
- irq = create_irq();
- if (irq < 0)
+ se7722_fpga_irq[i] = create_irq();
+ if (se7722_fpga_irq[i] < 0)
return;
- se7722_fpga_irq[i] = irq;
set_irq_chip_and_handler_name(se7722_fpga_irq[i],
&se7722_irq_chip,
diff --git a/trunk/arch/sh/boards/mach-se/7724/setup.c b/trunk/arch/sh/boards/mach-se/7724/setup.c
index 5d0f70b46c97..4b0f0c0dc2b8 100644
--- a/trunk/arch/sh/boards/mach-se/7724/setup.c
+++ b/trunk/arch/sh/boards/mach-se/7724/setup.c
@@ -514,13 +514,6 @@ static struct platform_device *ms7724se_devices[] __initdata = {
&sdhi1_cn8_device,
};
-/* I2C device */
-static struct i2c_board_info i2c0_devices[] = {
- {
- I2C_BOARD_INFO("ak4642", 0x12),
- },
-};
-
#define EEPROM_OP 0xBA206000
#define EEPROM_ADR 0xBA206004
#define EEPROM_DATA 0xBA20600C
@@ -582,16 +575,6 @@ extern char ms7724se_sdram_enter_end;
extern char ms7724se_sdram_leave_start;
extern char ms7724se_sdram_leave_end;
-
-static int __init arch_setup(void)
-{
- /* enable I2C device */
- i2c_register_board_info(0, i2c0_devices,
- ARRAY_SIZE(i2c0_devices));
- return 0;
-}
-arch_initcall(arch_setup);
-
static int __init devices_setup(void)
{
u16 sw = ctrl_inw(SW4140); /* select camera, monitor */
diff --git a/trunk/arch/sh/configs/ecovec24-romimage_defconfig b/trunk/arch/sh/configs/ecovec24-romimage_defconfig
index 46874704e4e7..0774924623cc 100644
--- a/trunk/arch/sh/configs/ecovec24-romimage_defconfig
+++ b/trunk/arch/sh/configs/ecovec24-romimage_defconfig
@@ -203,7 +203,7 @@ CONFIG_MMU=y
CONFIG_PAGE_OFFSET=0x80000000
CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_MEMORY_START=0x08000000
-CONFIG_MEMORY_SIZE=0x10000000
+CONFIG_MEMORY_SIZE=0x08000000
CONFIG_29BIT=y
# CONFIG_X2TLB is not set
CONFIG_VSYSCALL=y
diff --git a/trunk/arch/sh/configs/ecovec24_defconfig b/trunk/arch/sh/configs/ecovec24_defconfig
index cad918437ca7..ac6469718a2c 100644
--- a/trunk/arch/sh/configs/ecovec24_defconfig
+++ b/trunk/arch/sh/configs/ecovec24_defconfig
@@ -204,7 +204,7 @@ CONFIG_MMU=y
CONFIG_PAGE_OFFSET=0x80000000
CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_MEMORY_START=0x08000000
-CONFIG_MEMORY_SIZE=0x10000000
+CONFIG_MEMORY_SIZE=0x08000000
CONFIG_29BIT=y
# CONFIG_X2TLB is not set
CONFIG_VSYSCALL=y
diff --git a/trunk/arch/sh/configs/rts7751r2d1_defconfig b/trunk/arch/sh/configs/rts7751r2d1_defconfig
index 6f1126b3e487..f521e82cc19e 100644
--- a/trunk/arch/sh/configs/rts7751r2d1_defconfig
+++ b/trunk/arch/sh/configs/rts7751r2d1_defconfig
@@ -324,7 +324,7 @@ CONFIG_ENTRY_OFFSET=0x00001000
# CONFIG_UBC_WAKEUP is not set
CONFIG_CMDLINE_OVERWRITE=y
# CONFIG_CMDLINE_EXTEND is not set
-CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 root=/dev/sda1"
+CONFIG_CMDLINE="console=tty0 console=ttySC0,115200 root=/dev/sda1 earlyprintk=serial"
#
# Bus options
diff --git a/trunk/arch/sh/configs/rts7751r2dplus_defconfig b/trunk/arch/sh/configs/rts7751r2dplus_defconfig
index 9215bbb13d6f..a156cd1e0617 100644
--- a/trunk/arch/sh/configs/rts7751r2dplus_defconfig
+++ b/trunk/arch/sh/configs/rts7751r2dplus_defconfig
@@ -324,7 +324,7 @@ CONFIG_ENTRY_OFFSET=0x00001000
# CONFIG_UBC_WAKEUP is not set
CONFIG_CMDLINE_OVERWRITE=y
# CONFIG_CMDLINE_EXTEND is not set
-CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 root=/dev/sda1"
+CONFIG_CMDLINE="console=tty0 console=ttySC0,115200 root=/dev/sda1 earlyprintk=serial"
#
# Bus options
diff --git a/trunk/arch/sh/include/asm/elf.h b/trunk/arch/sh/include/asm/elf.h
index ac04255022b6..ccb1d93bb043 100644
--- a/trunk/arch/sh/include/asm/elf.h
+++ b/trunk/arch/sh/include/asm/elf.h
@@ -114,6 +114,7 @@ typedef struct user_fpu_struct elf_fpregset_t;
*/
#define CORE_DUMP_USE_REGSET
+#define USE_ELF_CORE_DUMP
#define ELF_FDPIC_CORE_EFLAGS EF_SH_FDPIC
#define ELF_EXEC_PAGESIZE PAGE_SIZE
diff --git a/trunk/arch/sh/include/asm/io.h b/trunk/arch/sh/include/asm/io.h
index 026dd659a640..512cd3e9d0ca 100644
--- a/trunk/arch/sh/include/asm/io.h
+++ b/trunk/arch/sh/include/asm/io.h
@@ -233,16 +233,10 @@ unsigned long long poke_real_address_q(unsigned long long addr,
* doesn't exist, so everything must go through page tables.
*/
#ifdef CONFIG_MMU
-void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
- unsigned long flags, void *caller);
+void __iomem *__ioremap(unsigned long offset, unsigned long size,
+ unsigned long flags);
void __iounmap(void __iomem *addr);
-static inline void __iomem *
-__ioremap(unsigned long offset, unsigned long size, unsigned long flags)
-{
- return __ioremap_caller(offset, size, flags, __builtin_return_address(0));
-}
-
static inline void __iomem *
__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
{
@@ -277,7 +271,6 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
return __ioremap(offset, size, flags);
}
#else
-#define __ioremap(offset, size, flags) ((void __iomem *)(offset))
#define __ioremap_mode(offset, size, flags) ((void __iomem *)(offset))
#define __iounmap(addr) do { } while (0)
#endif /* CONFIG_MMU */
diff --git a/trunk/arch/sh/include/asm/pgtable_32.h b/trunk/arch/sh/include/asm/pgtable_32.h
index 5003ee86f67b..b35435516203 100644
--- a/trunk/arch/sh/include/asm/pgtable_32.h
+++ b/trunk/arch/sh/include/asm/pgtable_32.h
@@ -344,8 +344,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
#define pte_special(pte) ((pte).pte_low & _PAGE_SPECIAL)
#ifdef CONFIG_X2TLB
-#define pte_write(pte) \
- ((pte).pte_high & (_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE))
+#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE)
#else
#define pte_write(pte) ((pte).pte_low & _PAGE_RW)
#endif
@@ -359,7 +358,7 @@ static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }
* individually toggled (and user permissions are entirely decoupled from
* kernel permissions), we attempt to couple them a bit more sanely here.
*/
-PTE_BIT_FUNC(high, wrprotect, &= ~(_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE));
+PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE);
PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE);
#else
diff --git a/trunk/arch/sh/include/asm/unistd_32.h b/trunk/arch/sh/include/asm/unistd_32.h
index f18c4f9baf27..f3fd1b9eb6b1 100644
--- a/trunk/arch/sh/include/asm/unistd_32.h
+++ b/trunk/arch/sh/include/asm/unistd_32.h
@@ -345,9 +345,8 @@
#define __NR_pwritev 334
#define __NR_rt_tgsigqueueinfo 335
#define __NR_perf_event_open 336
-#define __NR_recvmmsg 337
-#define NR_syscalls 338
+#define NR_syscalls 337
#ifdef __KERNEL__
diff --git a/trunk/arch/sh/include/asm/unistd_64.h b/trunk/arch/sh/include/asm/unistd_64.h
index 3e7645d11130..343ce8f073ea 100644
--- a/trunk/arch/sh/include/asm/unistd_64.h
+++ b/trunk/arch/sh/include/asm/unistd_64.h
@@ -385,11 +385,10 @@
#define __NR_pwritev 362
#define __NR_rt_tgsigqueueinfo 363
#define __NR_perf_event_open 364
-#define __NR_recvmmsg 365
#ifdef __KERNEL__
-#define NR_syscalls 366
+#define NR_syscalls 365
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/trunk/arch/sh/include/mach-kfr2r09/mach/kfr2r09.h b/trunk/arch/sh/include/mach-kfr2r09/mach/kfr2r09.h
index 484ef42c2fb5..174374e19547 100644
--- a/trunk/arch/sh/include/mach-kfr2r09/mach/kfr2r09.h
+++ b/trunk/arch/sh/include/mach-kfr2r09/mach/kfr2r09.h
@@ -8,8 +8,6 @@ void kfr2r09_lcd_on(void *board_data);
void kfr2r09_lcd_off(void *board_data);
int kfr2r09_lcd_setup(void *board_data, void *sys_ops_handle,
struct sh_mobile_lcdc_sys_bus_ops *sys_ops);
-void kfr2r09_lcd_start(void *board_data, void *sys_ops_handle,
- struct sh_mobile_lcdc_sys_bus_ops *sys_ops);
#else
static inline void kfr2r09_lcd_on(void *board_data) {}
static inline void kfr2r09_lcd_off(void *board_data) {}
@@ -18,10 +16,6 @@ static inline int kfr2r09_lcd_setup(void *board_data, void *sys_ops_handle,
{
return -ENODEV;
}
-static inline void kfr2r09_lcd_start(void *board_data, void *sys_ops_handle,
- struct sh_mobile_lcdc_sys_bus_ops *sys_ops)
-{
-}
#endif
#endif /* __ASM_SH_KFR2R09_H */
diff --git a/trunk/arch/sh/kernel/Makefile b/trunk/arch/sh/kernel/Makefile
index 0d587da1ef12..0471a3eb25ed 100644
--- a/trunk/arch/sh/kernel/Makefile
+++ b/trunk/arch/sh/kernel/Makefile
@@ -22,10 +22,11 @@ obj-y := debugtraps.o dma-nommu.o dumpstack.o \
obj-y += cpu/
obj-$(CONFIG_VSYSCALL) += vsyscall/
obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o early_printk.o
+obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_MODULES) += sh_ksyms_$(BITS).o module.o
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/trunk/arch/sh/kernel/cpu/irq/ipr.c b/trunk/arch/sh/kernel/cpu/irq/ipr.c
index 9282d965a1b6..c1508a90fc6a 100644
--- a/trunk/arch/sh/kernel/cpu/irq/ipr.c
+++ b/trunk/arch/sh/kernel/cpu/irq/ipr.c
@@ -17,17 +17,16 @@
* for more details.
*/
#include
-#include
-#include
#include
-#include
#include
+#include
+#include
#include
static inline struct ipr_desc *get_ipr_desc(unsigned int irq)
{
struct irq_chip *chip = get_irq_chip(irq);
- return container_of(chip, struct ipr_desc, chip);
+ return (void *)((char *)chip - offsetof(struct ipr_desc, chip));
}
static void disable_ipr_irq(unsigned int irq)
diff --git a/trunk/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/trunk/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index 114c7cee7184..8555c05e8667 100644
--- a/trunk/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/trunk/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -59,48 +59,32 @@ static struct intc_prio_reg prio_registers[] __initdata = {
static DECLARE_INTC_DESC(intc_desc, "sh7619", vectors, NULL,
NULL, prio_registers, NULL);
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xf8400000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 88, 88, 88, 88 },
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-static struct plat_sci_port scif1_platform_data = {
- .mapbase = 0xf8410000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 92, 92, 92, 92 },
-};
-
-static struct platform_device scif1_device = {
- .name = "sh-sci",
- .id = 1,
- .dev = {
- .platform_data = &scif1_platform_data,
- },
-};
-
-static struct plat_sci_port scif2_platform_data = {
- .mapbase = 0xf8420000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 96, 96, 96, 96 },
-};
-
-static struct platform_device scif2_device = {
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xf8400000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 88, 88, 88, 88 },
+ }, {
+ .mapbase = 0xf8410000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 92, 92, 92, 92 },
+ }, {
+ .mapbase = 0xf8420000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 96, 96, 96, 96 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
.name = "sh-sci",
- .id = 2,
+ .id = -1,
.dev = {
- .platform_data = &scif2_platform_data,
+ .platform_data = sci_platform_data,
},
};
@@ -192,9 +176,7 @@ static struct platform_device cmt1_device = {
};
static struct platform_device *sh7619_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
+ &sci_device,
 	&eth_device,
&cmt0_device,
&cmt1_device,
@@ -213,9 +195,6 @@ void __init plat_irq_setup(void)
}
static struct platform_device *sh7619_early_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
&cmt0_device,
&cmt1_device,
};
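
The sh7619 hunks above collapse the per-port SCIF platform devices into a
single sh-sci device whose platform data is an array of plat_sci_port entries
terminated by an entry with .flags = 0. Assuming the driver walks the table
until it reaches that zero-flags sentinel (which is what the added terminator
entries suggest), the walk looks roughly like the stand-alone sketch below,
reusing the mapbase values from this hunk with stand-in types:

    #include <stdio.h>

    struct port_entry {                     /* stands in for plat_sci_port */
            unsigned long mapbase;
            unsigned int flags;             /* 0 terminates the table */
    };

    static const struct port_entry ports[] = {
            { .mapbase = 0xf8400000, .flags = 1 },
            { .mapbase = 0xf8410000, .flags = 1 },
            { .mapbase = 0xf8420000, .flags = 1 },
            { .flags = 0 },                 /* sentinel */
    };

    int main(void)
    {
            const struct port_entry *p;
            int i = 0;

            for (p = ports; p->flags; p++, i++)
                    printf("port %d at 0x%08lx\n", i, p->mapbase);
            return 0;
    }
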
diff --git a/trunk/arch/sh/kernel/cpu/sh2a/setup-mxg.c b/trunk/arch/sh/kernel/cpu/sh2a/setup-mxg.c
index 8f669dc9b0da..b67376445315 100644
--- a/trunk/arch/sh/kernel/cpu/sh2a/setup-mxg.c
+++ b/trunk/arch/sh/kernel/cpu/sh2a/setup-mxg.c
@@ -207,23 +207,27 @@ static struct platform_device mtu2_2_device = {
.num_resources = ARRAY_SIZE(mtu2_2_resources),
};
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xff804000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 220, 220, 220, 220 },
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xff804000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 220, 220, 220, 220 },
+ }, {
+ .flags = 0,
+ }
};
-static struct platform_device scif0_device = {
+static struct platform_device sci_device = {
.name = "sh-sci",
- .id = 0,
+ .id = -1,
.dev = {
- .platform_data = &scif0_platform_data,
+ .platform_data = sci_platform_data,
},
};
static struct platform_device *mxg_devices[] __initdata = {
- &scif0_device,
+ &sci_device,
&mtu2_0_device,
&mtu2_1_device,
&mtu2_2_device,
@@ -242,7 +246,6 @@ void __init plat_irq_setup(void)
}
static struct platform_device *mxg_early_devices[] __initdata = {
- &scif0_device,
&mtu2_0_device,
&mtu2_1_device,
&mtu2_2_device,
diff --git a/trunk/arch/sh/kernel/cpu/sh2a/setup-sh7201.c b/trunk/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
index 4ccfeb59eb1a..fbde5b75deb9 100644
--- a/trunk/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
+++ b/trunk/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
@@ -177,123 +177,57 @@ static struct intc_mask_reg mask_registers[] __initdata = {
static DECLARE_INTC_DESC(intc_desc, "sh7201", vectors, groups,
mask_registers, prio_registers, NULL);
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xfffe8000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 180, 180, 180, 180 }
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-static struct plat_sci_port scif1_platform_data = {
- .mapbase = 0xfffe8800,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 184, 184, 184, 184 }
-};
-
-static struct platform_device scif1_device = {
- .name = "sh-sci",
- .id = 1,
- .dev = {
- .platform_data = &scif1_platform_data,
- },
-};
-
-static struct plat_sci_port scif2_platform_data = {
- .mapbase = 0xfffe9000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 188, 188, 188, 188 }
-};
-
-static struct platform_device scif2_device = {
- .name = "sh-sci",
- .id = 2,
- .dev = {
- .platform_data = &scif2_platform_data,
- },
-};
-
-static struct plat_sci_port scif3_platform_data = {
- .mapbase = 0xfffe9800,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 192, 192, 192, 192 }
-};
-
-static struct platform_device scif3_device = {
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xfffe8000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 180, 180, 180, 180 }
+ }, {
+ .mapbase = 0xfffe8800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 184, 184, 184, 184 }
+ }, {
+ .mapbase = 0xfffe9000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 188, 188, 188, 188 }
+ }, {
+ .mapbase = 0xfffe9800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 192, 192, 192, 192 }
+ }, {
+ .mapbase = 0xfffea000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 196, 196, 196, 196 }
+ }, {
+ .mapbase = 0xfffea800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 200, 200, 200, 200 }
+ }, {
+ .mapbase = 0xfffeb000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 204, 204, 204, 204 }
+ }, {
+ .mapbase = 0xfffeb800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 208, 208, 208, 208 }
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
.name = "sh-sci",
- .id = 3,
- .dev = {
- .platform_data = &scif3_platform_data,
- },
-};
-
-static struct plat_sci_port scif4_platform_data = {
- .mapbase = 0xfffea000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 196, 196, 196, 196 }
-};
-
-static struct platform_device scif4_device = {
- .name = "sh-sci",
- .id = 4,
- .dev = {
- .platform_data = &scif4_platform_data,
- },
-};
-
-static struct plat_sci_port scif5_platform_data = {
- .mapbase = 0xfffea800,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 200, 200, 200, 200 }
-};
-
-static struct platform_device scif5_device = {
- .name = "sh-sci",
- .id = 5,
- .dev = {
- .platform_data = &scif5_platform_data,
- },
-};
-
-static struct plat_sci_port scif6_platform_data = {
- .mapbase = 0xfffeb000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 204, 204, 204, 204 }
-};
-
-static struct platform_device scif6_device = {
- .name = "sh-sci",
- .id = 6,
- .dev = {
- .platform_data = &scif6_platform_data,
- },
-};
-
-static struct plat_sci_port scif7_platform_data = {
- .mapbase = 0xfffeb800,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 208, 208, 208, 208 }
-};
-
-static struct platform_device scif7_device = {
- .name = "sh-sci",
- .id = 7,
+ .id = -1,
.dev = {
- .platform_data = &scif7_platform_data,
+ .platform_data = sci_platform_data,
},
};
@@ -411,14 +345,7 @@ static struct platform_device mtu2_2_device = {
};
static struct platform_device *sh7201_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
- &scif3_device,
- &scif4_device,
- &scif5_device,
- &scif6_device,
- &scif7_device,
+ &sci_device,
&rtc_device,
&mtu2_0_device,
&mtu2_1_device,
@@ -438,14 +365,6 @@ void __init plat_irq_setup(void)
}
static struct platform_device *sh7201_early_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
- &scif3_device,
- &scif4_device,
- &scif5_device,
- &scif6_device,
- &scif7_device,
&mtu2_0_device,
&mtu2_1_device,
&mtu2_2_device,
diff --git a/trunk/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/trunk/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
index 3136966cc9b3..d3fd536c9a84 100644
--- a/trunk/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
+++ b/trunk/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
@@ -173,63 +173,37 @@ static struct intc_mask_reg mask_registers[] __initdata = {
static DECLARE_INTC_DESC(intc_desc, "sh7203", vectors, groups,
mask_registers, prio_registers, NULL);
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xfffe8000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 192, 192, 192, 192 },
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xfffe8000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 192, 192, 192, 192 },
+ }, {
+ .mapbase = 0xfffe8800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 196, 196, 196, 196 },
+ }, {
+ .mapbase = 0xfffe9000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 200, 200, 200, 200 },
+ }, {
+ .mapbase = 0xfffe9800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 204, 204, 204, 204 },
+ }, {
+ .flags = 0,
+ }
};
-static struct platform_device scif0_device = {
+static struct platform_device sci_device = {
.name = "sh-sci",
- .id = 0,
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-static struct plat_sci_port scif1_platform_data = {
- .mapbase = 0xfffe8800,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 196, 196, 196, 196 },
-};
-
-static struct platform_device scif1_device = {
- .name = "sh-sci",
- .id = 1,
- .dev = {
- .platform_data = &scif1_platform_data,
- },
-};
-
-static struct plat_sci_port scif2_platform_data = {
- .mapbase = 0xfffe9000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 200, 200, 200, 200 },
-};
-
-static struct platform_device scif2_device = {
- .name = "sh-sci",
- .id = 2,
- .dev = {
- .platform_data = &scif2_platform_data,
- },
-};
-
-static struct plat_sci_port scif3_platform_data = {
- .mapbase = 0xfffe9800,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 204, 204, 204, 204 },
-};
-
-static struct platform_device scif3_device = {
- .name = "sh-sci",
- .id = 3,
+ .id = -1,
.dev = {
- .platform_data = &scif3_platform_data,
+ .platform_data = sci_platform_data,
},
};
@@ -380,10 +354,7 @@ static struct platform_device rtc_device = {
};
static struct platform_device *sh7203_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
- &scif3_device,
+ &sci_device,
&cmt0_device,
&cmt1_device,
&mtu2_0_device,
@@ -404,10 +375,6 @@ void __init plat_irq_setup(void)
}
static struct platform_device *sh7203_early_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
- &scif3_device,
&cmt0_device,
&cmt1_device,
&mtu2_0_device,
diff --git a/trunk/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/trunk/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
index 064873585a8b..a9ccc5e8d9e9 100644
--- a/trunk/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
+++ b/trunk/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
@@ -133,63 +133,37 @@ static struct intc_mask_reg mask_registers[] __initdata = {
static DECLARE_INTC_DESC(intc_desc, "sh7206", vectors, groups,
mask_registers, prio_registers, NULL);
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xfffe8000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 240, 240, 240, 240 },
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xfffe8000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 240, 240, 240, 240 },
+ }, {
+ .mapbase = 0xfffe8800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 244, 244, 244, 244 },
+ }, {
+ .mapbase = 0xfffe9000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 248, 248, 248, 248 },
+ }, {
+ .mapbase = 0xfffe9800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 252, 252, 252, 252 },
+ }, {
+ .flags = 0,
+ }
};
-static struct platform_device scif0_device = {
+static struct platform_device sci_device = {
.name = "sh-sci",
- .id = 0,
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-static struct plat_sci_port scif1_platform_data = {
- .mapbase = 0xfffe8800,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 244, 244, 244, 244 },
-};
-
-static struct platform_device scif1_device = {
- .name = "sh-sci",
- .id = 1,
- .dev = {
- .platform_data = &scif1_platform_data,
- },
-};
-
-static struct plat_sci_port scif2_platform_data = {
- .mapbase = 0xfffe9000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 248, 248, 248, 248 },
-};
-
-static struct platform_device scif2_device = {
- .name = "sh-sci",
- .id = 2,
- .dev = {
- .platform_data = &scif2_platform_data,
- },
-};
-
-static struct plat_sci_port scif3_platform_data = {
- .mapbase = 0xfffe9800,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 252, 252, 252, 252 },
-};
-
-static struct platform_device scif3_device = {
- .name = "sh-sci",
- .id = 3,
+ .id = -1,
.dev = {
- .platform_data = &scif3_platform_data,
+ .platform_data = sci_platform_data,
},
};
@@ -351,10 +325,7 @@ static struct platform_device mtu2_2_device = {
};
static struct platform_device *sh7206_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
- &scif3_device,
+ &sci_device,
&cmt0_device,
&cmt1_device,
&mtu2_0_device,
@@ -375,10 +346,6 @@ void __init plat_irq_setup(void)
}
static struct platform_device *sh7206_early_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
- &scif3_device,
&cmt0_device,
&cmt1_device,
&mtu2_0_device,
diff --git a/trunk/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/trunk/arch/sh/kernel/cpu/sh3/setup-sh7705.c
index 7b892d60e3a0..c23105983878 100644
--- a/trunk/arch/sh/kernel/cpu/sh3/setup-sh7705.c
+++ b/trunk/arch/sh/kernel/cpu/sh3/setup-sh7705.c
@@ -67,33 +67,27 @@ static struct intc_prio_reg prio_registers[] __initdata = {
static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, NULL,
NULL, prio_registers, NULL);
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xa4410000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 56, 56, 56 },
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-static struct plat_sci_port scif1_platform_data = {
- .mapbase = 0xa4400000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 52, 52, 52 },
-};
-
-static struct platform_device scif1_device = {
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xa4410000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 56, 56, 56 },
+ }, {
+ .mapbase = 0xa4400000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 52, 52, 52 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
.name = "sh-sci",
- .id = 1,
+ .id = -1,
.dev = {
- .platform_data = &scif1_platform_data,
+ .platform_data = sci_platform_data,
},
};
@@ -216,11 +210,10 @@ static struct platform_device tmu2_device = {
};
static struct platform_device *sh7705_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
+ &sci_device,
&rtc_device,
};
@@ -232,8 +225,6 @@ static int __init sh7705_devices_setup(void)
arch_initcall(sh7705_devices_setup);
static struct platform_device *sh7705_early_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
diff --git a/trunk/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/trunk/arch/sh/kernel/cpu/sh3/setup-sh770x.c
index bc0c4f68c7c7..347ab35d0697 100644
--- a/trunk/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+++ b/trunk/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -106,55 +106,44 @@ static struct platform_device rtc_device = {
.resource = rtc_resources,
};
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xfffffe80,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCI,
- .irqs = { 23, 23, 23, 0 },
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .dev = {
- .platform_data = &scif0_platform_data,
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xfffffe80,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCI,
+ .irqs = { 23, 23, 23, 0 },
},
-};
#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709)
-static struct plat_sci_port scif1_platform_data = {
- .mapbase = 0xa4000150,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 56, 56, 56, 56 },
-};
-
-static struct platform_device scif1_device = {
- .name = "sh-sci",
- .id = 1,
- .dev = {
- .platform_data = &scif1_platform_data,
+ {
+ .mapbase = 0xa4000150,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 56, 56, 56, 56 },
},
-};
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709)
-static struct plat_sci_port scif2_platform_data = {
- .mapbase = 0xa4000140,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_IRDA,
- .irqs = { 52, 52, 52, 52 },
+ {
+ .mapbase = 0xa4000140,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_IRDA,
+ .irqs = { 52, 52, 52, 52 },
+ },
+#endif
+ {
+ .flags = 0,
+ }
};
-static struct platform_device scif2_device = {
+static struct platform_device sci_device = {
.name = "sh-sci",
- .id = 2,
+ .id = -1,
.dev = {
- .platform_data = &scif2_platform_data,
+ .platform_data = sci_platform_data,
},
};
-#endif
static struct sh_timer_config tmu0_platform_data = {
.name = "TMU0",
@@ -249,19 +238,10 @@ static struct platform_device tmu2_device = {
};
static struct platform_device *sh770x_devices[] __initdata = {
- &scif0_device,
-#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
- defined(CONFIG_CPU_SUBTYPE_SH7707) || \
- defined(CONFIG_CPU_SUBTYPE_SH7709)
- &scif1_device,
-#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
- defined(CONFIG_CPU_SUBTYPE_SH7709)
- &scif2_device,
-#endif
&tmu0_device,
&tmu1_device,
&tmu2_device,
+ &sci_device,
&rtc_device,
};
@@ -273,16 +253,6 @@ static int __init sh770x_devices_setup(void)
arch_initcall(sh770x_devices_setup);
static struct platform_device *sh770x_early_devices[] __initdata = {
- &scif0_device,
-#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
- defined(CONFIG_CPU_SUBTYPE_SH7707) || \
- defined(CONFIG_CPU_SUBTYPE_SH7709)
- &scif1_device,
-#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
- defined(CONFIG_CPU_SUBTYPE_SH7709)
- &scif2_device,
-#endif
&tmu0_device,
&tmu1_device,
&tmu2_device,
diff --git a/trunk/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/trunk/arch/sh/kernel/cpu/sh3/setup-sh7710.c
index 0845a3ad006d..717e90ae1097 100644
--- a/trunk/arch/sh/kernel/cpu/sh3/setup-sh7710.c
+++ b/trunk/arch/sh/kernel/cpu/sh3/setup-sh7710.c
@@ -96,33 +96,28 @@ static struct platform_device rtc_device = {
},
};
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xa4400000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 52, 52, 52, 52 },
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-static struct plat_sci_port scif1_platform_data = {
- .mapbase = 0xa4410000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 56, 56, 56, 56 },
-};
-
-static struct platform_device scif1_device = {
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xa4400000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 52, 52, 52, 52 },
+ }, {
+ .mapbase = 0xa4410000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 56, 56, 56, 56 },
+ }, {
+
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
.name = "sh-sci",
- .id = 1,
+ .id = -1,
.dev = {
- .platform_data = &scif1_platform_data,
+ .platform_data = sci_platform_data,
},
};
@@ -219,11 +214,10 @@ static struct platform_device tmu2_device = {
};
static struct platform_device *sh7710_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
+ &sci_device,
&rtc_device,
};
@@ -235,8 +229,6 @@ static int __init sh7710_devices_setup(void)
arch_initcall(sh7710_devices_setup);
static struct platform_device *sh7710_early_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
diff --git a/trunk/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/trunk/arch/sh/kernel/cpu/sh3/setup-sh7720.c
index a718a6231091..74d8baaf8e96 100644
--- a/trunk/arch/sh/kernel/cpu/sh3/setup-sh7720.c
+++ b/trunk/arch/sh/kernel/cpu/sh3/setup-sh7720.c
@@ -48,33 +48,28 @@ static struct platform_device rtc_device = {
},
};
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xa4430000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 80, 80, 80, 80 },
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-static struct plat_sci_port scif1_platform_data = {
- .mapbase = 0xa4438000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 81, 81, 81, 81 },
-};
-
-static struct platform_device scif1_device = {
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xa4430000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 80, 80, 80, 80 },
+ }, {
+ .mapbase = 0xa4438000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 81, 81, 81, 81 },
+ }, {
+
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
.name = "sh-sci",
- .id = 1,
+ .id = -1,
.dev = {
- .platform_data = &scif1_platform_data,
+ .platform_data = sci_platform_data,
},
};
@@ -374,8 +369,6 @@ static struct platform_device tmu2_device = {
};
static struct platform_device *sh7720_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
&cmt0_device,
&cmt1_device,
&cmt2_device,
@@ -385,6 +378,7 @@ static struct platform_device *sh7720_devices[] __initdata = {
&tmu1_device,
&tmu2_device,
&rtc_device,
+ &sci_device,
&usb_ohci_device,
&usbf_device,
};
@@ -397,8 +391,6 @@ static int __init sh7720_devices_setup(void)
arch_initcall(sh7720_devices_setup);
static struct platform_device *sh7720_early_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
&cmt0_device,
&cmt1_device,
&cmt2_device,
diff --git a/trunk/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/trunk/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
index 4b733715cdb5..de4827df19aa 100644
--- a/trunk/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
+++ b/trunk/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
@@ -15,18 +15,22 @@
#include
#include
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xffe80000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 41, 43, 42 },
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xffe80000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 41, 43, 42 },
+ }, {
+ .flags = 0,
+ }
};
-static struct platform_device scif0_device = {
+static struct platform_device sci_device = {
.name = "sh-sci",
- .id = 0,
+ .id = -1,
.dev = {
- .platform_data = &scif0_platform_data,
+ .platform_data = sci_platform_data,
},
};
@@ -123,7 +127,7 @@ static struct platform_device tmu2_device = {
};
static struct platform_device *sh4202_devices[] __initdata = {
- &scif0_device,
+ &sci_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
@@ -137,7 +141,6 @@ static int __init sh4202_devices_setup(void)
arch_initcall(sh4202_devices_setup);
static struct platform_device *sh4202_early_devices[] __initdata = {
- &scif0_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
diff --git a/trunk/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/trunk/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index b2a9df1af64c..1b8b122e8f3d 100644
--- a/trunk/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/trunk/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -35,33 +35,29 @@ static struct platform_device rtc_device = {
.resource = rtc_resources,
};
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCI,
- .irqs = { 23, 23, 23, 0 },
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-static struct plat_sci_port scif1_platform_data = {
- .mapbase = 0xffe80000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 40, 40, 40 },
+static struct plat_sci_port sci_platform_data[] = {
+ {
+#ifndef CONFIG_SH_RTS7751R2D
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCI,
+ .irqs = { 23, 23, 23, 0 },
+ }, {
+#endif
+ .mapbase = 0xffe80000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 40, 40, 40 },
+ }, {
+ .flags = 0,
+ }
};
-static struct platform_device scif1_device = {
+static struct platform_device sci_device = {
.name = "sh-sci",
- .id = 1,
+ .id = -1,
.dev = {
- .platform_data = &scif1_platform_data,
+ .platform_data = sci_platform_data,
},
};
@@ -225,9 +221,8 @@ static struct platform_device tmu4_device = {
#endif
static struct platform_device *sh7750_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
&rtc_device,
+ &sci_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
@@ -247,8 +242,6 @@ static int __init sh7750_devices_setup(void)
arch_initcall(sh7750_devices_setup);
static struct platform_device *sh7750_early_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
diff --git a/trunk/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/trunk/arch/sh/kernel/cpu/sh4/setup-sh7760.c
index 5b74cc0b43da..7fbb7be9284c 100644
--- a/trunk/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+++ b/trunk/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -126,63 +126,37 @@ static struct intc_vect vectors_irq[] __initdata = {
static DECLARE_INTC_DESC(intc_desc_irq, "sh7760-irq", vectors_irq, groups,
mask_registers, prio_registers, NULL);
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xfe600000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 52, 53, 55, 54 },
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-static struct plat_sci_port scif1_platform_data = {
- .mapbase = 0xfe610000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 72, 73, 75, 74 },
-};
-
-static struct platform_device scif1_device = {
- .name = "sh-sci",
- .id = 1,
- .dev = {
- .platform_data = &scif1_platform_data,
- },
-};
-
-static struct plat_sci_port scif2_platform_data = {
- .mapbase = 0xfe620000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 76, 77, 79, 78 },
-};
-
-static struct platform_device scif2_device = {
- .name = "sh-sci",
- .id = 2,
- .dev = {
- .platform_data = &scif2_platform_data,
- },
-};
-
-static struct plat_sci_port scif3_platform_data = {
- .mapbase = 0xfe480000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCI,
- .irqs = { 80, 81, 82, 0 },
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xfe600000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 52, 53, 55, 54 },
+ }, {
+ .mapbase = 0xfe610000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 72, 73, 75, 74 },
+ }, {
+ .mapbase = 0xfe620000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 76, 77, 79, 78 },
+ }, {
+ .mapbase = 0xfe480000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCI,
+ .irqs = { 80, 81, 82, 0 },
+ }, {
+ .flags = 0,
+ }
};
-static struct platform_device scif3_device = {
+static struct platform_device sci_device = {
.name = "sh-sci",
- .id = 3,
+ .id = -1,
.dev = {
- .platform_data = &scif3_platform_data,
+ .platform_data = sci_platform_data,
},
};
@@ -280,10 +254,7 @@ static struct platform_device tmu2_device = {
static struct platform_device *sh7760_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
- &scif3_device,
+ &sci_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
@@ -297,10 +268,6 @@ static int __init sh7760_devices_setup(void)
arch_initcall(sh7760_devices_setup);
static struct platform_device *sh7760_early_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
- &scif3_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
diff --git a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
index 45eb1bfd42c9..ac4d5672ec1a 100644
--- a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
+++ b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
@@ -15,71 +15,6 @@
#include
#include
-/* Serial */
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 80, 80, 80, 80 },
- .clk = "scif0",
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-static struct plat_sci_port scif1_platform_data = {
- .mapbase = 0xffe10000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 81, 81, 81, 81 },
- .clk = "scif1",
-};
-
-static struct platform_device scif1_device = {
- .name = "sh-sci",
- .id = 1,
- .dev = {
- .platform_data = &scif1_platform_data,
- },
-};
-
-static struct plat_sci_port scif2_platform_data = {
- .mapbase = 0xffe20000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 82, 82, 82, 82 },
- .clk = "scif2",
-};
-
-static struct platform_device scif2_device = {
- .name = "sh-sci",
- .id = 2,
- .dev = {
- .platform_data = &scif2_platform_data,
- },
-};
-
-static struct plat_sci_port scif3_platform_data = {
- .mapbase = 0xffe30000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 83, 83, 83, 83 },
- .clk = "scif3",
-};
-
-static struct platform_device scif3_device = {
- .name = "sh-sci",
- .id = 3,
- .dev = {
- .platform_data = &scif3_platform_data,
- },
-};
-
static struct resource iic0_resources[] = {
[0] = {
.name = "IIC0",
@@ -330,17 +265,52 @@ static struct platform_device tmu2_device = {
.num_resources = ARRAY_SIZE(tmu2_resources),
};
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 80, 80, 80, 80 },
+ .clk = "scif0",
+ }, {
+ .mapbase = 0xffe10000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 81, 81, 81, 81 },
+ .clk = "scif1",
+ }, {
+ .mapbase = 0xffe20000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 82, 82, 82, 82 },
+ .clk = "scif2",
+ }, {
+ .mapbase = 0xffe30000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 83, 83, 83, 83 },
+ .clk = "scif3",
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
static struct platform_device *sh7343_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
- &scif3_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
&iic0_device,
&iic1_device,
+ &sci_device,
&vpu_device,
&veu_device,
&jpu_device,
@@ -358,10 +328,6 @@ static int __init sh7343_devices_setup(void)
arch_initcall(sh7343_devices_setup);
static struct platform_device *sh7343_early_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
- &scif3_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
diff --git a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
index c494c193e3b6..4a9010bf4fd3 100644
--- a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
+++ b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
@@ -18,22 +18,6 @@
#include
#include
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 80, 80, 80, 80 },
- .clk = "scif0",
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
static struct resource iic_resources[] = {
[0] = {
.name = "IIC",
@@ -292,13 +276,33 @@ static struct platform_device tmu2_device = {
.num_resources = ARRAY_SIZE(tmu2_resources),
};
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 80, 80, 80, 80 },
+ .clk = "scif0",
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
static struct platform_device *sh7366_devices[] __initdata = {
- &scif0_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
&iic_device,
+ &sci_device,
&usb_host_device,
&vpu_device,
&veu0_device,
@@ -317,7 +321,6 @@ static int __init sh7366_devices_setup(void)
arch_initcall(sh7366_devices_setup);
static struct platform_device *sh7366_early_devices[] __initdata = {
- &scif0_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
diff --git a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index b5335b5e309c..5491b094cf05 100644
--- a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -20,55 +20,6 @@
#include
#include
-/* Serial */
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 80, 80, 80, 80 },
- .clk = "scif0",
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-static struct plat_sci_port scif1_platform_data = {
- .mapbase = 0xffe10000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 81, 81, 81, 81 },
- .clk = "scif1",
-};
-
-static struct platform_device scif1_device = {
- .name = "sh-sci",
- .id = 1,
- .dev = {
- .platform_data = &scif1_platform_data,
- },
-};
-
-static struct plat_sci_port scif2_platform_data = {
- .mapbase = 0xffe20000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 82, 82, 82, 82 },
- .clk = "scif2",
-};
-
-static struct platform_device scif2_device = {
- .name = "sh-sci",
- .id = 2,
- .dev = {
- .platform_data = &scif2_platform_data,
- },
-};
-
static struct resource rtc_resources[] = {
[0] = {
.start = 0xa465fec0,
@@ -388,6 +339,41 @@ static struct platform_device tmu2_device = {
},
};
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 80, 80, 80, 80 },
+ .clk = "scif0",
+ },
+ {
+ .mapbase = 0xffe10000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 81, 81, 81, 81 },
+ .clk = "scif1",
+ },
+ {
+ .mapbase = 0xffe20000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 82, 82, 82, 82 },
+ .clk = "scif2",
+ },
+ {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
static struct sh_dmae_pdata dma_platform_data = {
.mode = 0,
};
@@ -401,9 +387,6 @@ static struct platform_device dma_device = {
};
static struct platform_device *sh7722_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
@@ -411,6 +394,7 @@ static struct platform_device *sh7722_devices[] __initdata = {
&rtc_device,
&usbf_device,
&iic_device,
+ &sci_device,
&vpu_device,
&veu_device,
&jpu_device,
@@ -429,9 +413,6 @@ static int __init sh7722_devices_setup(void)
arch_initcall(sh7722_devices_setup);
static struct platform_device *sh7722_early_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
diff --git a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
index 772b9265d0e4..4caa5a7ca86e 100644
--- a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
+++ b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
@@ -20,103 +20,6 @@
#include
#include
-/* Serial */
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 80, 80, 80, 80 },
- .clk = "scif0",
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-static struct plat_sci_port scif1_platform_data = {
- .mapbase = 0xffe10000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 81, 81, 81, 81 },
- .clk = "scif1",
-};
-
-static struct platform_device scif1_device = {
- .name = "sh-sci",
- .id = 1,
- .dev = {
- .platform_data = &scif1_platform_data,
- },
-};
-
-static struct plat_sci_port scif2_platform_data = {
- .mapbase = 0xffe20000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 82, 82, 82, 82 },
- .clk = "scif2",
-};
-
-static struct platform_device scif2_device = {
- .name = "sh-sci",
- .id = 2,
- .dev = {
- .platform_data = &scif2_platform_data,
- },
-};
-
-static struct plat_sci_port scif3_platform_data = {
- .mapbase = 0xa4e30000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIFA,
- .irqs = { 56, 56, 56, 56 },
- .clk = "scif3",
-};
-
-static struct platform_device scif3_device = {
- .name = "sh-sci",
- .id = 3,
- .dev = {
- .platform_data = &scif3_platform_data,
- },
-};
-
-static struct plat_sci_port scif4_platform_data = {
- .mapbase = 0xa4e40000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIFA,
- .irqs = { 88, 88, 88, 88 },
- .clk = "scif4",
-};
-
-static struct platform_device scif4_device = {
- .name = "sh-sci",
- .id = 4,
- .dev = {
- .platform_data = &scif4_platform_data,
- },
-};
-
-static struct plat_sci_port scif5_platform_data = {
- .mapbase = 0xa4e50000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIFA,
- .irqs = { 109, 109, 109, 109 },
- .clk = "scif5",
-};
-
-static struct platform_device scif5_device = {
- .name = "sh-sci",
- .id = 5,
- .dev = {
- .platform_data = &scif5_platform_data,
- },
-};
-
static struct uio_info vpu_platform_data = {
.name = "VPU5",
.version = "0",
@@ -445,6 +348,56 @@ static struct platform_device tmu5_device = {
},
};
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 80, 80, 80, 80 },
+ .clk = "scif0",
+ },{
+ .mapbase = 0xffe10000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 81, 81, 81, 81 },
+ .clk = "scif1",
+ },{
+ .mapbase = 0xffe20000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 82, 82, 82, 82 },
+ .clk = "scif2",
+ },{
+ .mapbase = 0xa4e30000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIFA,
+ .irqs = { 56, 56, 56, 56 },
+ .clk = "scif3",
+ },{
+ .mapbase = 0xa4e40000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIFA,
+ .irqs = { 88, 88, 88, 88 },
+ .clk = "scif4",
+ },{
+ .mapbase = 0xa4e50000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIFA,
+ .irqs = { 109, 109, 109, 109 },
+ .clk = "scif5",
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
static struct resource rtc_resources[] = {
[0] = {
.start = 0xa465fec0,
@@ -535,12 +488,6 @@ static struct platform_device iic_device = {
};
static struct platform_device *sh7723_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
- &scif3_device,
- &scif4_device,
- &scif5_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
@@ -548,6 +495,7 @@ static struct platform_device *sh7723_devices[] __initdata = {
&tmu3_device,
&tmu4_device,
&tmu5_device,
+ &sci_device,
&rtc_device,
&iic_device,
&sh7723_usb_host_device,
@@ -568,12 +516,6 @@ static int __init sh7723_devices_setup(void)
arch_initcall(sh7723_devices_setup);
static struct platform_device *sh7723_early_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
- &scif3_device,
- &scif4_device,
- &scif5_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
diff --git a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index a52f35117e82..845e89c936e7 100644
--- a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -27,99 +27,53 @@
#include
/* Serial */
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 80, 80, 80, 80 },
- .clk = "scif0",
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-static struct plat_sci_port scif1_platform_data = {
- .mapbase = 0xffe10000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 81, 81, 81, 81 },
- .clk = "scif1",
-};
-
-static struct platform_device scif1_device = {
- .name = "sh-sci",
- .id = 1,
- .dev = {
- .platform_data = &scif1_platform_data,
- },
-};
-
-static struct plat_sci_port scif2_platform_data = {
- .mapbase = 0xffe20000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 82, 82, 82, 82 },
- .clk = "scif2",
-};
-
-static struct platform_device scif2_device = {
- .name = "sh-sci",
- .id = 2,
- .dev = {
- .platform_data = &scif2_platform_data,
- },
-};
-
-static struct plat_sci_port scif3_platform_data = {
- .mapbase = 0xa4e30000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIFA,
- .irqs = { 56, 56, 56, 56 },
- .clk = "scif3",
-};
-
-static struct platform_device scif3_device = {
- .name = "sh-sci",
- .id = 3,
- .dev = {
- .platform_data = &scif3_platform_data,
- },
-};
-
-static struct plat_sci_port scif4_platform_data = {
- .mapbase = 0xa4e40000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIFA,
- .irqs = { 88, 88, 88, 88 },
- .clk = "scif4",
-};
-
-static struct platform_device scif4_device = {
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 80, 80, 80, 80 },
+ .clk = "scif0",
+ }, {
+ .mapbase = 0xffe10000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 81, 81, 81, 81 },
+ .clk = "scif1",
+ }, {
+ .mapbase = 0xffe20000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 82, 82, 82, 82 },
+ .clk = "scif2",
+ }, {
+ .mapbase = 0xa4e30000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIFA,
+ .irqs = { 56, 56, 56, 56 },
+ .clk = "scif3",
+ }, {
+ .mapbase = 0xa4e40000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIFA,
+ .irqs = { 88, 88, 88, 88 },
+ .clk = "scif4",
+ }, {
+ .mapbase = 0xa4e50000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIFA,
+ .irqs = { 109, 109, 109, 109 },
+ .clk = "scif5",
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
.name = "sh-sci",
- .id = 4,
- .dev = {
- .platform_data = &scif4_platform_data,
- },
-};
-
-static struct plat_sci_port scif5_platform_data = {
- .mapbase = 0xa4e50000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIFA,
- .irqs = { 109, 109, 109, 109 },
- .clk = "scif5",
-};
-
-static struct platform_device scif5_device = {
- .name = "sh-sci",
- .id = 5,
+ .id = -1,
.dev = {
- .platform_data = &scif5_platform_data,
+ .platform_data = sci_platform_data,
},
};
@@ -636,12 +590,6 @@ static struct platform_device spu1_device = {
};
static struct platform_device *sh7724_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
- &scif3_device,
- &scif4_device,
- &scif5_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
@@ -649,6 +597,7 @@ static struct platform_device *sh7724_devices[] __initdata = {
&tmu3_device,
&tmu4_device,
&tmu5_device,
+ &sci_device,
&rtc_device,
&iic0_device,
&iic1_device,
@@ -675,12 +624,6 @@ static int __init sh7724_devices_setup(void)
arch_initcall(sh7724_devices_setup);
static struct platform_device *sh7724_early_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
- &scif3_device,
- &scif4_device,
- &scif5_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
diff --git a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
index 37e32efbbaa7..c470e15f2e03 100644
--- a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
+++ b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -17,51 +17,6 @@
#include
#include
-static struct plat_sci_port scif2_platform_data = {
- .mapbase = 0xfe4b0000, /* SCIF2 */
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 40, 40, 40 },
-};
-
-static struct platform_device scif2_device = {
- .name = "sh-sci",
- .id = 2,
- .dev = {
- .platform_data = &scif2_platform_data,
- },
-};
-
-static struct plat_sci_port scif3_platform_data = {
- .mapbase = 0xfe4c0000, /* SCIF3 */
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 76, 76, 76, 76 },
-};
-
-static struct platform_device scif3_device = {
- .name = "sh-sci",
- .id = 3,
- .dev = {
- .platform_data = &scif3_platform_data,
- },
-};
-
-static struct plat_sci_port scif4_platform_data = {
- .mapbase = 0xfe4d0000, /* SCIF4 */
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 104, 104, 104, 104 },
-};
-
-static struct platform_device scif4_device = {
- .name = "sh-sci",
- .id = 4,
- .dev = {
- .platform_data = &scif4_platform_data,
- },
-};
-
static struct sh_timer_config tmu0_platform_data = {
.name = "TMU0",
.channel_offset = 0x04,
@@ -124,12 +79,39 @@ static struct platform_device tmu1_device = {
.num_resources = ARRAY_SIZE(tmu1_resources),
};
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xfe4b0000, /* SCIF2 */
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 40, 40, 40 },
+ }, {
+ .mapbase = 0xfe4c0000, /* SCIF3 */
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 76, 76, 76, 76 },
+ }, {
+ .mapbase = 0xfe4d0000, /* SCIF4 */
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 104, 104, 104, 104 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
static struct platform_device *sh7757_devices[] __initdata = {
- &scif2_device,
- &scif3_device,
- &scif4_device,
&tmu0_device,
&tmu1_device,
+ &sci_device,
};
static int __init sh7757_devices_setup(void)
@@ -139,20 +121,6 @@ static int __init sh7757_devices_setup(void)
}
arch_initcall(sh7757_devices_setup);
-static struct platform_device *sh7757_early_devices[] __initdata = {
- &scif2_device,
- &scif3_device,
- &scif4_device,
- &tmu0_device,
- &tmu1_device,
-};
-
-void __init plat_early_device_setup(void)
-{
- early_platform_add_devices(sh7757_early_devices,
- ARRAY_SIZE(sh7757_early_devices));
-}
-
enum {
UNUSED = 0,
diff --git a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
index 6aba26fec416..4659fff6b842 100644
--- a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
+++ b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
@@ -16,51 +16,6 @@
#include
#include
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 40, 40, 40 },
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-static struct plat_sci_port scif1_platform_data = {
- .mapbase = 0xffe08000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 76, 76, 76, 76 },
-};
-
-static struct platform_device scif1_device = {
- .name = "sh-sci",
- .id = 1,
- .dev = {
- .platform_data = &scif1_platform_data,
- },
-};
-
-static struct plat_sci_port scif2_platform_data = {
- .mapbase = 0xffe10000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 104, 104, 104, 104 },
-};
-
-static struct platform_device scif2_device = {
- .name = "sh-sci",
- .id = 2,
- .dev = {
- .platform_data = &scif2_platform_data,
- },
-};
-
static struct resource rtc_resources[] = {
[0] = {
.start = 0xffe80000,
@@ -81,6 +36,35 @@ static struct platform_device rtc_device = {
.resource = rtc_resources,
};
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 40, 40, 40 },
+ }, {
+ .mapbase = 0xffe08000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 76, 76, 76, 76 },
+ }, {
+ .mapbase = 0xffe10000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 104, 104, 104, 104 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
static struct resource usb_ohci_resources[] = {
[0] = {
.start = 0xffec8000,
@@ -313,9 +297,6 @@ static struct platform_device tmu5_device = {
};
static struct platform_device *sh7763_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
@@ -323,6 +304,7 @@ static struct platform_device *sh7763_devices[] __initdata = {
&tmu4_device,
&tmu5_device,
&rtc_device,
+ &sci_device,
&usb_ohci_device,
&usbf_device,
};
@@ -335,9 +317,6 @@ static int __init sh7763_devices_setup(void)
arch_initcall(sh7763_devices_setup);
static struct platform_device *sh7763_early_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
diff --git a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
index c1643bc9590d..eead08d89d32 100644
--- a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
+++ b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
@@ -14,153 +14,67 @@
#include
#include
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xff923000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 61, 61, 61, 61 },
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-static struct plat_sci_port scif1_platform_data = {
- .mapbase = 0xff924000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 62, 62, 62, 62 },
-};
-
-static struct platform_device scif1_device = {
- .name = "sh-sci",
- .id = 1,
- .dev = {
- .platform_data = &scif1_platform_data,
- },
-};
-
-static struct plat_sci_port scif2_platform_data = {
- .mapbase = 0xff925000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 63, 63, 63, 63 },
-};
-
-static struct platform_device scif2_device = {
- .name = "sh-sci",
- .id = 2,
- .dev = {
- .platform_data = &scif2_platform_data,
- },
-};
-
-static struct plat_sci_port scif3_platform_data = {
- .mapbase = 0xff926000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 64, 64, 64, 64 },
-};
-
-static struct platform_device scif3_device = {
- .name = "sh-sci",
- .id = 3,
- .dev = {
- .platform_data = &scif3_platform_data,
- },
-};
-
-static struct plat_sci_port scif4_platform_data = {
- .mapbase = 0xff927000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 65, 65, 65, 65 },
-};
-
-static struct platform_device scif4_device = {
- .name = "sh-sci",
- .id = 4,
- .dev = {
- .platform_data = &scif4_platform_data,
- },
-};
-
-static struct plat_sci_port scif5_platform_data = {
- .mapbase = 0xff928000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 66, 66, 66, 66 },
-};
-
-static struct platform_device scif5_device = {
- .name = "sh-sci",
- .id = 5,
- .dev = {
- .platform_data = &scif5_platform_data,
- },
-};
-
-static struct plat_sci_port scif6_platform_data = {
- .mapbase = 0xff929000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 67, 67, 67, 67 },
-};
-
-static struct platform_device scif6_device = {
- .name = "sh-sci",
- .id = 6,
- .dev = {
- .platform_data = &scif6_platform_data,
- },
-};
-
-static struct plat_sci_port scif7_platform_data = {
- .mapbase = 0xff92a000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 68, 68, 68, 68 },
-};
-
-static struct platform_device scif7_device = {
- .name = "sh-sci",
- .id = 7,
- .dev = {
- .platform_data = &scif7_platform_data,
- },
-};
-
-static struct plat_sci_port scif8_platform_data = {
- .mapbase = 0xff92b000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 69, 69, 69, 69 },
-};
-
-static struct platform_device scif8_device = {
- .name = "sh-sci",
- .id = 8,
- .dev = {
- .platform_data = &scif8_platform_data,
- },
-};
-
-static struct plat_sci_port scif9_platform_data = {
- .mapbase = 0xff92c000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 70, 70, 70, 70 },
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xff923000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 61, 61, 61, 61 },
+ }, {
+ .mapbase = 0xff924000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 62, 62, 62, 62 },
+ }, {
+ .mapbase = 0xff925000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 63, 63, 63, 63 },
+ }, {
+ .mapbase = 0xff926000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 64, 64, 64, 64 },
+ }, {
+ .mapbase = 0xff927000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 65, 65, 65, 65 },
+ }, {
+ .mapbase = 0xff928000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 66, 66, 66, 66 },
+ }, {
+ .mapbase = 0xff929000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 67, 67, 67, 67 },
+ }, {
+ .mapbase = 0xff92a000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 68, 68, 68, 68 },
+ }, {
+ .mapbase = 0xff92b000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 69, 69, 69, 69 },
+ }, {
+ .mapbase = 0xff92c000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 70, 70, 70, 70 },
+ }, {
+ .flags = 0,
+ }
};
-static struct platform_device scif9_device = {
+static struct platform_device sci_device = {
.name = "sh-sci",
- .id = 9,
+ .id = -1,
.dev = {
- .platform_data = &scif9_platform_data,
+ .platform_data = sci_platform_data,
},
};
@@ -437,16 +351,6 @@ static struct platform_device tmu8_device = {
};
static struct platform_device *sh7770_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
- &scif3_device,
- &scif4_device,
- &scif5_device,
- &scif6_device,
- &scif7_device,
- &scif8_device,
- &scif9_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
@@ -456,6 +360,7 @@ static struct platform_device *sh7770_devices[] __initdata = {
&tmu6_device,
&tmu7_device,
&tmu8_device,
+ &sci_device,
};
static int __init sh7770_devices_setup(void)
@@ -466,16 +371,6 @@ static int __init sh7770_devices_setup(void)
arch_initcall(sh7770_devices_setup);
static struct platform_device *sh7770_early_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
- &scif3_device,
- &scif4_device,
- &scif5_device,
- &scif6_device,
- &scif7_device,
- &scif8_device,
- &scif9_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
diff --git a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
index c310558490d5..12ff56f19c5c 100644
--- a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+++ b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
@@ -15,36 +15,6 @@
#include
#include
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 40, 40, 40 },
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-static struct plat_sci_port scif1_platform_data = {
- .mapbase = 0xffe10000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 76, 76, 76, 76 },
-};
-
-static struct platform_device scif1_device = {
- .name = "sh-sci",
- .id = 1,
- .dev = {
- .platform_data = &scif1_platform_data,
- },
-};
-
static struct sh_timer_config tmu0_platform_data = {
.name = "TMU0",
.channel_offset = 0x04,
@@ -247,6 +217,30 @@ static struct platform_device rtc_device = {
.resource = rtc_resources,
};
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 40, 40, 40 },
+ }, {
+ .mapbase = 0xffe10000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 76, 76, 76, 76 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
static struct sh_dmae_pdata dma_platform_data = {
.mode = (SHDMA_MIX_IRQ | SHDMA_DMAOR1),
};
@@ -260,8 +254,6 @@ static struct platform_device dma_device = {
};
static struct platform_device *sh7780_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
@@ -269,6 +261,7 @@ static struct platform_device *sh7780_devices[] __initdata = {
&tmu4_device,
&tmu5_device,
&rtc_device,
+ &sci_device,
&dma_device,
};
@@ -278,9 +271,8 @@ static int __init sh7780_devices_setup(void)
ARRAY_SIZE(sh7780_devices));
}
arch_initcall(sh7780_devices_setup);
+
static struct platform_device *sh7780_early_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
diff --git a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
index ef26ebda6e8b..7f6c718b6c36 100644
--- a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
+++ b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
@@ -16,102 +16,6 @@
#include
#include
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xffea0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 40, 40, 40 },
- .clk = "scif_fck",
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-static struct plat_sci_port scif1_platform_data = {
- .mapbase = 0xffeb0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 44, 44, 44, 44 },
- .clk = "scif_fck",
-};
-
-static struct platform_device scif1_device = {
- .name = "sh-sci",
- .id = 1,
- .dev = {
- .platform_data = &scif1_platform_data,
- },
-};
-
-static struct plat_sci_port scif2_platform_data = {
- .mapbase = 0xffec0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 60, 60, 60, 60 },
- .clk = "scif_fck",
-};
-
-static struct platform_device scif2_device = {
- .name = "sh-sci",
- .id = 2,
- .dev = {
- .platform_data = &scif2_platform_data,
- },
-};
-
-static struct plat_sci_port scif3_platform_data = {
- .mapbase = 0xffed0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 61, 61, 61, 61 },
- .clk = "scif_fck",
-};
-
-static struct platform_device scif3_device = {
- .name = "sh-sci",
- .id = 3,
- .dev = {
- .platform_data = &scif3_platform_data,
- },
-};
-
-static struct plat_sci_port scif4_platform_data = {
- .mapbase = 0xffee0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 62, 62, 62, 62 },
- .clk = "scif_fck",
-};
-
-static struct platform_device scif4_device = {
- .name = "sh-sci",
- .id = 4,
- .dev = {
- .platform_data = &scif4_platform_data,
- },
-};
-
-static struct plat_sci_port scif5_platform_data = {
- .mapbase = 0xffef0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 63, 63, 63, 63 },
- .clk = "scif_fck",
-};
-
-static struct platform_device scif5_device = {
- .name = "sh-sci",
- .id = 5,
- .dev = {
- .platform_data = &scif5_platform_data,
- },
-};
-
static struct sh_timer_config tmu0_platform_data = {
.name = "TMU0",
.channel_offset = 0x04,
@@ -294,19 +198,64 @@ static struct platform_device tmu5_device = {
.num_resources = ARRAY_SIZE(tmu5_resources),
};
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xffea0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 40, 40, 40 },
+ .clk = "scif_fck",
+ }, {
+ .mapbase = 0xffeb0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 44, 44, 44, 44 },
+ .clk = "scif_fck",
+ }, {
+ .mapbase = 0xffec0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 60, 60, 60, 60 },
+ .clk = "scif_fck",
+ }, {
+ .mapbase = 0xffed0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 61, 61, 61, 61 },
+ .clk = "scif_fck",
+ }, {
+ .mapbase = 0xffee0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 62, 62, 62, 62 },
+ .clk = "scif_fck",
+ }, {
+ .mapbase = 0xffef0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 63, 63, 63, 63 },
+ .clk = "scif_fck",
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
static struct platform_device *sh7785_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
- &scif3_device,
- &scif4_device,
- &scif5_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
&tmu3_device,
&tmu4_device,
&tmu5_device,
+ &sci_device,
};
static int __init sh7785_devices_setup(void)
@@ -317,12 +266,6 @@ static int __init sh7785_devices_setup(void)
arch_initcall(sh7785_devices_setup);
static struct platform_device *sh7785_early_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
- &scif3_device,
- &scif4_device,
- &scif5_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
diff --git a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index 71673487ace0..0104a8ec5369 100644
--- a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -23,96 +23,51 @@
#include
#include
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xffea0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 41, 43, 42 },
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-/*
- * The rest of these all have multiplexed IRQs
- */
-static struct plat_sci_port scif1_platform_data = {
- .mapbase = 0xffeb0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 44, 44, 44, 44 },
-};
-
-static struct platform_device scif1_device = {
- .name = "sh-sci",
- .id = 1,
- .dev = {
- .platform_data = &scif1_platform_data,
- },
-};
-
-static struct plat_sci_port scif2_platform_data = {
- .mapbase = 0xffec0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 50, 50, 50, 50 },
-};
-
-static struct platform_device scif2_device = {
- .name = "sh-sci",
- .id = 2,
- .dev = {
- .platform_data = &scif2_platform_data,
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xffea0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 41, 43, 42 },
},
+ /*
+ * The rest of these all have multiplexed IRQs
+ */
+ {
+ .mapbase = 0xffeb0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 44, 44, 44, 44 },
+ }, {
+ .mapbase = 0xffec0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 50, 50, 50, 50 },
+ }, {
+ .mapbase = 0xffed0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 51, 51, 51, 51 },
+ }, {
+ .mapbase = 0xffee0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 52, 52, 52, 52 },
+ }, {
+ .mapbase = 0xffef0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 53, 53, 53, 53 },
+ }, {
+ .flags = 0,
+ }
};
-static struct plat_sci_port scif3_platform_data = {
- .mapbase = 0xffed0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 51, 51, 51, 51 },
-};
-
-static struct platform_device scif3_device = {
- .name = "sh-sci",
- .id = 3,
- .dev = {
- .platform_data = &scif3_platform_data,
- },
-};
-
-static struct plat_sci_port scif4_platform_data = {
- .mapbase = 0xffee0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 52, 52, 52, 52 },
-};
-
-static struct platform_device scif4_device = {
- .name = "sh-sci",
- .id = 4,
- .dev = {
- .platform_data = &scif4_platform_data,
- },
-};
-
-static struct plat_sci_port scif5_platform_data = {
- .mapbase = 0xffef0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 53, 53, 53, 53 },
-};
-
-static struct platform_device scif5_device = {
+static struct platform_device sci_device = {
.name = "sh-sci",
- .id = 5,
+ .id = -1,
.dev = {
- .platform_data = &scif5_platform_data,
+ .platform_data = sci_platform_data,
},
};
@@ -504,12 +459,6 @@ static struct platform_device usb_ohci_device = {
};
static struct platform_device *sh7786_early_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
- &scif3_device,
- &scif4_device,
- &scif5_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
@@ -525,6 +474,7 @@ static struct platform_device *sh7786_early_devices[] __initdata = {
};
static struct platform_device *sh7786_devices[] __initdata = {
+ &sci_device,
&usb_ohci_device,
};
diff --git a/trunk/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/trunk/arch/sh/kernel/cpu/sh4a/setup-shx3.c
index 780ba17a5599..c7ba9166e18a 100644
--- a/trunk/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+++ b/trunk/arch/sh/kernel/cpu/sh4a/setup-shx3.c
@@ -24,48 +24,32 @@
* silicon in the first place, we just refuse to deal with the port at
* all rather than adding infrastructure to hack around it.
*/
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = 0xffc30000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 41, 43, 42 },
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-static struct plat_sci_port scif1_platform_data = {
- .mapbase = 0xffc40000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 44, 45, 47, 46 },
-};
-
-static struct platform_device scif1_device = {
- .name = "sh-sci",
- .id = 1,
- .dev = {
- .platform_data = &scif1_platform_data,
- },
-};
-
-static struct plat_sci_port scif2_platform_data = {
- .mapbase = 0xffc60000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 52, 53, 55, 54 },
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xffc30000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 41, 43, 42 },
+ }, {
+ .mapbase = 0xffc40000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 44, 45, 47, 46 },
+ }, {
+ .mapbase = 0xffc60000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 52, 53, 55, 54 },
+ }, {
+ .flags = 0,
+ }
};
-static struct platform_device scif2_device = {
+static struct platform_device sci_device = {
.name = "sh-sci",
- .id = 2,
+ .id = -1,
.dev = {
- .platform_data = &scif2_platform_data,
+ .platform_data = sci_platform_data,
},
};
@@ -252,9 +236,6 @@ static struct platform_device tmu5_device = {
};
static struct platform_device *shx3_early_devices[] __initdata = {
- &scif0_device,
- &scif1_device,
- &scif2_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
@@ -263,10 +244,21 @@ static struct platform_device *shx3_early_devices[] __initdata = {
&tmu5_device,
};
+static struct platform_device *shx3_devices[] __initdata = {
+ &sci_device,
+};
+
static int __init shx3_devices_setup(void)
{
- return platform_add_devices(shx3_early_devices,
+ int ret;
+
+ ret = platform_add_devices(shx3_early_devices,
ARRAY_SIZE(shx3_early_devices));
+ if (unlikely(ret != 0))
+ return ret;
+
+ return platform_add_devices(shx3_devices,
+ ARRAY_SIZE(shx3_devices));
}
arch_initcall(shx3_devices_setup);
diff --git a/trunk/arch/sh/kernel/cpu/sh5/fpu.c b/trunk/arch/sh/kernel/cpu/sh5/fpu.c
index 4648ccee6c4d..dd4f51ffb50e 100644
--- a/trunk/arch/sh/kernel/cpu/sh5/fpu.c
+++ b/trunk/arch/sh/kernel/cpu/sh5/fpu.c
@@ -34,7 +34,7 @@ static union sh_fpu_union init_fpuregs = {
}
};
-void save_fpu(struct task_struct *tsk)
+void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
asm volatile("fst.p %0, (0*8), fp0\n\t"
"fst.p %0, (1*8), fp2\n\t"
@@ -153,7 +153,7 @@ do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
enable_fpu();
if (last_task_used_math != NULL)
/* Other processes fpu state, save away */
- save_fpu(last_task_used_math);
+ save_fpu(last_task_used_math, regs);
last_task_used_math = current;
if (used_math()) {
diff --git a/trunk/arch/sh/kernel/cpu/sh5/setup-sh5.c b/trunk/arch/sh/kernel/cpu/sh5/setup-sh5.c
index e7a3c1e4b604..6a0f82f70032 100644
--- a/trunk/arch/sh/kernel/cpu/sh5/setup-sh5.c
+++ b/trunk/arch/sh/kernel/cpu/sh5/setup-sh5.c
@@ -16,18 +16,22 @@
#include
#include
-static struct plat_sci_port scif0_platform_data = {
- .mapbase = PHYS_PERIPHERAL_BLOCK + 0x01030000,
- .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
- .type = PORT_SCIF,
- .irqs = { 39, 40, 42, 0 },
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = PHYS_PERIPHERAL_BLOCK + 0x01030000,
+ .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
+ .type = PORT_SCIF,
+ .irqs = { 39, 40, 42, 0 },
+ }, {
+ .flags = 0,
+ }
};
-static struct platform_device scif0_device = {
+static struct platform_device sci_device = {
.name = "sh-sci",
- .id = 0,
+ .id = -1,
.dev = {
- .platform_data = &scif0_platform_data,
+ .platform_data = sci_platform_data,
},
};
@@ -160,13 +164,13 @@ static struct platform_device tmu2_device = {
};
static struct platform_device *sh5_early_devices[] __initdata = {
- &scif0_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
};
static struct platform_device *sh5_devices[] __initdata = {
+ &sci_device,
&rtc_device,
};
diff --git a/trunk/arch/sh/kernel/early_printk.c b/trunk/arch/sh/kernel/early_printk.c
index f8bb50c6e050..81a46145ffa5 100644
--- a/trunk/arch/sh/kernel/early_printk.c
+++ b/trunk/arch/sh/kernel/early_printk.c
@@ -15,6 +15,7 @@
#include
#include
+#ifdef CONFIG_SH_STANDARD_BIOS
#include
/*
@@ -56,8 +57,149 @@ static struct console bios_console = {
.flags = CON_PRINTBUFFER,
.index = -1,
};
+#endif
-static struct console *early_console;
+#ifdef CONFIG_EARLY_SCIF_CONSOLE
+#include
+#include "../../../drivers/serial/sh-sci.h"
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7721)
+#define EPK_SCSMR_VALUE 0x000
+#define EPK_SCBRR_VALUE 0x00C
+#define EPK_FIFO_SIZE 64
+#define EPK_FIFO_BITS (0x7f00 >> 8)
+#else
+#define EPK_FIFO_SIZE 16
+#define EPK_FIFO_BITS (0x1f00 >> 8)
+#endif
+
+static struct uart_port scif_port = {
+ .type = PORT_SCIF,
+ .mapbase = CONFIG_EARLY_SCIF_CONSOLE_PORT,
+ .membase = (char __iomem *)CONFIG_EARLY_SCIF_CONSOLE_PORT,
+};
+
+static void scif_sercon_putc(int c)
+{
+ while (((sci_in(&scif_port, SCFDR) & EPK_FIFO_BITS) >= EPK_FIFO_SIZE))
+ ;
+
+ sci_in(&scif_port, SCxSR);
+ sci_out(&scif_port, SCxSR, 0xf3 & ~(0x20 | 0x40));
+ sci_out(&scif_port, SCxTDR, c);
+
+ while ((sci_in(&scif_port, SCxSR) & 0x40) == 0)
+ ;
+
+ if (c == '\n')
+ scif_sercon_putc('\r');
+}
+
+static void scif_sercon_write(struct console *con, const char *s,
+ unsigned count)
+{
+ while (count-- > 0)
+ scif_sercon_putc(*s++);
+}
+
+static int __init scif_sercon_setup(struct console *con, char *options)
+{
+ con->cflag = CREAD | HUPCL | CLOCAL | B115200 | CS8;
+
+ return 0;
+}
+
+static struct console scif_console = {
+ .name = "sercon",
+ .write = scif_sercon_write,
+ .setup = scif_sercon_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+#if !defined(CONFIG_SH_STANDARD_BIOS)
+#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7721)
+static void scif_sercon_init(char *s)
+{
+ sci_out(&scif_port, SCSCR, 0x0000); /* clear TE and RE */
+ sci_out(&scif_port, SCFCR, 0x4006); /* reset */
+ sci_out(&scif_port, SCSCR, 0x0000); /* select internal clock */
+ sci_out(&scif_port, SCSMR, EPK_SCSMR_VALUE);
+ sci_out(&scif_port, SCBRR, EPK_SCBRR_VALUE);
+
+ mdelay(1); /* wait 1-bit time */
+
+ sci_out(&scif_port, SCFCR, 0x0030); /* TTRG=b'11 */
+ sci_out(&scif_port, SCSCR, 0x0030); /* TE, RE */
+}
+#elif defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SH3)
+#define DEFAULT_BAUD 115200
+/*
+ * Simple SCIF init, primarily aimed at SH7750 and other similar SH-4
+ * devices that aren't using sh-ipl+g.
+ */
+static void scif_sercon_init(char *s)
+{
+ struct uart_port *port = &scif_port;
+ unsigned baud = DEFAULT_BAUD;
+ unsigned int status;
+ char *e;
+
+ if (*s == ',')
+ ++s;
+
+ if (*s) {
+ /* ignore ioport/device name */
+ s += strcspn(s, ",");
+ if (*s == ',')
+ s++;
+ }
+
+ if (*s) {
+ baud = simple_strtoul(s, &e, 0);
+ if (baud == 0 || s == e)
+ baud = DEFAULT_BAUD;
+ }
+
+ do {
+ status = sci_in(port, SCxSR);
+ } while (!(status & SCxSR_TEND(port)));
+
+ sci_out(port, SCSCR, 0); /* TE=0, RE=0 */
+ sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
+ sci_out(port, SCSMR, 0);
+
+ /* Set baud rate */
+ sci_out(port, SCBRR, (CONFIG_SH_PCLK_FREQ + 16 * baud) /
+ (32 * baud) - 1);
+ udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */
+
+ sci_out(port, SCSPTR, 0);
+ sci_out(port, SCxSR, 0x60);
+ sci_out(port, SCLSR, 0);
+
+ sci_out(port, SCFCR, 0);
+ sci_out(port, SCSCR, 0x30); /* TE=1, RE=1 */
+}
+#endif /* defined(CONFIG_CPU_SUBTYPE_SH7720) */
+#endif /* !defined(CONFIG_SH_STANDARD_BIOS) */
+#endif /* CONFIG_EARLY_SCIF_CONSOLE */
+
+/*
+ * Setup a default console, if more than one is compiled in, rely on the
+ * earlyprintk= parsing to give priority.
+ */
+static struct console *early_console =
+#ifdef CONFIG_SH_STANDARD_BIOS
+ &bios_console
+#elif defined(CONFIG_EARLY_SCIF_CONSOLE)
+ &scif_console
+#else
+ NULL
+#endif
+ ;
static int __init setup_early_printk(char *buf)
{
@@ -69,8 +211,21 @@ static int __init setup_early_printk(char *buf)
if (strstr(buf, "keep"))
keep_early = 1;
+#ifdef CONFIG_SH_STANDARD_BIOS
if (!strncmp(buf, "bios", 4))
early_console = &bios_console;
+#endif
+#if defined(CONFIG_EARLY_SCIF_CONSOLE)
+ if (!strncmp(buf, "serial", 6)) {
+ early_console = &scif_console;
+
+#if !defined(CONFIG_SH_STANDARD_BIOS)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SH3)
+ scif_sercon_init(buf + 6);
+#endif
+#endif
+ }
+#endif
if (likely(early_console)) {
if (keep_early)
diff --git a/trunk/arch/sh/kernel/ftrace.c b/trunk/arch/sh/kernel/ftrace.c
index a48cdedc73b5..b6f41c109beb 100644
--- a/trunk/arch/sh/kernel/ftrace.c
+++ b/trunk/arch/sh/kernel/ftrace.c
@@ -401,10 +401,82 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_FTRACE_SYSCALLS
+
+extern unsigned long __start_syscalls_metadata[];
+extern unsigned long __stop_syscalls_metadata[];
extern unsigned long *sys_call_table;
-unsigned long __init arch_syscall_addr(int nr)
+static struct syscall_metadata **syscalls_metadata;
+
+static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
+{
+ struct syscall_metadata *start;
+ struct syscall_metadata *stop;
+ char str[KSYM_SYMBOL_LEN];
+
+
+ start = (struct syscall_metadata *)__start_syscalls_metadata;
+ stop = (struct syscall_metadata *)__stop_syscalls_metadata;
+ kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);
+
+ for ( ; start < stop; start++) {
+ if (start->name && !strcmp(start->name, str))
+ return start;
+ }
+
+ return NULL;
+}
+
+struct syscall_metadata *syscall_nr_to_meta(int nr)
+{
+ if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0)
+ return NULL;
+
+ return syscalls_metadata[nr];
+}
+
+int syscall_name_to_nr(char *name)
+{
+ int i;
+
+ if (!syscalls_metadata)
+ return -1;
+ for (i = 0; i < NR_syscalls; i++)
+ if (syscalls_metadata[i])
+ if (!strcmp(syscalls_metadata[i]->name, name))
+ return i;
+ return -1;
+}
+
+void set_syscall_enter_id(int num, int id)
+{
+ syscalls_metadata[num]->enter_id = id;
+}
+
+void set_syscall_exit_id(int num, int id)
+{
+ syscalls_metadata[num]->exit_id = id;
+}
+
+static int __init arch_init_ftrace_syscalls(void)
{
- return (unsigned long)sys_call_table[nr];
+ int i;
+ struct syscall_metadata *meta;
+ unsigned long **psys_syscall_table = &sys_call_table;
+
+ syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
+ FTRACE_SYSCALL_MAX, GFP_KERNEL);
+ if (!syscalls_metadata) {
+ WARN_ON(1);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < FTRACE_SYSCALL_MAX; i++) {
+ meta = find_syscall_meta(psys_syscall_table[i]);
+ syscalls_metadata[i] = meta;
+ }
+
+ return 0;
}
+arch_initcall(arch_init_ftrace_syscalls);
#endif /* CONFIG_FTRACE_SYSCALLS */
diff --git a/trunk/arch/sh/kernel/process_64.c b/trunk/arch/sh/kernel/process_64.c
index 31f80c61b031..359b8a2f4d2e 100644
--- a/trunk/arch/sh/kernel/process_64.c
+++ b/trunk/arch/sh/kernel/process_64.c
@@ -404,7 +404,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
if (fpvalid) {
if (current == last_task_used_math) {
enable_fpu();
- save_fpu(tsk);
+ save_fpu(tsk, regs);
disable_fpu();
last_task_used_math = 0;
regs->sr |= SR_FD;
@@ -431,7 +431,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
#ifdef CONFIG_SH_FPU
if(last_task_used_math == current) {
enable_fpu();
- save_fpu(current);
+ save_fpu(current, regs);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
diff --git a/trunk/arch/sh/kernel/ptrace_64.c b/trunk/arch/sh/kernel/ptrace_64.c
index 873ebdc4f98e..952da83903da 100644
--- a/trunk/arch/sh/kernel/ptrace_64.c
+++ b/trunk/arch/sh/kernel/ptrace_64.c
@@ -82,7 +82,7 @@ get_fpu_long(struct task_struct *task, unsigned long addr)
if (last_task_used_math == task) {
enable_fpu();
- save_fpu(task);
+ save_fpu(task, regs);
disable_fpu();
last_task_used_math = 0;
regs->sr |= SR_FD;
@@ -118,7 +118,7 @@ put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
set_stopped_child_used_math(task);
} else if (last_task_used_math == task) {
enable_fpu();
- save_fpu(task);
+ save_fpu(task, regs);
disable_fpu();
last_task_used_math = 0;
regs->sr |= SR_FD;
diff --git a/trunk/arch/sh/kernel/setup.c b/trunk/arch/sh/kernel/setup.c
index 8b0e69792cf4..5a947a2567e4 100644
--- a/trunk/arch/sh/kernel/setup.c
+++ b/trunk/arch/sh/kernel/setup.c
@@ -423,9 +423,6 @@ void __init setup_arch(char **cmdline_p)
plat_early_device_setup();
- /* Let earlyprintk output early console messages */
- early_platform_driver_probe("earlyprintk", 1, 1);
-
sh_mv_setup();
/*
diff --git a/trunk/arch/sh/kernel/signal_64.c b/trunk/arch/sh/kernel/signal_64.c
index ce76dbdef294..feb3dddd3192 100644
--- a/trunk/arch/sh/kernel/signal_64.c
+++ b/trunk/arch/sh/kernel/signal_64.c
@@ -314,7 +314,7 @@ setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
if (current == last_task_used_math) {
enable_fpu();
- save_fpu(current);
+ save_fpu(current, regs);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
diff --git a/trunk/arch/sh/kernel/syscalls_32.S b/trunk/arch/sh/kernel/syscalls_32.S
index 4bd5a1146956..19fd11dd9871 100644
--- a/trunk/arch/sh/kernel/syscalls_32.S
+++ b/trunk/arch/sh/kernel/syscalls_32.S
@@ -353,4 +353,3 @@ ENTRY(sys_call_table)
.long sys_pwritev
.long sys_rt_tgsigqueueinfo /* 335 */
.long sys_perf_event_open
- .long sys_recvmmsg
diff --git a/trunk/arch/sh/kernel/traps_32.c b/trunk/arch/sh/kernel/traps_32.c
index 86639beac3a2..3da5a125d884 100644
--- a/trunk/arch/sh/kernel/traps_32.c
+++ b/trunk/arch/sh/kernel/traps_32.c
@@ -452,18 +452,12 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
rm = regs->regs[index];
/* shout about fixups */
- if (!expected) {
- if (user_mode(regs) && (se_usermode & 1) && printk_ratelimit())
- pr_notice("Fixing up unaligned userspace access "
- "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
- current->comm, task_pid_nr(current),
- (void *)regs->pc, instruction);
- else if (se_kernmode_warn && printk_ratelimit())
- pr_notice("Fixing up unaligned kernel access "
- "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
- current->comm, task_pid_nr(current),
- (void *)regs->pc, instruction);
- }
+ if (!expected && printk_ratelimit())
+ printk(KERN_NOTICE "Fixing up unaligned %s access "
+ "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+ user_mode(regs) ? "userspace" : "kernel",
+ current->comm, task_pid_nr(current),
+ (void *)regs->pc, instruction);
ret = -EFAULT;
switch (instruction&0xF000) {
diff --git a/trunk/arch/sh/kernel/traps_64.c b/trunk/arch/sh/kernel/traps_64.c
index d86f5315a0c1..75c0cbe2eda0 100644
--- a/trunk/arch/sh/kernel/traps_64.c
+++ b/trunk/arch/sh/kernel/traps_64.c
@@ -600,7 +600,7 @@ static int misaligned_fpu_load(struct pt_regs *regs,
indexed by register number. */
if (last_task_used_math == current) {
enable_fpu();
- save_fpu(current);
+ save_fpu(current, regs);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
@@ -673,7 +673,7 @@ static int misaligned_fpu_store(struct pt_regs *regs,
indexed by register number. */
if (last_task_used_math == current) {
enable_fpu();
- save_fpu(current);
+ save_fpu(current, regs);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
diff --git a/trunk/arch/sh/mm/cache-sh4.c b/trunk/arch/sh/mm/cache-sh4.c
index 560ddb6bc8a7..f36a08bf3d5c 100644
--- a/trunk/arch/sh/mm/cache-sh4.c
+++ b/trunk/arch/sh/mm/cache-sh4.c
@@ -256,7 +256,8 @@ static void sh4_flush_cache_page(void *args)
address = (unsigned long)vaddr;
}
- flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
+ if (pages_do_alias(address, phys))
+ flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
(address & shm_align_mask), phys);
if (vma->vm_flags & VM_EXEC)
diff --git a/trunk/arch/sh/mm/ioremap_32.c b/trunk/arch/sh/mm/ioremap_32.c
index 2141befb4f91..a86eaa9d75a5 100644
--- a/trunk/arch/sh/mm/ioremap_32.c
+++ b/trunk/arch/sh/mm/ioremap_32.c
@@ -33,10 +33,10 @@
* have to convert them into an offset in a page-aligned mapping, but the
* caller shouldn't need to know that small detail.
*/
-void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size,
- unsigned long flags, void *caller)
+void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+ unsigned long flags)
{
- struct vm_struct *area;
+ struct vm_struct * area;
unsigned long offset, last_addr, addr, orig_addr;
pgprot_t pgprot;
@@ -67,7 +67,7 @@ void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size,
/*
* Ok, go for it..
*/
- area = get_vm_area_caller(size, VM_IOREMAP, caller);
+ area = get_vm_area(size, VM_IOREMAP);
if (!area)
return NULL;
area->phys_addr = phys_addr;
@@ -103,7 +103,7 @@ void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size,
return (void __iomem *)(offset + (char *)orig_addr);
}
-EXPORT_SYMBOL(__ioremap_caller);
+EXPORT_SYMBOL(__ioremap);
void __iounmap(void __iomem *addr)
{
diff --git a/trunk/arch/sh/mm/ioremap_64.c b/trunk/arch/sh/mm/ioremap_64.c
index ef434657d428..b16843d02b76 100644
--- a/trunk/arch/sh/mm/ioremap_64.c
+++ b/trunk/arch/sh/mm/ioremap_64.c
@@ -258,15 +258,15 @@ static void shmedia_unmapioaddr(unsigned long vaddr)
pte_clear(&init_mm, vaddr, ptep);
}
-void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
- unsigned long flags, void *caller)
+void __iomem *__ioremap(unsigned long offset, unsigned long size,
+ unsigned long flags)
{
char name[14];
sprintf(name, "phys_%08x", (u32)offset);
return shmedia_alloc_io(offset, size, name, flags);
}
-EXPORT_SYMBOL(__ioremap_caller);
+EXPORT_SYMBOL(__ioremap);
void __iounmap(void __iomem *virtual)
{
diff --git a/trunk/arch/sh/mm/numa.c b/trunk/arch/sh/mm/numa.c
index 422e92721878..6c524446c0f6 100644
--- a/trunk/arch/sh/mm/numa.c
+++ b/trunk/arch/sh/mm/numa.c
@@ -28,7 +28,7 @@ void __init setup_memory(void)
{
unsigned long free_pfn = PFN_UP(__pa(_end));
u64 base = min_low_pfn << PAGE_SHIFT;
- u64 size = (max_low_pfn << PAGE_SHIFT) - base;
+ u64 size = (max_low_pfn << PAGE_SHIFT) - min_low_pfn;
lmb_add(base, size);
@@ -37,15 +37,6 @@ void __init setup_memory(void)
(PFN_PHYS(free_pfn) + PAGE_SIZE - 1) -
(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
- /*
- * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
- */
- if (CONFIG_ZERO_PAGE_OFFSET != 0)
- lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
-
- lmb_analyze();
- lmb_dump_all();
-
/*
* Node 0 sets up its pgdat at the first available pfn,
* and bumps it up before setting up the bootmem allocator.
@@ -80,7 +71,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
/* Node-local pgdat */
NODE_DATA(nid) = __va(lmb_alloc_base(sizeof(struct pglist_data),
- SMP_CACHE_BYTES, end));
+ SMP_CACHE_BYTES, end_pfn));
memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
@@ -90,7 +81,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
/* Node-local bootmap */
bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT,
- PAGE_SIZE, end);
+ PAGE_SIZE, end_pfn);
init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
start_pfn, end_pfn);
diff --git a/trunk/arch/sparc/include/asm/elf_32.h b/trunk/arch/sparc/include/asm/elf_32.h
index 4269ca6ad18a..381a1b5256d6 100644
--- a/trunk/arch/sparc/include/asm/elf_32.h
+++ b/trunk/arch/sparc/include/asm/elf_32.h
@@ -104,6 +104,8 @@ typedef struct {
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB
+#define USE_ELF_CORE_DUMP
+
#define ELF_EXEC_PAGESIZE 4096
diff --git a/trunk/arch/sparc/include/asm/elf_64.h b/trunk/arch/sparc/include/asm/elf_64.h
index ff66bb88537b..d42e393078c4 100644
--- a/trunk/arch/sparc/include/asm/elf_64.h
+++ b/trunk/arch/sparc/include/asm/elf_64.h
@@ -152,6 +152,7 @@ typedef struct {
(x)->e_machine == EM_SPARC32PLUS)
#define compat_start_thread start_thread32
+#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/trunk/arch/sparc/kernel/iommu.c b/trunk/arch/sparc/kernel/iommu.c
index 5fad94950e76..7690cc219ecc 100644
--- a/trunk/arch/sparc/kernel/iommu.c
+++ b/trunk/arch/sparc/kernel/iommu.c
@@ -11,7 +11,6 @@
#include
#include
#include
-#include
#ifdef CONFIG_PCI
#include
@@ -170,7 +169,7 @@ void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long np
entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
- bitmap_clear(arena->map, entry, npages);
+ iommu_area_free(arena->map, entry, npages);
}
int iommu_table_init(struct iommu *iommu, int tsbsize,
diff --git a/trunk/arch/sparc/kernel/ldc.c b/trunk/arch/sparc/kernel/ldc.c
index df39a0f0d27a..e0ba898e30cf 100644
--- a/trunk/arch/sparc/kernel/ldc.c
+++ b/trunk/arch/sparc/kernel/ldc.c
@@ -14,7 +14,6 @@
#include
#include
#include
-#include
#include
#include
@@ -1876,7 +1875,7 @@ EXPORT_SYMBOL(ldc_read);
static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
{
struct iommu_arena *arena = &iommu->arena;
- unsigned long n, start, end, limit;
+ unsigned long n, i, start, end, limit;
int pass;
limit = arena->limit;
@@ -1884,7 +1883,7 @@ static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
pass = 0;
again:
- n = bitmap_find_next_zero_area(arena->map, limit, start, npages, 0);
+ n = find_next_zero_bit(arena->map, limit, start);
end = n + npages;
if (unlikely(end >= limit)) {
if (likely(pass < 1)) {
@@ -1897,7 +1896,16 @@ static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
return -1;
}
}
- bitmap_set(arena->map, n, npages);
+
+ for (i = n; i < end; i++) {
+ if (test_bit(i, arena->map)) {
+ start = i + 1;
+ goto again;
+ }
+ }
+
+ for (i = n; i < end; i++)
+ __set_bit(i, arena->map);
arena->hint = end;
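
The ldc.c hunk above drops bitmap_find_next_zero_area()/bitmap_set() in favour of an open-coded two-stage scan: find the next zero bit, verify that the following npages bits are also clear, and restart just past any set bit that interrupts the run. The following standalone sketch (userspace C with simplified helpers, not the kernel implementation) shows the same search-then-claim loop:

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define MAP_BITS      128UL

/* tiny stand-ins for the kernel's test_bit()/__set_bit() */
static int test_bit(unsigned long nr, const unsigned long *map)
{
	return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1UL;
}

static void set_bit(unsigned long nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static unsigned long find_next_zero_bit(const unsigned long *map,
					unsigned long limit, unsigned long start)
{
	while (start < limit && test_bit(start, map))
		start++;
	return start;		/* == limit when no zero bit is left */
}

/*
 * Find and claim a run of npages clear bits, the way the restored
 * arena_alloc() does: locate a zero bit, check the run, and restart
 * just past any set bit that interrupts it.
 */
static long arena_alloc(unsigned long *map, unsigned long limit,
			unsigned long npages)
{
	unsigned long n, i, start = 0, end;

again:
	n = find_next_zero_bit(map, limit, start);
	end = n + npages;
	if (end >= limit)
		return -1;	/* no room (the real arena retries once from 0) */

	for (i = n; i < end; i++) {
		if (test_bit(i, map)) {
			start = i + 1;	/* run interrupted, resume after it */
			goto again;
		}
	}

	for (i = n; i < end; i++)	/* claim the run */
		set_bit(i, map);

	return (long)n;
}

int main(void)
{
	unsigned long map[MAP_BITS / BITS_PER_LONG] = { 0 };

	set_bit(2, map);		/* pre-existing allocation */
	printf("got run at %ld\n", arena_alloc(map, MAP_BITS, 4));	/* prints 3 */
	printf("got run at %ld\n", arena_alloc(map, MAP_BITS, 4));	/* prints 7 */
	return 0;
}

The second pass that sets the bits is what bitmap_set() collapses into a single call in the lines being removed.
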
diff --git a/trunk/arch/sparc/mm/sun4c.c b/trunk/arch/sparc/mm/sun4c.c
index a89baf0d875a..2ffacd67c424 100644
--- a/trunk/arch/sparc/mm/sun4c.c
+++ b/trunk/arch/sparc/mm/sun4c.c
@@ -17,7 +17,6 @@
#include
#include
#include
-#include
#include
#include
@@ -1022,12 +1021,20 @@ static char *sun4c_lockarea(char *vaddr, unsigned long size)
npages = (((unsigned long)vaddr & ~PAGE_MASK) +
size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+ scan = 0;
local_irq_save(flags);
- base = bitmap_find_next_zero_area(sun4c_iobuffer_map, iobuffer_map_size,
- 0, npages, 0);
- if (base >= iobuffer_map_size)
- goto abend;
+ for (;;) {
+ scan = find_next_zero_bit(sun4c_iobuffer_map,
+ iobuffer_map_size, scan);
+ if ((base = scan) + npages > iobuffer_map_size) goto abend;
+ for (;;) {
+ if (scan >= base + npages) goto found;
+ if (test_bit(scan, sun4c_iobuffer_map)) break;
+ scan++;
+ }
+ }
+found:
high = ((base + npages) << PAGE_SHIFT) + sun4c_iobuffer_start;
high = SUN4C_REAL_PGDIR_ALIGN(high);
while (high > sun4c_iobuffer_high) {
diff --git a/trunk/arch/um/sys-i386/asm/elf.h b/trunk/arch/um/sys-i386/asm/elf.h
index 770885472ed4..d0da9d7c5371 100644
--- a/trunk/arch/um/sys-i386/asm/elf.h
+++ b/trunk/arch/um/sys-i386/asm/elf.h
@@ -48,6 +48,7 @@ typedef struct user_i387_struct elf_fpregset_t;
PT_REGS_EAX(regs) = 0; \
} while (0)
+#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
diff --git a/trunk/arch/um/sys-ppc/asm/elf.h b/trunk/arch/um/sys-ppc/asm/elf.h
index 8aacaf56508d..af9463cd8ce5 100644
--- a/trunk/arch/um/sys-ppc/asm/elf.h
+++ b/trunk/arch/um/sys-ppc/asm/elf.h
@@ -17,6 +17,8 @@ extern long elf_aux_hwcap;
#define ELF_CLASS ELFCLASS32
#endif
+#define USE_ELF_CORE_DUMP
+
#define R_386_NONE 0
#define R_386_32 1
#define R_386_PC32 2
diff --git a/trunk/arch/um/sys-x86_64/asm/elf.h b/trunk/arch/um/sys-x86_64/asm/elf.h
index 49655c83efd2..04b9e87c8dad 100644
--- a/trunk/arch/um/sys-x86_64/asm/elf.h
+++ b/trunk/arch/um/sys-x86_64/asm/elf.h
@@ -104,6 +104,7 @@ extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
clear_thread_flag(TIF_IA32);
#endif
+#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
diff --git a/trunk/arch/x86/include/asm/dma-mapping.h b/trunk/arch/x86/include/asm/dma-mapping.h
index ac91eed21061..0f6c02f3b7d4 100644
--- a/trunk/arch/x86/include/asm/dma-mapping.h
+++ b/trunk/arch/x86/include/asm/dma-mapping.h
@@ -67,7 +67,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
if (!dev->dma_mask)
return 0;
- return addr + size - 1 <= *dev->dma_mask;
+ return addr + size <= *dev->dma_mask;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
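
The dma_capable() change above only matters at the very top of the DMA mask: "addr + size - 1 <= mask" accepts a buffer whose last byte sits exactly at the mask, while the restored "addr + size <= mask" rejects it. A small userspace sketch of that boundary case (hypothetical helper names, illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

/* exclusive-end form, as restored by the hunk above */
static bool capable_end_exclusive(dma_addr_t addr, size_t size, uint64_t mask)
{
	return addr + size <= mask;
}

/* inclusive last-byte form, as removed by the hunk above */
static bool capable_last_byte(dma_addr_t addr, size_t size, uint64_t mask)
{
	return addr + size - 1 <= mask;
}

int main(void)
{
	uint64_t mask = 0xffffffffULL;		/* 32-bit DMA mask */
	dma_addr_t addr = 0xfffff000ULL;	/* buffer's last byte is exactly 0xffffffff */
	size_t size = 0x1000;

	printf("exclusive-end: %d\n", capable_end_exclusive(addr, size, mask));	/* 0: rejected */
	printf("last-byte:     %d\n", capable_last_byte(addr, size, mask));	/* 1: accepted */
	return 0;
}
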
diff --git a/trunk/arch/x86/include/asm/elf.h b/trunk/arch/x86/include/asm/elf.h
index b4501ee223ad..8a024babe5e6 100644
--- a/trunk/arch/x86/include/asm/elf.h
+++ b/trunk/arch/x86/include/asm/elf.h
@@ -239,6 +239,7 @@ extern int force_personality32;
#endif /* !CONFIG_X86_32 */
#define CORE_DUMP_USE_REGSET
+#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/trunk/arch/x86/include/asm/ptrace.h b/trunk/arch/x86/include/asm/ptrace.h
index 9d369f680321..3d11fd0f44c5 100644
--- a/trunk/arch/x86/include/asm/ptrace.h
+++ b/trunk/arch/x86/include/asm/ptrace.h
@@ -292,8 +292,6 @@ extern void user_enable_block_step(struct task_struct *);
#define arch_has_block_step() (boot_cpu_data.x86 >= 6)
#endif
-#define ARCH_HAS_USER_SINGLE_STEP_INFO
-
struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info);
diff --git a/trunk/arch/x86/include/asm/uv/bios.h b/trunk/arch/x86/include/asm/uv/bios.h
index 2751f3075d8b..7ed17ff502b9 100644
--- a/trunk/arch/x86/include/asm/uv/bios.h
+++ b/trunk/arch/x86/include/asm/uv/bios.h
@@ -76,6 +76,15 @@ union partition_info_u {
};
};
+union uv_watchlist_u {
+ u64 val;
+ struct {
+ u64 blade : 16,
+ size : 32,
+ filler : 16;
+ };
+};
+
enum uv_memprotect {
UV_MEMPROT_RESTRICT_ACCESS,
UV_MEMPROT_ALLOW_AMO,
@@ -91,7 +100,7 @@ extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64);
extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *);
extern s64 uv_bios_freq_base(u64, u64 *);
-extern int uv_bios_mq_watchlist_alloc(unsigned long, unsigned int,
+extern int uv_bios_mq_watchlist_alloc(int, unsigned long, unsigned int,
unsigned long *);
extern int uv_bios_mq_watchlist_free(int, int);
extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect);
diff --git a/trunk/arch/x86/include/asm/uv/uv_hub.h b/trunk/arch/x86/include/asm/uv/uv_hub.h
index 811bfabc80b7..d1414af98559 100644
--- a/trunk/arch/x86/include/asm/uv/uv_hub.h
+++ b/trunk/arch/x86/include/asm/uv/uv_hub.h
@@ -172,8 +172,6 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
#define UV_LOCAL_MMR_SIZE (64UL * 1024 * 1024)
#define UV_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024)
-#define UV_GLOBAL_GRU_MMR_BASE 0x4000000
-
#define UV_GLOBAL_MMR32_PNODE_SHIFT 15
#define UV_GLOBAL_MMR64_PNODE_SHIFT 26
@@ -234,26 +232,6 @@ static inline unsigned long uv_gpa(void *v)
return uv_soc_phys_ram_to_gpa(__pa(v));
}
-/* Top two bits indicate the requested address is in MMR space. */
-static inline int
-uv_gpa_in_mmr_space(unsigned long gpa)
-{
- return (gpa >> 62) == 0x3UL;
-}
-
-/* UV global physical address --> socket phys RAM */
-static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
-{
- unsigned long paddr = gpa & uv_hub_info->gpa_mask;
- unsigned long remap_base = uv_hub_info->lowmem_remap_base;
- unsigned long remap_top = uv_hub_info->lowmem_remap_top;
-
- if (paddr >= remap_base && paddr < remap_base + remap_top)
- paddr -= remap_base;
- return paddr;
-}
-
-
/* gnode -> pnode */
static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
{
@@ -329,15 +307,6 @@ static inline unsigned long uv_read_global_mmr64(int pnode,
return readq(uv_global_mmr64_address(pnode, offset));
}
-/*
- * Global MMR space addresses when referenced by the GRU. (GRU does
- * NOT use socket addressing).
- */
-static inline unsigned long uv_global_gru_mmr_address(int pnode, unsigned long offset)
-{
- return UV_GLOBAL_GRU_MMR_BASE | offset | (pnode << uv_hub_info->m_val);
-}
-
/*
* Access hub local MMRs. Faster than using global space but only local MMRs
* are accessible.
@@ -465,14 +434,6 @@ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
}
}
-static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode)
-{
- return (1UL << UVH_IPI_INT_SEND_SHFT) |
- ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
- (mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
- (vector << UVH_IPI_INT_VECTOR_SHFT);
-}
-
static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
{
unsigned long val;
@@ -481,7 +442,10 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
if (vector == NMI_VECTOR)
dmode = dest_NMI;
- val = uv_hub_ipi_value(apicid, vector, dmode);
+ val = (1UL << UVH_IPI_INT_SEND_SHFT) |
+ ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
+ (dmode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
+ (vector << UVH_IPI_INT_VECTOR_SHFT);
uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}
diff --git a/trunk/arch/x86/kernel/amd_iommu.c b/trunk/arch/x86/kernel/amd_iommu.c
index 23824fef789c..b990b5cc9541 100644
--- a/trunk/arch/x86/kernel/amd_iommu.c
+++ b/trunk/arch/x86/kernel/amd_iommu.c
@@ -19,7 +19,7 @@
#include
#include
-#include
+#include
#include
#include
#include
@@ -1162,7 +1162,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
- bitmap_clear(range->bitmap, address, pages);
+ iommu_area_free(range->bitmap, address, pages);
}
diff --git a/trunk/arch/x86/kernel/bios_uv.c b/trunk/arch/x86/kernel/bios_uv.c
index b0206a211b09..63a88e1f987d 100644
--- a/trunk/arch/x86/kernel/bios_uv.c
+++ b/trunk/arch/x86/kernel/bios_uv.c
@@ -101,17 +101,21 @@ s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
}
int
-uv_bios_mq_watchlist_alloc(unsigned long addr, unsigned int mq_size,
+uv_bios_mq_watchlist_alloc(int blade, unsigned long addr, unsigned int mq_size,
unsigned long *intr_mmr_offset)
{
+ union uv_watchlist_u size_blade;
u64 watchlist;
s64 ret;
+ size_blade.size = mq_size;
+ size_blade.blade = blade;
+
/*
* bios returns watchlist number or negative error number.
*/
ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr,
- mq_size, (u64)intr_mmr_offset,
+ size_blade.val, (u64)intr_mmr_offset,
(u64)&watchlist, 0);
if (ret < BIOS_STATUS_SUCCESS)
return ret;
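
uv_bios_mq_watchlist_alloc() now packs the blade number and the queue size into one 64-bit argument through the union added in uv/bios.h, since the BIOS call interface only passes plain u64 values. A minimal userspace sketch of that packing (mirroring the union layout; bit-field ordering is compiler- and ABI-dependent, so this is illustrative only):

#include <stdint.h>
#include <stdio.h>

/*
 * Mirrors union uv_watchlist_u from the hunk above. uint64_t bit-fields
 * and the anonymous struct rely on GNU C / C11 behaviour, as the kernel
 * header does.
 */
union uv_watchlist_u {
	uint64_t val;
	struct {
		uint64_t blade  : 16,
			 size   : 32,
			 filler : 16;
	};
};

int main(void)
{
	union uv_watchlist_u w = { .val = 0 };

	w.size  = 2 * 1024 * 1024;	/* message queue size in bytes */
	w.blade = 3;			/* owning blade */

	/* the single value handed to the BIOS call in place of mq_size */
	printf("packed argument: 0x%016llx\n", (unsigned long long)w.val);
	return 0;
}
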
diff --git a/trunk/arch/x86/kernel/pci-calgary_64.c b/trunk/arch/x86/kernel/pci-calgary_64.c
index 2bbde6078143..c563e4c8ff39 100644
--- a/trunk/arch/x86/kernel/pci-calgary_64.c
+++ b/trunk/arch/x86/kernel/pci-calgary_64.c
@@ -31,7 +31,7 @@
#include
#include
#include
-#include
+#include
#include
#include
#include
@@ -212,7 +212,7 @@ static void iommu_range_reserve(struct iommu_table *tbl,
spin_lock_irqsave(&tbl->it_lock, flags);
- bitmap_set(tbl->it_map, index, npages);
+ iommu_area_reserve(tbl->it_map, index, npages);
spin_unlock_irqrestore(&tbl->it_lock, flags);
}
@@ -303,7 +303,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
spin_lock_irqsave(&tbl->it_lock, flags);
- bitmap_clear(tbl->it_map, entry, npages);
+ iommu_area_free(tbl->it_map, entry, npages);
spin_unlock_irqrestore(&tbl->it_lock, flags);
}
diff --git a/trunk/arch/x86/kernel/pci-gart_64.c b/trunk/arch/x86/kernel/pci-gart_64.c
index 34de53b46f87..56c0e730d3fe 100644
--- a/trunk/arch/x86/kernel/pci-gart_64.c
+++ b/trunk/arch/x86/kernel/pci-gart_64.c
@@ -23,7 +23,7 @@
#include
#include
#include
-#include
+#include
#include
#include
#include
@@ -126,7 +126,7 @@ static void free_iommu(unsigned long offset, int size)
unsigned long flags;
spin_lock_irqsave(&iommu_bitmap_lock, flags);
- bitmap_clear(iommu_gart_bitmap, offset, size);
+ iommu_area_free(iommu_gart_bitmap, offset, size);
if (offset >= next_bit)
next_bit = offset + size;
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
@@ -792,7 +792,7 @@ int __init gart_iommu_init(void)
* Out of IOMMU space handling.
* Reserve some invalid pages at the beginning of the GART.
*/
- bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
+ iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
iommu_size >> 20);
diff --git a/trunk/arch/x86/kernel/ptrace.c b/trunk/arch/x86/kernel/ptrace.c
index 2779321046bd..7079ddaf0731 100644
--- a/trunk/arch/x86/kernel/ptrace.c
+++ b/trunk/arch/x86/kernel/ptrace.c
@@ -1676,33 +1676,21 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
#endif
}
-static void fill_sigtrap_info(struct task_struct *tsk,
- struct pt_regs *regs,
- int error_code, int si_code,
- struct siginfo *info)
+void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
+ int error_code, int si_code)
{
+ struct siginfo info;
+
tsk->thread.trap_no = 1;
tsk->thread.error_code = error_code;
- memset(info, 0, sizeof(*info));
- info->si_signo = SIGTRAP;
- info->si_code = si_code;
- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
-}
+ memset(&info, 0, sizeof(info));
+ info.si_signo = SIGTRAP;
+ info.si_code = si_code;
-void user_single_step_siginfo(struct task_struct *tsk,
- struct pt_regs *regs,
- struct siginfo *info)
-{
- fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
-}
+ /* User-mode ip? */
+ info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
-void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
- int error_code, int si_code)
-{
- struct siginfo info;
-
- fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
/* Send us the fake SIGTRAP */
force_sig_info(SIGTRAP, &info, tsk);
}
@@ -1767,22 +1755,29 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
asmregparm void syscall_trace_leave(struct pt_regs *regs)
{
- bool step;
-
if (unlikely(current->audit_context))
audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->ax);
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, 0);
+
/*
* If TIF_SYSCALL_EMU is set, we only get here because of
* TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
* We already reported this syscall instruction in
- * syscall_trace_enter().
+ * syscall_trace_enter(), so don't do any more now.
+ */
+ if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
+ return;
+
+ /*
+ * If we are single-stepping, synthesize a trap to follow the
+ * system call instruction.
*/
- step = unlikely(test_thread_flag(TIF_SINGLESTEP)) &&
- !test_thread_flag(TIF_SYSCALL_EMU);
- if (step || test_thread_flag(TIF_SYSCALL_TRACE))
- tracehook_report_syscall_exit(regs, step);
+ if (test_thread_flag(TIF_SINGLESTEP) &&
+ tracehook_consider_fatal_signal(current, SIGTRAP))
+ send_sigtrap(current, regs, 0, TRAP_BRKPT);
}
diff --git a/trunk/arch/xtensa/include/asm/elf.h b/trunk/arch/xtensa/include/asm/elf.h
index 5eb6d695e987..c3f53e755ca5 100644
--- a/trunk/arch/xtensa/include/asm/elf.h
+++ b/trunk/arch/xtensa/include/asm/elf.h
@@ -123,6 +123,7 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_XTENSA
+#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*
diff --git a/trunk/block/blk-settings.c b/trunk/block/blk-settings.c
index dd1f1e0e196f..6ae118d6e193 100644
--- a/trunk/block/blk-settings.c
+++ b/trunk/block/blk-settings.c
@@ -554,11 +554,18 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
ret = -1;
}
+ /*
+ * Temporarily disable discard granularity. It's currently buggy
+ * since we default to 0 for discard_granularity, hence this
+ * "failure" will always trigger for non-zero offsets.
+ */
+#if 0
if (offset &&
(offset & (b->discard_granularity - 1)) != b->discard_alignment) {
t->discard_misaligned = 1;
ret = -1;
}
+#endif
/* If top has no alignment offset, inherit from bottom */
if (!t->alignment_offset)
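
The comment in the blk_stack_limits() hunk above describes an arithmetic trap: with discard_granularity left at its default of 0, the mask (granularity - 1) underflows to all ones, so "offset & mask" is just the offset and every non-zero offset is reported as misaligned. A standalone sketch of that arithmetic (illustrative values only):

#include <stdio.h>

int main(void)
{
	unsigned int granularity = 0;		/* default discard_granularity */
	unsigned int alignment   = 0;		/* default discard_alignment   */
	unsigned int offset      = 7 * 512;	/* any non-zero partition offset */

	/*
	 * The check disabled by the #if 0 above: granularity - 1 wraps to
	 * all ones, so the masked offset never matches the alignment.
	 */
	if ((offset & (granularity - 1)) != alignment)
		printf("flagged as misaligned (offset & mask = %u)\n",
		       offset & (granularity - 1));
	return 0;
}
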
diff --git a/trunk/drivers/atm/iphase.c b/trunk/drivers/atm/iphase.c
index 25a4c86f839b..f734b345ac71 100644
--- a/trunk/drivers/atm/iphase.c
+++ b/trunk/drivers/atm/iphase.c
@@ -557,7 +557,7 @@ static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
} /* while */
// Move this VCI number into this location of the CBR Sched table.
- memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
+ memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex,sizeof(TstSchedTbl));
dev->CbrRemEntries--;
toBeAssigned--;
} /* while */
diff --git a/trunk/drivers/char/efirtc.c b/trunk/drivers/char/efirtc.c
index 53c524e7b829..26a47dc88f61 100644
--- a/trunk/drivers/char/efirtc.c
+++ b/trunk/drivers/char/efirtc.c
@@ -285,7 +285,6 @@ static const struct file_operations efi_rtc_fops = {
.unlocked_ioctl = efi_rtc_ioctl,
.open = efi_rtc_open,
.release = efi_rtc_close,
- .llseek = no_llseek,
};
static struct miscdevice efi_rtc_dev= {
diff --git a/trunk/drivers/char/ipmi/ipmi_kcs_sm.c b/trunk/drivers/char/ipmi/ipmi_kcs_sm.c
index cf82fedae099..80704875794c 100644
--- a/trunk/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/trunk/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -370,7 +370,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
return SI_SM_IDLE;
case KCS_START_OP:
- if (state != KCS_IDLE_STATE) {
+ if (state != KCS_IDLE) {
start_error_recovery(kcs,
"State machine not idle at start");
break;
diff --git a/trunk/drivers/char/keyboard.c b/trunk/drivers/char/keyboard.c
index f706b1dffdb3..5619007e7e05 100644
--- a/trunk/drivers/char/keyboard.c
+++ b/trunk/drivers/char/keyboard.c
@@ -233,8 +233,7 @@ int setkeycode(unsigned int scancode, unsigned int keycode)
}
/*
- * Making beeps and bells. Note that we prefer beeps to bells, but when
- * shutting the sound off we do both.
+ * Making beeps and bells.
*/
static int kd_sound_helper(struct input_handle *handle, void *data)
@@ -243,12 +242,9 @@ static int kd_sound_helper(struct input_handle *handle, void *data)
struct input_dev *dev = handle->dev;
if (test_bit(EV_SND, dev->evbit)) {
- if (test_bit(SND_TONE, dev->sndbit)) {
+ if (test_bit(SND_TONE, dev->sndbit))
input_inject_event(handle, EV_SND, SND_TONE, *hz);
- if (*hz)
- return 0;
- }
- if (test_bit(SND_BELL, dev->sndbit))
+ if (test_bit(SND_BELL, handle->dev->sndbit))
input_inject_event(handle, EV_SND, SND_BELL, *hz ? 1 : 0);
}
diff --git a/trunk/drivers/char/sysrq.c b/trunk/drivers/char/sysrq.c
index 1ae2de7d8b4f..44203ff599da 100644
--- a/trunk/drivers/char/sysrq.c
+++ b/trunk/drivers/char/sysrq.c
@@ -339,7 +339,7 @@ static struct sysrq_key_op sysrq_term_op = {
static void moom_callback(struct work_struct *ignored)
{
- out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0, NULL);
+ out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0);
}
static DECLARE_WORK(moom_work, moom_callback);
diff --git a/trunk/drivers/char/vt.c b/trunk/drivers/char/vt.c
index 50faa1fb0f06..e43fbc66aef0 100644
--- a/trunk/drivers/char/vt.c
+++ b/trunk/drivers/char/vt.c
@@ -164,9 +164,6 @@ module_param(default_utf8, int, S_IRUGO | S_IWUSR);
int global_cursor_default = -1;
module_param(global_cursor_default, int, S_IRUGO | S_IWUSR);
-static int cur_default = CUR_DEFAULT;
-module_param(cur_default, int, S_IRUGO | S_IWUSR);
-
/*
* ignore_poke: don't unblank the screen when things are typed. This is
* mainly for the privacy of braille terminal users.
@@ -1639,7 +1636,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
/* do not do set_leds here because this causes an endless tasklet loop
when the keyboard hasn't been initialized yet */
- vc->vc_cursor_type = cur_default;
+ vc->vc_cursor_type = CUR_DEFAULT;
vc->vc_complement_mask = vc->vc_s_complement_mask;
default_attr(vc);
@@ -1841,7 +1838,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
if (vc->vc_par[0])
vc->vc_cursor_type = vc->vc_par[0] | (vc->vc_par[1] << 8) | (vc->vc_par[2] << 16);
else
- vc->vc_cursor_type = cur_default;
+ vc->vc_cursor_type = CUR_DEFAULT;
return;
}
break;
diff --git a/trunk/drivers/dma/Kconfig b/trunk/drivers/dma/Kconfig
index e02d74b1e892..eb140ff38c27 100644
--- a/trunk/drivers/dma/Kconfig
+++ b/trunk/drivers/dma/Kconfig
@@ -111,24 +111,6 @@ config SH_DMAE
help
Enable support for the Renesas SuperH DMA controllers.
-config COH901318
- bool "ST-Ericsson COH901318 DMA support"
- select DMA_ENGINE
- depends on ARCH_U300
- help
- Enable support for ST-Ericsson COH 901 318 DMA.
-
-config AMCC_PPC440SPE_ADMA
- tristate "AMCC PPC440SPe ADMA support"
- depends on 440SPe || 440SP
- select DMA_ENGINE
- select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
- help
- Enable support for the AMCC PPC440SPe RAID engines.
-
-config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
- bool
-
config DMA_ENGINE
bool
diff --git a/trunk/drivers/dma/Makefile b/trunk/drivers/dma/Makefile
index 807053d48232..eca71ba78ae9 100644
--- a/trunk/drivers/dma/Makefile
+++ b/trunk/drivers/dma/Makefile
@@ -10,5 +10,3 @@ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_SH_DMAE) += shdma.o
-obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
-obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
diff --git a/trunk/drivers/dma/coh901318.c b/trunk/drivers/dma/coh901318.c
deleted file mode 100644
index 4a99cd94536b..000000000000
--- a/trunk/drivers/dma/coh901318.c
+++ /dev/null
@@ -1,1325 +0,0 @@
-/*
- * driver/dma/coh901318.c
- *
- * Copyright (C) 2007-2009 ST-Ericsson
- * License terms: GNU General Public License (GPL) version 2
- * DMA driver for COH 901 318
- * Author: Per Friden
- */
-
-#include
-#include
-#include /* printk() */
-#include /* everything... */
-#include /* kmalloc() */
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "coh901318_lli.h"
-
-#define COHC_2_DEV(cohc) (&cohc->chan.dev->device)
-
-#ifdef VERBOSE_DEBUG
-#define COH_DBG(x) ({ if (1) x; 0; })
-#else
-#define COH_DBG(x) ({ if (0) x; 0; })
-#endif
-
-struct coh901318_desc {
- struct dma_async_tx_descriptor desc;
- struct list_head node;
- struct scatterlist *sg;
- unsigned int sg_len;
- struct coh901318_lli *data;
- enum dma_data_direction dir;
- int pending_irqs;
- unsigned long flags;
-};
-
-struct coh901318_base {
- struct device *dev;
- void __iomem *virtbase;
- struct coh901318_pool pool;
- struct powersave pm;
- struct dma_device dma_slave;
- struct dma_device dma_memcpy;
- struct coh901318_chan *chans;
- struct coh901318_platform *platform;
-};
-
-struct coh901318_chan {
- spinlock_t lock;
- int allocated;
- int completed;
- int id;
- int stopped;
-
- struct work_struct free_work;
- struct dma_chan chan;
-
- struct tasklet_struct tasklet;
-
- struct list_head active;
- struct list_head queue;
- struct list_head free;
-
- unsigned long nbr_active_done;
- unsigned long busy;
- int pending_irqs;
-
- struct coh901318_base *base;
-};
-
-static void coh901318_list_print(struct coh901318_chan *cohc,
- struct coh901318_lli *lli)
-{
- struct coh901318_lli *l;
- dma_addr_t addr = virt_to_phys(lli);
- int i = 0;
-
- while (addr) {
- l = phys_to_virt(addr);
- dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x"
- ", dst 0x%x, link 0x%x link_virt 0x%p\n",
- i, l, l->control, l->src_addr, l->dst_addr,
- l->link_addr, phys_to_virt(l->link_addr));
- i++;
- addr = l->link_addr;
- }
-}
-
-#ifdef CONFIG_DEBUG_FS
-
-#define COH901318_DEBUGFS_ASSIGN(x, y) (x = y)
-
-static struct coh901318_base *debugfs_dma_base;
-static struct dentry *dma_dentry;
-
-static int coh901318_debugfs_open(struct inode *inode, struct file *file)
-{
-
- file->private_data = inode->i_private;
- return 0;
-}
-
-static int coh901318_debugfs_read(struct file *file, char __user *buf,
- size_t count, loff_t *f_pos)
-{
- u64 started_channels = debugfs_dma_base->pm.started_channels;
- int pool_count = debugfs_dma_base->pool.debugfs_pool_counter;
- int i;
- int ret = 0;
- char *dev_buf;
- char *tmp;
- int dev_size;
-
- dev_buf = kmalloc(4*1024, GFP_KERNEL);
- if (dev_buf == NULL)
- goto err_kmalloc;
- tmp = dev_buf;
-
- tmp += sprintf(tmp, "DMA -- enable dma channels\n");
-
- for (i = 0; i < debugfs_dma_base->platform->max_channels; i++)
- if (started_channels & (1 << i))
- tmp += sprintf(tmp, "channel %d\n", i);
-
- tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count);
- dev_size = tmp - dev_buf;
-
- /* No more to read if offset != 0 */
- if (*f_pos > dev_size)
- goto out;
-
- if (count > dev_size - *f_pos)
- count = dev_size - *f_pos;
-
- if (copy_to_user(buf, dev_buf + *f_pos, count))
- ret = -EINVAL;
- ret = count;
- *f_pos += count;
-
- out:
- kfree(dev_buf);
- return ret;
-
- err_kmalloc:
- return 0;
-}
-
-static const struct file_operations coh901318_debugfs_status_operations = {
- .owner = THIS_MODULE,
- .open = coh901318_debugfs_open,
- .read = coh901318_debugfs_read,
-};
-
-
-static int __init init_coh901318_debugfs(void)
-{
-
- dma_dentry = debugfs_create_dir("dma", NULL);
-
- (void) debugfs_create_file("status",
- S_IFREG | S_IRUGO,
- dma_dentry, NULL,
- &coh901318_debugfs_status_operations);
- return 0;
-}
-
-static void __exit exit_coh901318_debugfs(void)
-{
- debugfs_remove_recursive(dma_dentry);
-}
-
-module_init(init_coh901318_debugfs);
-module_exit(exit_coh901318_debugfs);
-#else
-
-#define COH901318_DEBUGFS_ASSIGN(x, y)
-
-#endif /* CONFIG_DEBUG_FS */
-
-static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
-{
- return container_of(chan, struct coh901318_chan, chan);
-}
-
-static inline dma_addr_t
-cohc_dev_addr(struct coh901318_chan *cohc)
-{
- return cohc->base->platform->chan_conf[cohc->id].dev_addr;
-}
-
-static inline const struct coh901318_params *
-cohc_chan_param(struct coh901318_chan *cohc)
-{
- return &cohc->base->platform->chan_conf[cohc->id].param;
-}
-
-static inline const struct coh_dma_channel *
-cohc_chan_conf(struct coh901318_chan *cohc)
-{
- return &cohc->base->platform->chan_conf[cohc->id];
-}
-
-static void enable_powersave(struct coh901318_chan *cohc)
-{
- unsigned long flags;
- struct powersave *pm = &cohc->base->pm;
-
- spin_lock_irqsave(&pm->lock, flags);
-
- pm->started_channels &= ~(1ULL << cohc->id);
-
- if (!pm->started_channels) {
- /* DMA no longer intends to access memory */
- cohc->base->platform->access_memory_state(cohc->base->dev,
- false);
- }
-
- spin_unlock_irqrestore(&pm->lock, flags);
-}
-static void disable_powersave(struct coh901318_chan *cohc)
-{
- unsigned long flags;
- struct powersave *pm = &cohc->base->pm;
-
- spin_lock_irqsave(&pm->lock, flags);
-
- if (!pm->started_channels) {
- /* DMA intends to access memory */
- cohc->base->platform->access_memory_state(cohc->base->dev,
- true);
- }
-
- pm->started_channels |= (1ULL << cohc->id);
-
- spin_unlock_irqrestore(&pm->lock, flags);
-}
-
-static inline int coh901318_set_ctrl(struct coh901318_chan *cohc, u32 control)
-{
- int channel = cohc->id;
- void __iomem *virtbase = cohc->base->virtbase;
-
- writel(control,
- virtbase + COH901318_CX_CTRL +
- COH901318_CX_CTRL_SPACING * channel);
- return 0;
-}
-
-static inline int coh901318_set_conf(struct coh901318_chan *cohc, u32 conf)
-{
- int channel = cohc->id;
- void __iomem *virtbase = cohc->base->virtbase;
-
- writel(conf,
- virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING*channel);
- return 0;
-}
-
-
-static int coh901318_start(struct coh901318_chan *cohc)
-{
- u32 val;
- int channel = cohc->id;
- void __iomem *virtbase = cohc->base->virtbase;
-
- disable_powersave(cohc);
-
- val = readl(virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING * channel);
-
- /* Enable channel */
- val |= COH901318_CX_CFG_CH_ENABLE;
- writel(val, virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING * channel);
-
- return 0;
-}
-
-static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
- struct coh901318_lli *data)
-{
- int channel = cohc->id;
- void __iomem *virtbase = cohc->base->virtbase;
-
- BUG_ON(readl(virtbase + COH901318_CX_STAT +
- COH901318_CX_STAT_SPACING*channel) &
- COH901318_CX_STAT_ACTIVE);
-
- writel(data->src_addr,
- virtbase + COH901318_CX_SRC_ADDR +
- COH901318_CX_SRC_ADDR_SPACING * channel);
-
- writel(data->dst_addr, virtbase +
- COH901318_CX_DST_ADDR +
- COH901318_CX_DST_ADDR_SPACING * channel);
-
- writel(data->link_addr, virtbase + COH901318_CX_LNK_ADDR +
- COH901318_CX_LNK_ADDR_SPACING * channel);
-
- writel(data->control, virtbase + COH901318_CX_CTRL +
- COH901318_CX_CTRL_SPACING * channel);
-
- return 0;
-}
-static dma_cookie_t
-coh901318_assign_cookie(struct coh901318_chan *cohc,
- struct coh901318_desc *cohd)
-{
- dma_cookie_t cookie = cohc->chan.cookie;
-
- if (++cookie < 0)
- cookie = 1;
-
- cohc->chan.cookie = cookie;
- cohd->desc.cookie = cookie;
-
- return cookie;
-}
-
-static struct coh901318_desc *
-coh901318_desc_get(struct coh901318_chan *cohc)
-{
- struct coh901318_desc *desc;
-
- if (list_empty(&cohc->free)) {
- /* alloc new desc because we're out of used ones
- * TODO: alloc a pile of descs instead of just one,
- * avoid many small allocations.
- */
- desc = kmalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
- if (desc == NULL)
- goto out;
- INIT_LIST_HEAD(&desc->node);
- } else {
- /* Reuse an old desc. */
- desc = list_first_entry(&cohc->free,
- struct coh901318_desc,
- node);
- list_del(&desc->node);
- }
-
- out:
- return desc;
-}
-
-static void
-coh901318_desc_free(struct coh901318_chan *cohc, struct coh901318_desc *cohd)
-{
- list_add_tail(&cohd->node, &cohc->free);
-}
-
-/* call with irq lock held */
-static void
-coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
-{
- list_add_tail(&desc->node, &cohc->active);
-
- BUG_ON(cohc->pending_irqs != 0);
-
- cohc->pending_irqs = desc->pending_irqs;
-}
-
-static struct coh901318_desc *
-coh901318_first_active_get(struct coh901318_chan *cohc)
-{
- struct coh901318_desc *d;
-
- if (list_empty(&cohc->active))
- return NULL;
-
- d = list_first_entry(&cohc->active,
- struct coh901318_desc,
- node);
- return d;
-}
-
-static void
-coh901318_desc_remove(struct coh901318_desc *cohd)
-{
- list_del(&cohd->node);
-}
-
-static void
-coh901318_desc_queue(struct coh901318_chan *cohc, struct coh901318_desc *desc)
-{
- list_add_tail(&desc->node, &cohc->queue);
-}
-
-static struct coh901318_desc *
-coh901318_first_queued(struct coh901318_chan *cohc)
-{
- struct coh901318_desc *d;
-
- if (list_empty(&cohc->queue))
- return NULL;
-
- d = list_first_entry(&cohc->queue,
- struct coh901318_desc,
- node);
- return d;
-}
-
-/*
- * DMA start/stop controls
- */
-u32 coh901318_get_bytes_left(struct dma_chan *chan)
-{
- unsigned long flags;
- u32 ret;
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- /* Read transfer count value */
- ret = readl(cohc->base->virtbase +
- COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING *
- cohc->id) & COH901318_CX_CTRL_TC_VALUE_MASK;
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(coh901318_get_bytes_left);
-
-
-/* Stops a transfer without losing data. Enables power save.
- Use this function in conjunction with coh901318_continue(..)
-*/
-void coh901318_stop(struct dma_chan *chan)
-{
- u32 val;
- unsigned long flags;
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- int channel = cohc->id;
- void __iomem *virtbase = cohc->base->virtbase;
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- /* Disable channel in HW */
- val = readl(virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING * channel);
-
- /* Stopping infinit transfer */
- if ((val & COH901318_CX_CTRL_TC_ENABLE) == 0 &&
- (val & COH901318_CX_CFG_CH_ENABLE))
- cohc->stopped = 1;
-
-
- val &= ~COH901318_CX_CFG_CH_ENABLE;
- /* Enable twice, HW bug work around */
- writel(val, virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING * channel);
- writel(val, virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING * channel);
-
- /* Spin-wait for it to actually go inactive */
- while (readl(virtbase + COH901318_CX_STAT+COH901318_CX_STAT_SPACING *
- channel) & COH901318_CX_STAT_ACTIVE)
- cpu_relax();
-
- /* Check if we stopped an active job */
- if ((readl(virtbase + COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING *
- channel) & COH901318_CX_CTRL_TC_VALUE_MASK) > 0)
- cohc->stopped = 1;
-
- enable_powersave(cohc);
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-}
-EXPORT_SYMBOL(coh901318_stop);
-
-/* Continues a transfer that has been stopped via 300_dma_stop(..).
- Power save is handled.
-*/
-void coh901318_continue(struct dma_chan *chan)
-{
- u32 val;
- unsigned long flags;
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- int channel = cohc->id;
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- disable_powersave(cohc);
-
- if (cohc->stopped) {
- /* Enable channel in HW */
- val = readl(cohc->base->virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING * channel);
-
- val |= COH901318_CX_CFG_CH_ENABLE;
-
- writel(val, cohc->base->virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING*channel);
-
- cohc->stopped = 0;
- }
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-}
-EXPORT_SYMBOL(coh901318_continue);
-
-bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
-{
- unsigned int ch_nr = (unsigned int) chan_id;
-
- if (ch_nr == to_coh901318_chan(chan)->id)
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(coh901318_filter_id);
-
-/*
- * DMA channel allocation
- */
-static int coh901318_config(struct coh901318_chan *cohc,
- struct coh901318_params *param)
-{
- unsigned long flags;
- const struct coh901318_params *p;
- int channel = cohc->id;
- void __iomem *virtbase = cohc->base->virtbase;
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- if (param)
- p = param;
- else
- p = &cohc->base->platform->chan_conf[channel].param;
-
- /* Clear any pending BE or TC interrupt */
- if (channel < 32) {
- writel(1 << channel, virtbase + COH901318_BE_INT_CLEAR1);
- writel(1 << channel, virtbase + COH901318_TC_INT_CLEAR1);
- } else {
- writel(1 << (channel - 32), virtbase +
- COH901318_BE_INT_CLEAR2);
- writel(1 << (channel - 32), virtbase +
- COH901318_TC_INT_CLEAR2);
- }
-
- coh901318_set_conf(cohc, p->config);
- coh901318_set_ctrl(cohc, p->ctrl_lli_last);
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-
- return 0;
-}
-
-/* must lock when calling this function
- * start queued jobs, if any
- * TODO: start all queued jobs in one go
- *
- * Returns descriptor if queued job is started otherwise NULL.
- * If the queue is empty NULL is returned.
- */
-static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
-{
- struct coh901318_desc *cohd_que;
-
- /* start queued jobs, if any
- * TODO: transmit all queued jobs in one go
- */
- cohd_que = coh901318_first_queued(cohc);
-
- if (cohd_que != NULL) {
- /* Remove from queue */
- coh901318_desc_remove(cohd_que);
- /* initiate DMA job */
- cohc->busy = 1;
-
- coh901318_desc_submit(cohc, cohd_que);
-
- coh901318_prep_linked_list(cohc, cohd_que->data);
-
- /* start dma job */
- coh901318_start(cohc);
-
- }
-
- return cohd_que;
-}
-
-static void dma_tasklet(unsigned long data)
-{
- struct coh901318_chan *cohc = (struct coh901318_chan *) data;
- struct coh901318_desc *cohd_fin;
- unsigned long flags;
- dma_async_tx_callback callback;
- void *callback_param;
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- /* get first active entry from list */
- cohd_fin = coh901318_first_active_get(cohc);
-
- BUG_ON(cohd_fin->pending_irqs == 0);
-
- if (cohd_fin == NULL)
- goto err;
-
- cohd_fin->pending_irqs--;
- cohc->completed = cohd_fin->desc.cookie;
-
- BUG_ON(cohc->nbr_active_done && cohd_fin == NULL);
-
- if (cohc->nbr_active_done == 0)
- return;
-
- if (!cohd_fin->pending_irqs) {
- /* release the lli allocation*/
- coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
- }
-
- dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d pending_irqs %d"
- " nbr_active_done %ld\n", __func__,
- cohc->id, cohc->pending_irqs, cohc->nbr_active_done);
-
- /* callback to client */
- callback = cohd_fin->desc.callback;
- callback_param = cohd_fin->desc.callback_param;
-
- if (!cohd_fin->pending_irqs) {
- coh901318_desc_remove(cohd_fin);
-
- /* return desc to free-list */
- coh901318_desc_free(cohc, cohd_fin);
- }
-
- if (cohc->nbr_active_done)
- cohc->nbr_active_done--;
-
- if (cohc->nbr_active_done) {
- if (cohc_chan_conf(cohc)->priority_high)
- tasklet_hi_schedule(&cohc->tasklet);
- else
- tasklet_schedule(&cohc->tasklet);
- }
- spin_unlock_irqrestore(&cohc->lock, flags);
-
- if (callback)
- callback(callback_param);
-
- return;
-
- err:
- spin_unlock_irqrestore(&cohc->lock, flags);
- dev_err(COHC_2_DEV(cohc), "[%s] No active dma desc\n", __func__);
-}
-
-
-/* called from interrupt context */
-static void dma_tc_handle(struct coh901318_chan *cohc)
-{
- BUG_ON(!cohc->allocated && (list_empty(&cohc->active) ||
- list_empty(&cohc->queue)));
-
- if (!cohc->allocated)
- return;
-
- BUG_ON(cohc->pending_irqs == 0);
-
- cohc->pending_irqs--;
- cohc->nbr_active_done++;
-
- if (cohc->pending_irqs == 0 && coh901318_queue_start(cohc) == NULL)
- cohc->busy = 0;
-
- BUG_ON(list_empty(&cohc->active));
-
- if (cohc_chan_conf(cohc)->priority_high)
- tasklet_hi_schedule(&cohc->tasklet);
- else
- tasklet_schedule(&cohc->tasklet);
-}
-
-
-static irqreturn_t dma_irq_handler(int irq, void *dev_id)
-{
- u32 status1;
- u32 status2;
- int i;
- int ch;
- struct coh901318_base *base = dev_id;
- struct coh901318_chan *cohc;
- void __iomem *virtbase = base->virtbase;
-
- status1 = readl(virtbase + COH901318_INT_STATUS1);
- status2 = readl(virtbase + COH901318_INT_STATUS2);
-
- if (unlikely(status1 == 0 && status2 == 0)) {
- dev_warn(base->dev, "spurious DMA IRQ from no channel!\n");
- return IRQ_HANDLED;
- }
-
- /* TODO: consider handle IRQ in tasklet here to
- * minimize interrupt latency */
-
- /* Check the first 32 DMA channels for IRQ */
- while (status1) {
- /* Find first bit set, return as a number. */
- i = ffs(status1) - 1;
- ch = i;
-
- cohc = &base->chans[ch];
- spin_lock(&cohc->lock);
-
- /* Mask off this bit */
- status1 &= ~(1 << i);
- /* Check the individual channel bits */
- if (test_bit(i, virtbase + COH901318_BE_INT_STATUS1)) {
- dev_crit(COHC_2_DEV(cohc),
- "DMA bus error on channel %d!\n", ch);
- BUG_ON(1);
- /* Clear BE interrupt */
- __set_bit(i, virtbase + COH901318_BE_INT_CLEAR1);
- } else {
- /* Caused by TC, really? */
- if (unlikely(!test_bit(i, virtbase +
- COH901318_TC_INT_STATUS1))) {
- dev_warn(COHC_2_DEV(cohc),
- "ignoring interrupt not caused by terminal count on channel %d\n", ch);
- /* Clear TC interrupt */
- BUG_ON(1);
- __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
- } else {
- /* Enable powersave if transfer has finished */
- if (!(readl(virtbase + COH901318_CX_STAT +
- COH901318_CX_STAT_SPACING*ch) &
- COH901318_CX_STAT_ENABLED)) {
- enable_powersave(cohc);
- }
-
- /* Must clear TC interrupt before calling
- * dma_tc_handle
- * in case tc_handle initate a new dma job
- */
- __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
-
- dma_tc_handle(cohc);
- }
- }
- spin_unlock(&cohc->lock);
- }
-
- /* Check the remaining 32 DMA channels for IRQ */
- while (status2) {
- /* Find first bit set, return as a number. */
- i = ffs(status2) - 1;
- ch = i + 32;
- cohc = &base->chans[ch];
- spin_lock(&cohc->lock);
-
- /* Mask off this bit */
- status2 &= ~(1 << i);
- /* Check the individual channel bits */
- if (test_bit(i, virtbase + COH901318_BE_INT_STATUS2)) {
- dev_crit(COHC_2_DEV(cohc),
- "DMA bus error on channel %d!\n", ch);
- /* Clear BE interrupt */
- BUG_ON(1);
- __set_bit(i, virtbase + COH901318_BE_INT_CLEAR2);
- } else {
- /* Caused by TC, really? */
- if (unlikely(!test_bit(i, virtbase +
- COH901318_TC_INT_STATUS2))) {
- dev_warn(COHC_2_DEV(cohc),
- "ignoring interrupt not caused by terminal count on channel %d\n", ch);
- /* Clear TC interrupt */
- __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
- BUG_ON(1);
- } else {
- /* Enable powersave if transfer has finished */
- if (!(readl(virtbase + COH901318_CX_STAT +
- COH901318_CX_STAT_SPACING*ch) &
- COH901318_CX_STAT_ENABLED)) {
- enable_powersave(cohc);
- }
- /* Must clear TC interrupt before calling
- * dma_tc_handle
- * in case tc_handle initate a new dma job
- */
- __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
-
- dma_tc_handle(cohc);
- }
- }
- spin_unlock(&cohc->lock);
- }
-
- return IRQ_HANDLED;
-}
-
-static int coh901318_alloc_chan_resources(struct dma_chan *chan)
-{
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
-
- dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n",
- __func__, cohc->id);
-
- if (chan->client_count > 1)
- return -EBUSY;
-
- coh901318_config(cohc, NULL);
-
- cohc->allocated = 1;
- cohc->completed = chan->cookie = 1;
-
- return 1;
-}
-
-static void
-coh901318_free_chan_resources(struct dma_chan *chan)
-{
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- int channel = cohc->id;
- unsigned long flags;
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- /* Disable HW */
- writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING*channel);
- writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CTRL +
- COH901318_CX_CTRL_SPACING*channel);
-
- cohc->allocated = 0;
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-
- chan->device->device_terminate_all(chan);
-}
-
-
-static dma_cookie_t
-coh901318_tx_submit(struct dma_async_tx_descriptor *tx)
-{
- struct coh901318_desc *cohd = container_of(tx, struct coh901318_desc,
- desc);
- struct coh901318_chan *cohc = to_coh901318_chan(tx->chan);
- unsigned long flags;
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- tx->cookie = coh901318_assign_cookie(cohc, cohd);
-
- coh901318_desc_queue(cohc, cohd);
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-
- return tx->cookie;
-}
-
-static struct dma_async_tx_descriptor *
-coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
- size_t size, unsigned long flags)
-{
- struct coh901318_lli *data;
- struct coh901318_desc *cohd;
- unsigned long flg;
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- int lli_len;
- u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
-
- spin_lock_irqsave(&cohc->lock, flg);
-
- dev_vdbg(COHC_2_DEV(cohc),
- "[%s] channel %d src 0x%x dest 0x%x size %d\n",
- __func__, cohc->id, src, dest, size);
-
- if (flags & DMA_PREP_INTERRUPT)
- /* Trigger interrupt after last lli */
- ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
-
- lli_len = size >> MAX_DMA_PACKET_SIZE_SHIFT;
- if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size)
- lli_len++;
-
- data = coh901318_lli_alloc(&cohc->base->pool, lli_len);
-
- if (data == NULL)
- goto err;
-
- cohd = coh901318_desc_get(cohc);
- cohd->sg = NULL;
- cohd->sg_len = 0;
- cohd->data = data;
-
- cohd->pending_irqs =
- coh901318_lli_fill_memcpy(
- &cohc->base->pool, data, src, size, dest,
- cohc_chan_param(cohc)->ctrl_lli_chained,
- ctrl_last);
- cohd->flags = flags;
-
- COH_DBG(coh901318_list_print(cohc, data));
-
- dma_async_tx_descriptor_init(&cohd->desc, chan);
-
- cohd->desc.tx_submit = coh901318_tx_submit;
-
- spin_unlock_irqrestore(&cohc->lock, flg);
-
- return &cohd->desc;
- err:
- spin_unlock_irqrestore(&cohc->lock, flg);
- return NULL;
-}
-
-static struct dma_async_tx_descriptor *
-coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
- unsigned long flags)
-{
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- struct coh901318_lli *data;
- struct coh901318_desc *cohd;
- struct scatterlist *sg;
- int len = 0;
- int size;
- int i;
- u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained;
- u32 ctrl = cohc_chan_param(cohc)->ctrl_lli;
- u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
- unsigned long flg;
-
- if (!sgl)
- goto out;
- if (sgl->length == 0)
- goto out;
-
- spin_lock_irqsave(&cohc->lock, flg);
-
- dev_vdbg(COHC_2_DEV(cohc), "[%s] sg_len %d dir %d\n",
- __func__, sg_len, direction);
-
- if (flags & DMA_PREP_INTERRUPT)
- /* Trigger interrupt after last lli */
- ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
-
- cohd = coh901318_desc_get(cohc);
- cohd->sg = NULL;
- cohd->sg_len = 0;
- cohd->dir = direction;
-
- if (direction == DMA_TO_DEVICE) {
- u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
-
- ctrl_chained |= tx_flags;
- ctrl_last |= tx_flags;
- ctrl |= tx_flags;
- } else if (direction == DMA_FROM_DEVICE) {
- u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
-
- ctrl_chained |= rx_flags;
- ctrl_last |= rx_flags;
- ctrl |= rx_flags;
- } else
- goto err_direction;
-
- dma_async_tx_descriptor_init(&cohd->desc, chan);
-
- cohd->desc.tx_submit = coh901318_tx_submit;
-
-
- /* The dma only supports transmitting packages up to
- * MAX_DMA_PACKET_SIZE. Calculate to total number of
- * dma elemts required to send the entire sg list
- */
- for_each_sg(sgl, sg, sg_len, i) {
- unsigned int factor;
- size = sg_dma_len(sg);
-
- if (size <= MAX_DMA_PACKET_SIZE) {
- len++;
- continue;
- }
-
- factor = size >> MAX_DMA_PACKET_SIZE_SHIFT;
- if ((factor << MAX_DMA_PACKET_SIZE_SHIFT) < size)
- factor++;
-
- len += factor;
- }
-
- data = coh901318_lli_alloc(&cohc->base->pool, len);
-
- if (data == NULL)
- goto err_dma_alloc;
-
- /* initiate allocated data list */
- cohd->pending_irqs =
- coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
- cohc_dev_addr(cohc),
- ctrl_chained,
- ctrl,
- ctrl_last,
- direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
- cohd->data = data;
-
- cohd->flags = flags;
-
- COH_DBG(coh901318_list_print(cohc, data));
-
- spin_unlock_irqrestore(&cohc->lock, flg);
-
- return &cohd->desc;
- err_dma_alloc:
- err_direction:
- coh901318_desc_remove(cohd);
- coh901318_desc_free(cohc, cohd);
- spin_unlock_irqrestore(&cohc->lock, flg);
- out:
- return NULL;
-}
-
-static enum dma_status
-coh901318_is_tx_complete(struct dma_chan *chan,
- dma_cookie_t cookie, dma_cookie_t *done,
- dma_cookie_t *used)
-{
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
- int ret;
-
- last_complete = cohc->completed;
- last_used = chan->cookie;
-
- ret = dma_async_is_complete(cookie, last_complete, last_used);
-
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
-
- return ret;
-}
-
-static void
-coh901318_issue_pending(struct dma_chan *chan)
-{
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- /* Busy means that pending jobs are already being processed */
- if (!cohc->busy)
- coh901318_queue_start(cohc);
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-}
-
-static void
-coh901318_terminate_all(struct dma_chan *chan)
-{
- unsigned long flags;
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- struct coh901318_desc *cohd;
- void __iomem *virtbase = cohc->base->virtbase;
-
- coh901318_stop(chan);
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- /* Clear any pending BE or TC interrupt */
- if (cohc->id < 32) {
- writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
- writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
- } else {
- writel(1 << (cohc->id - 32), virtbase +
- COH901318_BE_INT_CLEAR2);
- writel(1 << (cohc->id - 32), virtbase +
- COH901318_TC_INT_CLEAR2);
- }
-
- enable_powersave(cohc);
-
- while ((cohd = coh901318_first_active_get(cohc))) {
- /* release the lli allocation*/
- coh901318_lli_free(&cohc->base->pool, &cohd->data);
-
- coh901318_desc_remove(cohd);
-
- /* return desc to free-list */
- coh901318_desc_free(cohc, cohd);
- }
-
- while ((cohd = coh901318_first_queued(cohc))) {
- /* release the lli allocation*/
- coh901318_lli_free(&cohc->base->pool, &cohd->data);
-
- coh901318_desc_remove(cohd);
-
- /* return desc to free-list */
- coh901318_desc_free(cohc, cohd);
- }
-
-
- cohc->nbr_active_done = 0;
- cohc->busy = 0;
- cohc->pending_irqs = 0;
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-}
-void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
- struct coh901318_base *base)
-{
- int chans_i;
- int i = 0;
- struct coh901318_chan *cohc;
-
- INIT_LIST_HEAD(&dma->channels);
-
- for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) {
- for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) {
- cohc = &base->chans[i];
-
- cohc->base = base;
- cohc->chan.device = dma;
- cohc->id = i;
-
- /* TODO: do we really need this lock if only one
- * client is connected to each channel?
- */
-
- spin_lock_init(&cohc->lock);
-
- cohc->pending_irqs = 0;
- cohc->nbr_active_done = 0;
- cohc->busy = 0;
- INIT_LIST_HEAD(&cohc->free);
- INIT_LIST_HEAD(&cohc->active);
- INIT_LIST_HEAD(&cohc->queue);
-
- tasklet_init(&cohc->tasklet, dma_tasklet,
- (unsigned long) cohc);
-
- list_add_tail(&cohc->chan.device_node,
- &dma->channels);
- }
- }
-}
-
-static int __init coh901318_probe(struct platform_device *pdev)
-{
- int err = 0;
- struct coh901318_platform *pdata;
- struct coh901318_base *base;
- int irq;
- struct resource *io;
-
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!io)
- goto err_get_resource;
-
- /* Map DMA controller registers to virtual memory */
- if (request_mem_region(io->start,
- resource_size(io),
- pdev->dev.driver->name) == NULL) {
- err = -EBUSY;
- goto err_request_mem;
- }
-
- pdata = pdev->dev.platform_data;
- if (!pdata)
- goto err_no_platformdata;
-
- base = kmalloc(ALIGN(sizeof(struct coh901318_base), 4) +
- pdata->max_channels *
- sizeof(struct coh901318_chan),
- GFP_KERNEL);
- if (!base)
- goto err_alloc_coh_dma_channels;
-
- base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4);
-
- base->virtbase = ioremap(io->start, resource_size(io));
- if (!base->virtbase) {
- err = -ENOMEM;
- goto err_no_ioremap;
- }
-
- base->dev = &pdev->dev;
- base->platform = pdata;
- spin_lock_init(&base->pm.lock);
- base->pm.started_channels = 0;
-
- COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base);
-
- platform_set_drvdata(pdev, base);
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- goto err_no_irq;
-
- err = request_irq(irq, dma_irq_handler, IRQF_DISABLED,
- "coh901318", base);
- if (err) {
- dev_crit(&pdev->dev,
- "Cannot allocate IRQ for DMA controller!\n");
- goto err_request_irq;
- }
-
- err = coh901318_pool_create(&base->pool, &pdev->dev,
- sizeof(struct coh901318_lli),
- 32);
- if (err)
- goto err_pool_create;
-
- /* init channels for device transfers */
- coh901318_base_init(&base->dma_slave, base->platform->chans_slave,
- base);
-
- dma_cap_zero(base->dma_slave.cap_mask);
- dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
-
- base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources;
- base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources;
- base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
- base->dma_slave.device_is_tx_complete = coh901318_is_tx_complete;
- base->dma_slave.device_issue_pending = coh901318_issue_pending;
- base->dma_slave.device_terminate_all = coh901318_terminate_all;
- base->dma_slave.dev = &pdev->dev;
-
- err = dma_async_device_register(&base->dma_slave);
-
- if (err)
- goto err_register_slave;
-
- /* init channels for memcpy */
- coh901318_base_init(&base->dma_memcpy, base->platform->chans_memcpy,
- base);
-
- dma_cap_zero(base->dma_memcpy.cap_mask);
- dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
-
- base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources;
- base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources;
- base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
- base->dma_memcpy.device_is_tx_complete = coh901318_is_tx_complete;
- base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
- base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
- base->dma_memcpy.dev = &pdev->dev;
- err = dma_async_device_register(&base->dma_memcpy);
-
- if (err)
- goto err_register_memcpy;
-
- dev_dbg(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
- (u32) base->virtbase);
-
- return err;
-
- err_register_memcpy:
- dma_async_device_unregister(&base->dma_slave);
- err_register_slave:
- coh901318_pool_destroy(&base->pool);
- err_pool_create:
- free_irq(platform_get_irq(pdev, 0), base);
- err_request_irq:
- err_no_irq:
- iounmap(base->virtbase);
- err_no_ioremap:
- kfree(base);
- err_alloc_coh_dma_channels:
- err_no_platformdata:
- release_mem_region(pdev->resource->start,
- resource_size(pdev->resource));
- err_request_mem:
- err_get_resource:
- return err;
-}
-
-static int __exit coh901318_remove(struct platform_device *pdev)
-{
- struct coh901318_base *base = platform_get_drvdata(pdev);
-
- dma_async_device_unregister(&base->dma_memcpy);
- dma_async_device_unregister(&base->dma_slave);
- coh901318_pool_destroy(&base->pool);
- free_irq(platform_get_irq(pdev, 0), base);
- kfree(base);
- iounmap(base->virtbase);
- release_mem_region(pdev->resource->start,
- resource_size(pdev->resource));
- return 0;
-}
-
-
-static struct platform_driver coh901318_driver = {
- .remove = __exit_p(coh901318_remove),
- .driver = {
- .name = "coh901318",
- },
-};
-
-int __init coh901318_init(void)
-{
- return platform_driver_probe(&coh901318_driver, coh901318_probe);
-}
-subsys_initcall(coh901318_init);
-
-void __exit coh901318_exit(void)
-{
- platform_driver_unregister(&coh901318_driver);
-}
-module_exit(coh901318_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Per Friden");
diff --git a/trunk/drivers/dma/coh901318_lli.c b/trunk/drivers/dma/coh901318_lli.c
deleted file mode 100644
index f5120f238a4d..000000000000
--- a/trunk/drivers/dma/coh901318_lli.c
+++ /dev/null
@@ -1,318 +0,0 @@
-/*
- * driver/dma/coh901318_lli.c
- *
- * Copyright (C) 2007-2009 ST-Ericsson
- * License terms: GNU General Public License (GPL) version 2
- * Support functions for handling lli for dma
- * Author: Per Friden
- */
-
-#include
-#include
-#include
-#include
-#include
-
-#include "coh901318_lli.h"
-
-#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
-#define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
-#define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add)
-#else
-#define DEBUGFS_POOL_COUNTER_RESET(pool)
-#define DEBUGFS_POOL_COUNTER_ADD(pool, add)
-#endif
-
-static struct coh901318_lli *
-coh901318_lli_next(struct coh901318_lli *data)
-{
- if (data == NULL || data->link_addr == 0)
- return NULL;
-
- return (struct coh901318_lli *) data->virt_link_addr;
-}
-
-int coh901318_pool_create(struct coh901318_pool *pool,
- struct device *dev,
- size_t size, size_t align)
-{
- spin_lock_init(&pool->lock);
- pool->dev = dev;
- pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);
-
- DEBUGFS_POOL_COUNTER_RESET(pool);
- return 0;
-}
-
-int coh901318_pool_destroy(struct coh901318_pool *pool)
-{
-
- dma_pool_destroy(pool->dmapool);
- return 0;
-}
-
-struct coh901318_lli *
-coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
-{
- int i;
- struct coh901318_lli *head;
- struct coh901318_lli *lli;
- struct coh901318_lli *lli_prev;
- dma_addr_t phy;
-
- if (len == 0)
- goto err;
-
- spin_lock(&pool->lock);
-
- head = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
-
- if (head == NULL)
- goto err;
-
- DEBUGFS_POOL_COUNTER_ADD(pool, 1);
-
- lli = head;
- lli->phy_this = phy;
-
- for (i = 1; i < len; i++) {
- lli_prev = lli;
-
- lli = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
-
- if (lli == NULL)
- goto err_clean_up;
-
- DEBUGFS_POOL_COUNTER_ADD(pool, 1);
- lli->phy_this = phy;
-
- lli_prev->link_addr = phy;
- lli_prev->virt_link_addr = lli;
- }
-
- lli->link_addr = 0x00000000U;
-
- spin_unlock(&pool->lock);
-
- return head;
-
- err:
- spin_unlock(&pool->lock);
- return NULL;
-
- err_clean_up:
- lli_prev->link_addr = 0x00000000U;
- spin_unlock(&pool->lock);
- coh901318_lli_free(pool, &head);
- return NULL;
-}
-
-void coh901318_lli_free(struct coh901318_pool *pool,
- struct coh901318_lli **lli)
-{
- struct coh901318_lli *l;
- struct coh901318_lli *next;
-
- if (lli == NULL)
- return;
-
- l = *lli;
-
- if (l == NULL)
- return;
-
- spin_lock(&pool->lock);
-
- while (l->link_addr) {
- next = l->virt_link_addr;
- dma_pool_free(pool->dmapool, l, l->phy_this);
- DEBUGFS_POOL_COUNTER_ADD(pool, -1);
- l = next;
- }
- dma_pool_free(pool->dmapool, l, l->phy_this);
- DEBUGFS_POOL_COUNTER_ADD(pool, -1);
-
- spin_unlock(&pool->lock);
- *lli = NULL;
-}
-
-int
-coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
- struct coh901318_lli *lli,
- dma_addr_t source, unsigned int size,
- dma_addr_t destination, u32 ctrl_chained,
- u32 ctrl_eom)
-{
- int s = size;
- dma_addr_t src = source;
- dma_addr_t dst = destination;
-
- lli->src_addr = src;
- lli->dst_addr = dst;
-
- while (lli->link_addr) {
- lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
- lli->src_addr = src;
- lli->dst_addr = dst;
-
- s -= MAX_DMA_PACKET_SIZE;
- lli = coh901318_lli_next(lli);
-
- src += MAX_DMA_PACKET_SIZE;
- dst += MAX_DMA_PACKET_SIZE;
- }
-
- lli->control = ctrl_eom | s;
- lli->src_addr = src;
- lli->dst_addr = dst;
-
- /* One irq per single transfer */
- return 1;
-}
-
-int
-coh901318_lli_fill_single(struct coh901318_pool *pool,
- struct coh901318_lli *lli,
- dma_addr_t buf, unsigned int size,
- dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
- enum dma_data_direction dir)
-{
- int s = size;
- dma_addr_t src;
- dma_addr_t dst;
-
-
- if (dir == DMA_TO_DEVICE) {
- src = buf;
- dst = dev_addr;
-
- } else if (dir == DMA_FROM_DEVICE) {
-
- src = dev_addr;
- dst = buf;
- } else {
- return -EINVAL;
- }
-
- while (lli->link_addr) {
- size_t block_size = MAX_DMA_PACKET_SIZE;
- lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
-
- /* If we are on the next-to-final block and there will
- * be less than half a DMA packet left for the last
- * block, then we want to make this block a little
- * smaller to balance the sizes. This is meant to
- * avoid too small transfers if the buffer size is
- * (MAX_DMA_PACKET_SIZE*N + 1) */
- if (s < (MAX_DMA_PACKET_SIZE + MAX_DMA_PACKET_SIZE/2))
- block_size = MAX_DMA_PACKET_SIZE/2;
-
- s -= block_size;
- lli->src_addr = src;
- lli->dst_addr = dst;
-
- lli = coh901318_lli_next(lli);
-
- if (dir == DMA_TO_DEVICE)
- src += block_size;
- else if (dir == DMA_FROM_DEVICE)
- dst += block_size;
- }
-
- lli->control = ctrl_eom | s;
- lli->src_addr = src;
- lli->dst_addr = dst;
-
- /* One irq per single transfer */
- return 1;
-}
-
-int
-coh901318_lli_fill_sg(struct coh901318_pool *pool,
- struct coh901318_lli *lli,
- struct scatterlist *sgl, unsigned int nents,
- dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
- u32 ctrl_last,
- enum dma_data_direction dir, u32 ctrl_irq_mask)
-{
- int i;
- struct scatterlist *sg;
- u32 ctrl_sg;
- dma_addr_t src = 0;
- dma_addr_t dst = 0;
- int nbr_of_irq = 0;
- u32 bytes_to_transfer;
- u32 elem_size;
-
- if (lli == NULL)
- goto err;
-
- spin_lock(&pool->lock);
-
- if (dir == DMA_TO_DEVICE)
- dst = dev_addr;
- else if (dir == DMA_FROM_DEVICE)
- src = dev_addr;
- else
- goto err;
-
- for_each_sg(sgl, sg, nents, i) {
- if (sg_is_chain(sg)) {
- /* sg continues to the next sg-element; don't
- * send ctrl_finish until the last
- * sg-element in the chain
- */
- ctrl_sg = ctrl_chained;
- } else if (i == nents - 1)
- ctrl_sg = ctrl_last;
- else
- ctrl_sg = ctrl ? ctrl : ctrl_last;
-
-
- if ((ctrl_sg & ctrl_irq_mask))
- nbr_of_irq++;
-
- if (dir == DMA_TO_DEVICE)
- /* increment source address */
- src = sg_dma_address(sg);
- else
- /* increment destination address */
- dst = sg_dma_address(sg);
-
- bytes_to_transfer = sg_dma_len(sg);
-
- while (bytes_to_transfer) {
- u32 val;
-
- if (bytes_to_transfer > MAX_DMA_PACKET_SIZE) {
- elem_size = MAX_DMA_PACKET_SIZE;
- val = ctrl_chained;
- } else {
- elem_size = bytes_to_transfer;
- val = ctrl_sg;
- }
-
- lli->control = val | elem_size;
- lli->src_addr = src;
- lli->dst_addr = dst;
-
- if (dir == DMA_FROM_DEVICE)
- dst += elem_size;
- else
- src += elem_size;
-
- BUG_ON(lli->link_addr & 3);
-
- bytes_to_transfer -= elem_size;
- lli = coh901318_lli_next(lli);
- }
-
- }
- spin_unlock(&pool->lock);
-
- /* There can be many IRQs per sg transfer */
- return nbr_of_irq;
- err:
- spin_unlock(&pool->lock);
- return -EINVAL;
-}
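
A worked illustration (editorial, not part of the driver) of the next-to-final block balancing in coh901318_lli_fill_single() above. The 2048-byte packet limit is a hypothetical stand-in for MAX_DMA_PACKET_SIZE, chosen only to keep the arithmetic readable; the splitter below mirrors the driver's test but compiles as a standalone program.

#include <stdio.h>

#define EXAMPLE_MAX_PACKET 2048	/* hypothetical stand-in for MAX_DMA_PACKET_SIZE */

/* mirror of the block-size choice made for every lli except the last one */
static void example_split(unsigned int s, unsigned int nbr_llis)
{
	unsigned int i;

	for (i = 0; i + 1 < nbr_llis; i++) {
		unsigned int block = EXAMPLE_MAX_PACKET;

		/* same next-to-final balancing test as in the driver */
		if (s < EXAMPLE_MAX_PACKET + EXAMPLE_MAX_PACKET / 2)
			block = EXAMPLE_MAX_PACKET / 2;

		printf("lli %u: %u bytes\n", i, block);
		s -= block;
	}
	printf("last lli: %u bytes\n", s);
}

int main(void)
{
	/* 2 * 2048 + 1 bytes over three lli:s splits as 2048, 1024, 1025
	 * instead of 2048, 2048 and a degenerate 1-byte tail */
	example_split(2 * EXAMPLE_MAX_PACKET + 1, 3);
	return 0;
}
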
diff --git a/trunk/drivers/dma/coh901318_lli.h b/trunk/drivers/dma/coh901318_lli.h
deleted file mode 100644
index 7bf713b79c6b..000000000000
--- a/trunk/drivers/dma/coh901318_lli.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * driver/dma/coh901318_lli.h
- *
- * Copyright (C) 2007-2009 ST-Ericsson
- * License terms: GNU General Public License (GPL) version 2
- * Support functions for handling lli for coh901318
- * Author: Per Friden
- */
-
-#ifndef COH901318_LLI_H
-#define COH901318_LLI_H
-
-#include
-
-struct device;
-
-struct coh901318_pool {
- spinlock_t lock;
- struct dma_pool *dmapool;
- struct device *dev;
-
-#ifdef CONFIG_DEBUG_FS
- int debugfs_pool_counter;
-#endif
-};
-
-struct device;
-/**
- * coh901318_pool_create() - Creates a dma pool for lli:s
- * @pool: pool handle
- * @dev: dma device
- * @lli_nbr: number of lli:s in the pool
- * @align: address alignment of lli:s
- * returns 0 on success, otherwise non-zero
- */
-int coh901318_pool_create(struct coh901318_pool *pool,
- struct device *dev,
- size_t lli_nbr, size_t align);
-
-/**
- * coh901318_pool_destroy() - Destroys the dma pool
- * @pool: pool handle
- * returns 0 on success, otherwise non-zero
- */
-int coh901318_pool_destroy(struct coh901318_pool *pool);
-
-/**
- * coh901318_lli_alloc() - Allocates a linked list
- *
- * @pool: pool handle
- * @len: length of the list
- * return: non-NULL on success, otherwise NULL
- */
-struct coh901318_lli *
-coh901318_lli_alloc(struct coh901318_pool *pool,
- unsigned int len);
-
-/**
- * coh901318_lli_free() - Returns the linked list items to the pool
- * @pool: pool handle
- * @lli: reference to lli pointer to be freed
- */
-void coh901318_lli_free(struct coh901318_pool *pool,
- struct coh901318_lli **lli);
-
-/**
- * coh901318_lli_fill_memcpy() - Prepares the lli:s for dma memcpy
- * @pool: pool handle
- * @lli: allocated lli
- * @src: src address
- * @size: transfer size
- * @dst: destination address
- * @ctrl_chained: ctrl for chained lli
- * @ctrl_last: ctrl for the last lli
- * returns number of CPU interrupts for the lli, negative on error.
- */
-int
-coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
- struct coh901318_lli *lli,
- dma_addr_t src, unsigned int size,
- dma_addr_t dst, u32 ctrl_chained, u32 ctrl_last);
-
-/**
- * coh901318_lli_fill_single() - Prepares the lli:s for dma single transfer
- * @pool: pool handle
- * @lli: allocated lli
- * @buf: transfer buffer
- * @size: transfer size
- * @dev_addr: address of peripheral
- * @ctrl_chained: ctrl for chained lli
- * @ctrl_last: ctrl for the last lli
- * @dir: direction of transfer (to or from device)
- * returns number of CPU interrupts for the lli, negative on error.
- */
-int
-coh901318_lli_fill_single(struct coh901318_pool *pool,
- struct coh901318_lli *lli,
- dma_addr_t buf, unsigned int size,
- dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last,
- enum dma_data_direction dir);
-
-/**
- * coh901318_lli_fill_sg() - Prepares the lli:s for dma scatter list transfer
- * @pool: pool handle
- * @lli: allocated lli
- * @sg: scatter gather list
- * @nents: number of entries in sg
- * @dev_addr: address of peripheral
- * @ctrl_chained: ctrl for chained lli
- * @ctrl: ctrl of middle lli
- * @ctrl_last: ctrl for the last lli
- * @dir: direction of transfer (to or from device)
- * @ctrl_irq_mask: ctrl mask for CPU interrupt
- * returns number of CPU interrupts for the lli, negative on error.
- */
-int
-coh901318_lli_fill_sg(struct coh901318_pool *pool,
- struct coh901318_lli *lli,
- struct scatterlist *sg, unsigned int nents,
- dma_addr_t dev_addr, u32 ctrl_chained,
- u32 ctrl, u32 ctrl_last,
- enum dma_data_direction dir, u32 ctrl_irq_mask);
-
-#endif /* COH901318_LLI_H */
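
A minimal usage sketch of the lli API declared above, assuming a pool already set up with coh901318_pool_create() and a buffer no larger than MAX_DMA_PACKET_SIZE; the function itself, the ctrl values and the error handling are placeholders and are not taken from the driver.

static int example_single_transfer(struct coh901318_pool *pool,
				   dma_addr_t buf, unsigned int size,
				   dma_addr_t dev_addr,
				   u32 ctrl_chained, u32 ctrl_eom)
{
	struct coh901318_lli *lli;
	int ret;

	/* one lli suffices while size <= MAX_DMA_PACKET_SIZE */
	lli = coh901318_lli_alloc(pool, 1);
	if (lli == NULL)
		return -ENOMEM;

	ret = coh901318_lli_fill_single(pool, lli, buf, size, dev_addr,
					ctrl_chained, ctrl_eom,
					DMA_TO_DEVICE);
	if (ret < 0)
		coh901318_lli_free(pool, &lli);

	/* on success ret is the number of interrupts for the transfer */
	return ret;
}
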
diff --git a/trunk/drivers/dma/dmatest.c b/trunk/drivers/dma/dmatest.c
index 8b905161fbf4..a32a4cf7b1e0 100644
--- a/trunk/drivers/dma/dmatest.c
+++ b/trunk/drivers/dma/dmatest.c
@@ -298,6 +298,10 @@ static int dmatest_func(void *data)
total_tests++;
+ len = dmatest_random() % test_buf_size + 1;
+ src_off = dmatest_random() % (test_buf_size - len + 1);
+ dst_off = dmatest_random() % (test_buf_size - len + 1);
+
/* honor alignment restrictions */
if (thread->type == DMA_MEMCPY)
align = dev->copy_align;
@@ -306,19 +310,7 @@ static int dmatest_func(void *data)
else if (thread->type == DMA_PQ)
align = dev->pq_align;
- if (1 << align > test_buf_size) {
- pr_err("%u-byte buffer too small for %d-byte alignment\n",
- test_buf_size, 1 << align);
- break;
- }
-
- len = dmatest_random() % test_buf_size + 1;
len = (len >> align) << align;
- if (!len)
- len = 1 << align;
- src_off = dmatest_random() % (test_buf_size - len + 1);
- dst_off = dmatest_random() % (test_buf_size - len + 1);
-
src_off = (src_off >> align) << align;
dst_off = (dst_off >> align) << align;
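
With this hunk dmatest draws the random length and offsets first and only afterwards rounds them down to the channel's copy/xor/pq alignment. A standalone sketch of that rounding, where align is a shift count as in the driver and the values are arbitrary:

#include <stdio.h>

int main(void)
{
	unsigned int align = 3;			/* e.g. an 8-byte copy_align */
	unsigned int len = 1003;		/* hypothetical random length */
	unsigned int src_off = 517;		/* hypothetical random offset */

	len = (len >> align) << align;		/* 1003 -> 1000, a multiple of 8 */
	src_off = (src_off >> align) << align;	/* 517  -> 512 */

	printf("len=%u src_off=%u\n", len, src_off);
	return 0;
}
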
diff --git a/trunk/drivers/dma/iop-adma.c b/trunk/drivers/dma/iop-adma.c
index ca6e6a0cb793..645ca8d54ec4 100644
--- a/trunk/drivers/dma/iop-adma.c
+++ b/trunk/drivers/dma/iop-adma.c
@@ -1470,7 +1470,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
return -ENODEV;
if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), pdev->name))
+ res->end - res->start, pdev->name))
return -EBUSY;
adev = kzalloc(sizeof(*adev), GFP_KERNEL);
@@ -1542,7 +1542,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
iop_chan->device = adev;
iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
+ res->end - res->start);
if (!iop_chan->mmr_base) {
ret = -ENOMEM;
goto err_free_iop_chan;
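
For reference, resource_size(res) from <linux/ioport.h> evaluates to res->end - res->start + 1, so the open-coded res->end - res->start restored by this hunk covers one byte less than the full length of the resource.
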
diff --git a/trunk/drivers/dma/ppc4xx/Makefile b/trunk/drivers/dma/ppc4xx/Makefile
deleted file mode 100644
index b3d259b3e52a..000000000000
--- a/trunk/drivers/dma/ppc4xx/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += adma.o
diff --git a/trunk/drivers/dma/ppc4xx/adma.c b/trunk/drivers/dma/ppc4xx/adma.c
deleted file mode 100644
index 0a3478e910f0..000000000000
--- a/trunk/drivers/dma/ppc4xx/adma.c
+++ /dev/null
@@ -1,5027 +0,0 @@
-/*
- * Copyright (C) 2006-2009 DENX Software Engineering.
- *
- * Author: Yuri Tikhonov
- *
- * Further porting to arch/powerpc by
- * Anatolij Gustschin
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-
-/*
- * This driver supports the asynchronous DMA copy and RAID engines available
- * on the AMCC PPC440SPe Processors.
- * Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
- * ADMA driver written by D.Williams.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include "adma.h"
-
-enum ppc_adma_init_code {
- PPC_ADMA_INIT_OK = 0,
- PPC_ADMA_INIT_MEMRES,
- PPC_ADMA_INIT_MEMREG,
- PPC_ADMA_INIT_ALLOC,
- PPC_ADMA_INIT_COHERENT,
- PPC_ADMA_INIT_CHANNEL,
- PPC_ADMA_INIT_IRQ1,
- PPC_ADMA_INIT_IRQ2,
- PPC_ADMA_INIT_REGISTER
-};
-
-static char *ppc_adma_errors[] = {
- [PPC_ADMA_INIT_OK] = "ok",
- [PPC_ADMA_INIT_MEMRES] = "failed to get memory resource",
- [PPC_ADMA_INIT_MEMREG] = "failed to request memory region",
- [PPC_ADMA_INIT_ALLOC] = "failed to allocate memory for adev "
- "structure",
- [PPC_ADMA_INIT_COHERENT] = "failed to allocate coherent memory for "
- "hardware descriptors",
- [PPC_ADMA_INIT_CHANNEL] = "failed to allocate memory for channel",
- [PPC_ADMA_INIT_IRQ1] = "failed to request first irq",
- [PPC_ADMA_INIT_IRQ2] = "failed to request second irq",
- [PPC_ADMA_INIT_REGISTER] = "failed to register dma async device",
-};
-
-static enum ppc_adma_init_code
-ppc440spe_adma_devices[PPC440SPE_ADMA_ENGINES_NUM];
-
-struct ppc_dma_chan_ref {
- struct dma_chan *chan;
- struct list_head node;
-};
-
-/* The list of channels exported by ppc440spe ADMA */
-struct list_head
-ppc440spe_adma_chan_list = LIST_HEAD_INIT(ppc440spe_adma_chan_list);
-
-/* This flag is set when we want to refetch the xor chain in the interrupt
- * handler
- */
-static u32 do_xor_refetch;
-
-/* Pointer to DMA0, DMA1 CP/CS FIFO */
-static void *ppc440spe_dma_fifo_buf;
-
-/* Pointers to last submitted to DMA0, DMA1 CDBs */
-static struct ppc440spe_adma_desc_slot *chan_last_sub[3];
-static struct ppc440spe_adma_desc_slot *chan_first_cdb[3];
-
-/* Pointer to last linked and submitted xor CB */
-static struct ppc440spe_adma_desc_slot *xor_last_linked;
-static struct ppc440spe_adma_desc_slot *xor_last_submit;
-
-/* This array is used in data-check operations for storing a pattern */
-static char ppc440spe_qword[16];
-
-static atomic_t ppc440spe_adma_err_irq_ref;
-static dcr_host_t ppc440spe_mq_dcr_host;
-static unsigned int ppc440spe_mq_dcr_len;
-
-/* Since RXOR operations use the common register (MQ0_CF2H) for setting up
- * the block size in transactions, we do not allow more than one RXOR
- * transaction to be active at a time. So use this var to store whether
- * RXOR is currently active (PPC440SPE_RXOR_RUN bit is set) or not
- * (PPC440SPE_RXOR_RUN is clear).
- */
-static unsigned long ppc440spe_rxor_state;
-
-/* These are used in enable & check routines
- */
-static u32 ppc440spe_r6_enabled;
-static struct ppc440spe_adma_chan *ppc440spe_r6_tchan;
-static struct completion ppc440spe_r6_test_comp;
-
-static int ppc440spe_adma_dma2rxor_prep_src(
- struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_rxor *cursor, int index,
- int src_cnt, u32 addr);
-static void ppc440spe_adma_dma2rxor_set_src(
- struct ppc440spe_adma_desc_slot *desc,
- int index, dma_addr_t addr);
-static void ppc440spe_adma_dma2rxor_set_mult(
- struct ppc440spe_adma_desc_slot *desc,
- int index, u8 mult);
-
-#ifdef ADMA_LL_DEBUG
-#define ADMA_LL_DBG(x) ({ if (1) x; 0; })
-#else
-#define ADMA_LL_DBG(x) ({ if (0) x; 0; })
-#endif
-
-static void print_cb(struct ppc440spe_adma_chan *chan, void *block)
-{
- struct dma_cdb *cdb;
- struct xor_cb *cb;
- int i;
-
- switch (chan->device->id) {
- case 0:
- case 1:
- cdb = block;
-
- pr_debug("CDB at %p [%d]:\n"
- "\t attr 0x%02x opc 0x%02x cnt 0x%08x\n"
- "\t sg1u 0x%08x sg1l 0x%08x\n"
- "\t sg2u 0x%08x sg2l 0x%08x\n"
- "\t sg3u 0x%08x sg3l 0x%08x\n",
- cdb, chan->device->id,
- cdb->attr, cdb->opc, le32_to_cpu(cdb->cnt),
- le32_to_cpu(cdb->sg1u), le32_to_cpu(cdb->sg1l),
- le32_to_cpu(cdb->sg2u), le32_to_cpu(cdb->sg2l),
- le32_to_cpu(cdb->sg3u), le32_to_cpu(cdb->sg3l)
- );
- break;
- case 2:
- cb = block;
-
- pr_debug("CB at %p [%d]:\n"
- "\t cbc 0x%08x cbbc 0x%08x cbs 0x%08x\n"
- "\t cbtah 0x%08x cbtal 0x%08x\n"
- "\t cblah 0x%08x cblal 0x%08x\n",
- cb, chan->device->id,
- cb->cbc, cb->cbbc, cb->cbs,
- cb->cbtah, cb->cbtal,
- cb->cblah, cb->cblal);
- for (i = 0; i < 16; i++) {
- if (i && !cb->ops[i].h && !cb->ops[i].l)
- continue;
- pr_debug("\t ops[%2d]: h 0x%08x l 0x%08x\n",
- i, cb->ops[i].h, cb->ops[i].l);
- }
- break;
- }
-}
-
-static void print_cb_list(struct ppc440spe_adma_chan *chan,
- struct ppc440spe_adma_desc_slot *iter)
-{
- for (; iter; iter = iter->hw_next)
- print_cb(chan, iter->hw_desc);
-}
-
-static void prep_dma_xor_dbg(int id, dma_addr_t dst, dma_addr_t *src,
- unsigned int src_cnt)
-{
- int i;
-
- pr_debug("\n%s(%d):\nsrc: ", __func__, id);
- for (i = 0; i < src_cnt; i++)
- pr_debug("\t0x%016llx ", src[i]);
- pr_debug("dst:\n\t0x%016llx\n", dst);
-}
-
-static void prep_dma_pq_dbg(int id, dma_addr_t *dst, dma_addr_t *src,
- unsigned int src_cnt)
-{
- int i;
-
- pr_debug("\n%s(%d):\nsrc: ", __func__, id);
- for (i = 0; i < src_cnt; i++)
- pr_debug("\t0x%016llx ", src[i]);
- pr_debug("dst: ");
- for (i = 0; i < 2; i++)
- pr_debug("\t0x%016llx ", dst[i]);
-}
-
-static void prep_dma_pqzero_sum_dbg(int id, dma_addr_t *src,
- unsigned int src_cnt,
- const unsigned char *scf)
-{
- int i;
-
- pr_debug("\n%s(%d):\nsrc(coef): ", __func__, id);
- if (scf) {
- for (i = 0; i < src_cnt; i++)
- pr_debug("\t0x%016llx(0x%02x) ", src[i], scf[i]);
- } else {
- for (i = 0; i < src_cnt; i++)
- pr_debug("\t0x%016llx(no) ", src[i]);
- }
-
- pr_debug("dst: ");
- for (i = 0; i < 2; i++)
- pr_debug("\t0x%016llx ", src[src_cnt + i]);
-}
-
-/******************************************************************************
- * Command (Descriptor) Blocks low-level routines
- ******************************************************************************/
-/**
- * ppc440spe_desc_init_interrupt - initialize the descriptor for INTERRUPT
- * pseudo operation
- */
-static void ppc440spe_desc_init_interrupt(struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_adma_chan *chan)
-{
- struct xor_cb *p;
-
- switch (chan->device->id) {
- case PPC440SPE_XOR_ID:
- p = desc->hw_desc;
- memset(desc->hw_desc, 0, sizeof(struct xor_cb));
- /* NOP with Command Block Complete Enable */
- p->cbc = XOR_CBCR_CBCE_BIT;
- break;
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
- /* NOP with interrupt */
- set_bit(PPC440SPE_DESC_INT, &desc->flags);
- break;
- default:
- printk(KERN_ERR "Unsupported id %d in %s\n", chan->device->id,
- __func__);
- break;
- }
-}
-
-/**
- * ppc440spe_desc_init_null_xor - initialize the descriptor for NULL XOR
- * pseudo operation
- */
-static void ppc440spe_desc_init_null_xor(struct ppc440spe_adma_desc_slot *desc)
-{
- memset(desc->hw_desc, 0, sizeof(struct xor_cb));
- desc->hw_next = NULL;
- desc->src_cnt = 0;
- desc->dst_cnt = 1;
-}
-
-/**
- * ppc440spe_desc_init_xor - initialize the descriptor for XOR operation
- */
-static void ppc440spe_desc_init_xor(struct ppc440spe_adma_desc_slot *desc,
- int src_cnt, unsigned long flags)
-{
- struct xor_cb *hw_desc = desc->hw_desc;
-
- memset(desc->hw_desc, 0, sizeof(struct xor_cb));
- desc->hw_next = NULL;
- desc->src_cnt = src_cnt;
- desc->dst_cnt = 1;
-
- hw_desc->cbc = XOR_CBCR_TGT_BIT | src_cnt;
- if (flags & DMA_PREP_INTERRUPT)
- /* Enable interrupt on completion */
- hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
-}
-
-/**
- * ppc440spe_desc_init_dma2pq - initialize the descriptor for PQ
- * operation in DMA2 controller
- */
-static void ppc440spe_desc_init_dma2pq(struct ppc440spe_adma_desc_slot *desc,
- int dst_cnt, int src_cnt, unsigned long flags)
-{
- struct xor_cb *hw_desc = desc->hw_desc;
-
- memset(desc->hw_desc, 0, sizeof(struct xor_cb));
- desc->hw_next = NULL;
- desc->src_cnt = src_cnt;
- desc->dst_cnt = dst_cnt;
- memset(desc->reverse_flags, 0, sizeof(desc->reverse_flags));
- desc->descs_per_op = 0;
-
- hw_desc->cbc = XOR_CBCR_TGT_BIT;
- if (flags & DMA_PREP_INTERRUPT)
- /* Enable interrupt on completion */
- hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
-}
-
-#define DMA_CTRL_FLAGS_LAST DMA_PREP_FENCE
-#define DMA_PREP_ZERO_P (DMA_CTRL_FLAGS_LAST << 1)
-#define DMA_PREP_ZERO_Q (DMA_PREP_ZERO_P << 1)
-
-/**
- * ppc440spe_desc_init_dma01pq - initialize the descriptors for PQ operation
- * with DMA0/1
- */
-static void ppc440spe_desc_init_dma01pq(struct ppc440spe_adma_desc_slot *desc,
- int dst_cnt, int src_cnt, unsigned long flags,
- unsigned long op)
-{
- struct dma_cdb *hw_desc;
- struct ppc440spe_adma_desc_slot *iter;
- u8 dopc;
-
- /* Common initialization of a PQ descriptors chain */
- set_bits(op, &desc->flags);
- desc->src_cnt = src_cnt;
- desc->dst_cnt = dst_cnt;
-
- /* WXOR MULTICAST if both P and Q are being computed
- * MV_SG1_SG2 if Q only
- */
- dopc = (desc->dst_cnt == DMA_DEST_MAX_NUM) ?
- DMA_CDB_OPC_MULTICAST : DMA_CDB_OPC_MV_SG1_SG2;
-
- list_for_each_entry(iter, &desc->group_list, chain_node) {
- hw_desc = iter->hw_desc;
- memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
-
- if (likely(!list_is_last(&iter->chain_node,
- &desc->group_list))) {
- /* set 'next' pointer */
- iter->hw_next = list_entry(iter->chain_node.next,
- struct ppc440spe_adma_desc_slot, chain_node);
- clear_bit(PPC440SPE_DESC_INT, &iter->flags);
- } else {
- /* this is the last descriptor.
- * this slot will be pasted from ADMA level
- * each time it wants to configure parameters
- * of the transaction (src, dst, ...)
- */
- iter->hw_next = NULL;
- if (flags & DMA_PREP_INTERRUPT)
- set_bit(PPC440SPE_DESC_INT, &iter->flags);
- else
- clear_bit(PPC440SPE_DESC_INT, &iter->flags);
- }
- }
-
- /* Set OPS depending on WXOR/RXOR type of operation */
- if (!test_bit(PPC440SPE_DESC_RXOR, &desc->flags)) {
- /* This is a WXOR only chain:
- * - first descriptors are for zeroing destinations
- * if PPC440SPE_ZERO_P/Q set;
- * - descriptors remained are for GF-XOR operations.
- */
- iter = list_first_entry(&desc->group_list,
- struct ppc440spe_adma_desc_slot,
- chain_node);
-
- if (test_bit(PPC440SPE_ZERO_P, &desc->flags)) {
- hw_desc = iter->hw_desc;
- hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
- iter = list_first_entry(&iter->chain_node,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- }
-
- if (test_bit(PPC440SPE_ZERO_Q, &desc->flags)) {
- hw_desc = iter->hw_desc;
- hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
- iter = list_first_entry(&iter->chain_node,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- }
-
- list_for_each_entry_from(iter, &desc->group_list, chain_node) {
- hw_desc = iter->hw_desc;
- hw_desc->opc = dopc;
- }
- } else {
- /* This is either RXOR-only or mixed RXOR/WXOR */
-
- /* The first 1 or 2 slots in chain are always RXOR,
- * if need to calculate P & Q, then there are two
- * RXOR slots; if only P or only Q, then there is one
- */
- iter = list_first_entry(&desc->group_list,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- hw_desc = iter->hw_desc;
- hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
-
- if (desc->dst_cnt == DMA_DEST_MAX_NUM) {
- iter = list_first_entry(&iter->chain_node,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- hw_desc = iter->hw_desc;
- hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
- }
-
- /* The remaining descs (if any) are WXORs */
- if (test_bit(PPC440SPE_DESC_WXOR, &desc->flags)) {
- iter = list_first_entry(&iter->chain_node,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- list_for_each_entry_from(iter, &desc->group_list,
- chain_node) {
- hw_desc = iter->hw_desc;
- hw_desc->opc = dopc;
- }
- }
- }
-}
-
-/**
- * ppc440spe_desc_init_dma01pqzero_sum - initialize the descriptor
- * for PQ_ZERO_SUM operation
- */
-static void ppc440spe_desc_init_dma01pqzero_sum(
- struct ppc440spe_adma_desc_slot *desc,
- int dst_cnt, int src_cnt)
-{
- struct dma_cdb *hw_desc;
- struct ppc440spe_adma_desc_slot *iter;
- int i = 0;
- u8 dopc = (dst_cnt == 2) ? DMA_CDB_OPC_MULTICAST :
- DMA_CDB_OPC_MV_SG1_SG2;
- /*
- * Initialize starting from the 2nd or 3rd descriptor depending
- * on dst_cnt. First one or two slots are for cloning P
- * and/or Q to chan->pdest and/or chan->qdest as we have
- * to preserve original P/Q.
- */
- iter = list_first_entry(&desc->group_list,
- struct ppc440spe_adma_desc_slot, chain_node);
- iter = list_entry(iter->chain_node.next,
- struct ppc440spe_adma_desc_slot, chain_node);
-
- if (dst_cnt > 1) {
- iter = list_entry(iter->chain_node.next,
- struct ppc440spe_adma_desc_slot, chain_node);
- }
- /* initialize each source descriptor in chain */
- list_for_each_entry_from(iter, &desc->group_list, chain_node) {
- hw_desc = iter->hw_desc;
- memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
- iter->src_cnt = 0;
- iter->dst_cnt = 0;
-
- /* This is a ZERO_SUM operation:
- * - descriptors starting from 2nd or 3rd
- * descriptor are for GF-XOR operations;
- * - remaining descriptors are for checking the result
- */
- if (i++ < src_cnt)
- /* MV_SG1_SG2 if only Q is being verified
- * MULTICAST if both P and Q are being verified
- */
- hw_desc->opc = dopc;
- else
- /* DMA_CDB_OPC_DCHECK128 operation */
- hw_desc->opc = DMA_CDB_OPC_DCHECK128;
-
- if (likely(!list_is_last(&iter->chain_node,
- &desc->group_list))) {
- /* set 'next' pointer */
- iter->hw_next = list_entry(iter->chain_node.next,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- } else {
- /* this is the last descriptor.
- * this slot will be pasted from ADMA level
- * each time it wants to configure parameters
- * of the transaction (src, dst, ...)
- */
- iter->hw_next = NULL;
- /* always enable interrupt generation since we get
- * the status of pqzero from the handler
- */
- set_bit(PPC440SPE_DESC_INT, &iter->flags);
- }
- }
- desc->src_cnt = src_cnt;
- desc->dst_cnt = dst_cnt;
-}
-
-/**
- * ppc440spe_desc_init_memcpy - initialize the descriptor for MEMCPY operation
- */
-static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
- unsigned long flags)
-{
- struct dma_cdb *hw_desc = desc->hw_desc;
-
- memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
- desc->hw_next = NULL;
- desc->src_cnt = 1;
- desc->dst_cnt = 1;
-
- if (flags & DMA_PREP_INTERRUPT)
- set_bit(PPC440SPE_DESC_INT, &desc->flags);
- else
- clear_bit(PPC440SPE_DESC_INT, &desc->flags);
-
- hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
-}
-
-/**
- * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
- */
-static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
- int value, unsigned long flags)
-{
- struct dma_cdb *hw_desc = desc->hw_desc;
-
- memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
- desc->hw_next = NULL;
- desc->src_cnt = 1;
- desc->dst_cnt = 1;
-
- if (flags & DMA_PREP_INTERRUPT)
- set_bit(PPC440SPE_DESC_INT, &desc->flags);
- else
- clear_bit(PPC440SPE_DESC_INT, &desc->flags);
-
- hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
- hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
- hw_desc->opc = DMA_CDB_OPC_DFILL128;
-}
-
-/**
- * ppc440spe_desc_set_src_addr - set source address into the descriptor
- */
-static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_adma_chan *chan,
- int src_idx, dma_addr_t addrh,
- dma_addr_t addrl)
-{
- struct dma_cdb *dma_hw_desc;
- struct xor_cb *xor_hw_desc;
- phys_addr_t addr64, tmplow, tmphi;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- if (!addrh) {
- addr64 = addrl;
- tmphi = (addr64 >> 32);
- tmplow = (addr64 & 0xFFFFFFFF);
- } else {
- tmphi = addrh;
- tmplow = addrl;
- }
- dma_hw_desc = desc->hw_desc;
- dma_hw_desc->sg1l = cpu_to_le32((u32)tmplow);
- dma_hw_desc->sg1u |= cpu_to_le32((u32)tmphi);
- break;
- case PPC440SPE_XOR_ID:
- xor_hw_desc = desc->hw_desc;
- xor_hw_desc->ops[src_idx].l = addrl;
- xor_hw_desc->ops[src_idx].h |= addrh;
- break;
- }
-}
-
-/**
- * ppc440spe_desc_set_src_mult - set source address mult into the descriptor
- */
-static void ppc440spe_desc_set_src_mult(struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_adma_chan *chan, u32 mult_index,
- int sg_index, unsigned char mult_value)
-{
- struct dma_cdb *dma_hw_desc;
- struct xor_cb *xor_hw_desc;
- u32 *psgu;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- dma_hw_desc = desc->hw_desc;
-
- switch (sg_index) {
- /* for RXOR operations set multiplier
- * into source cued address
- */
- case DMA_CDB_SG_SRC:
- psgu = &dma_hw_desc->sg1u;
- break;
- /* for WXOR operations set multiplier
- * into destination cued address(es)
- */
- case DMA_CDB_SG_DST1:
- psgu = &dma_hw_desc->sg2u;
- break;
- case DMA_CDB_SG_DST2:
- psgu = &dma_hw_desc->sg3u;
- break;
- default:
- BUG();
- }
-
- *psgu |= cpu_to_le32(mult_value << mult_index);
- break;
- case PPC440SPE_XOR_ID:
- xor_hw_desc = desc->hw_desc;
- break;
- default:
- BUG();
- }
-}
-
-/**
- * ppc440spe_desc_set_dest_addr - set destination address into the descriptor
- */
-static void ppc440spe_desc_set_dest_addr(struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_adma_chan *chan,
- dma_addr_t addrh, dma_addr_t addrl,
- u32 dst_idx)
-{
- struct dma_cdb *dma_hw_desc;
- struct xor_cb *xor_hw_desc;
- phys_addr_t addr64, tmphi, tmplow;
- u32 *psgu, *psgl;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- if (!addrh) {
- addr64 = addrl;
- tmphi = (addr64 >> 32);
- tmplow = (addr64 & 0xFFFFFFFF);
- } else {
- tmphi = addrh;
- tmplow = addrl;
- }
- dma_hw_desc = desc->hw_desc;
-
- psgu = dst_idx ? &dma_hw_desc->sg3u : &dma_hw_desc->sg2u;
- psgl = dst_idx ? &dma_hw_desc->sg3l : &dma_hw_desc->sg2l;
-
- *psgl = cpu_to_le32((u32)tmplow);
- *psgu |= cpu_to_le32((u32)tmphi);
- break;
- case PPC440SPE_XOR_ID:
- xor_hw_desc = desc->hw_desc;
- xor_hw_desc->cbtal = addrl;
- xor_hw_desc->cbtah |= addrh;
- break;
- }
-}
-
-/**
- * ppc440spe_desc_set_byte_count - set number of data bytes involved
- * into the operation
- */
-static void ppc440spe_desc_set_byte_count(struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_adma_chan *chan,
- u32 byte_count)
-{
- struct dma_cdb *dma_hw_desc;
- struct xor_cb *xor_hw_desc;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- dma_hw_desc = desc->hw_desc;
- dma_hw_desc->cnt = cpu_to_le32(byte_count);
- break;
- case PPC440SPE_XOR_ID:
- xor_hw_desc = desc->hw_desc;
- xor_hw_desc->cbbc = byte_count;
- break;
- }
-}
-
-/**
- * ppc440spe_desc_set_rxor_block_size - set RXOR block size
- */
-static inline void ppc440spe_desc_set_rxor_block_size(u32 byte_count)
-{
- /* assume that byte_count is aligned on the 512-boundary;
- * thus write it directly to the register (bits 23:31 are
- * reserved there).
- */
- dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CF2H, byte_count);
-}
-
-/**
- * ppc440spe_desc_set_dcheck - set CHECK pattern
- */
-static void ppc440spe_desc_set_dcheck(struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_adma_chan *chan, u8 *qword)
-{
- struct dma_cdb *dma_hw_desc;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- dma_hw_desc = desc->hw_desc;
- iowrite32(qword[0], &dma_hw_desc->sg3l);
- iowrite32(qword[4], &dma_hw_desc->sg3u);
- iowrite32(qword[8], &dma_hw_desc->sg2l);
- iowrite32(qword[12], &dma_hw_desc->sg2u);
- break;
- default:
- BUG();
- }
-}
-
-/**
- * ppc440spe_xor_set_link - set link address in xor CB
- */
-static void ppc440spe_xor_set_link(struct ppc440spe_adma_desc_slot *prev_desc,
- struct ppc440spe_adma_desc_slot *next_desc)
-{
- struct xor_cb *xor_hw_desc = prev_desc->hw_desc;
-
- if (unlikely(!next_desc || !(next_desc->phys))) {
- printk(KERN_ERR "%s: next_desc=0x%p; next_desc->phys=0x%llx\n",
- __func__, next_desc,
- next_desc ? next_desc->phys : 0);
- BUG();
- }
-
- xor_hw_desc->cbs = 0;
- xor_hw_desc->cblal = next_desc->phys;
- xor_hw_desc->cblah = 0;
- xor_hw_desc->cbc |= XOR_CBCR_LNK_BIT;
-}
-
-/**
- * ppc440spe_desc_set_link - set the address of descriptor following this
- * descriptor in chain
- */
-static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan,
- struct ppc440spe_adma_desc_slot *prev_desc,
- struct ppc440spe_adma_desc_slot *next_desc)
-{
- unsigned long flags;
- struct ppc440spe_adma_desc_slot *tail = next_desc;
-
- if (unlikely(!prev_desc || !next_desc ||
- (prev_desc->hw_next && prev_desc->hw_next != next_desc))) {
- /* If the previous next pointer is overwritten, something is wrong,
- * though we may refetch from append to initiate list
- * processing; in that case it's ok.
- */
- printk(KERN_ERR "%s: prev_desc=0x%p; next_desc=0x%p; "
- "prev->hw_next=0x%p\n", __func__, prev_desc,
- next_desc, prev_desc ? prev_desc->hw_next : 0);
- BUG();
- }
-
- local_irq_save(flags);
-
- /* do s/w chaining both for DMA and XOR descriptors */
- prev_desc->hw_next = next_desc;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- break;
- case PPC440SPE_XOR_ID:
- /* bind descriptor to the chain */
- while (tail->hw_next)
- tail = tail->hw_next;
- xor_last_linked = tail;
-
- if (prev_desc == xor_last_submit)
- /* do not link to the last submitted CB */
- break;
- ppc440spe_xor_set_link(prev_desc, next_desc);
- break;
- }
-
- local_irq_restore(flags);
-}
-
-/**
- * ppc440spe_desc_get_src_addr - extract the source address from the descriptor
- */
-static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_adma_chan *chan, int src_idx)
-{
- struct dma_cdb *dma_hw_desc;
- struct xor_cb *xor_hw_desc;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- dma_hw_desc = desc->hw_desc;
- /* May have 0, 1, 2, or 3 sources */
- switch (dma_hw_desc->opc) {
- case DMA_CDB_OPC_NO_OP:
- case DMA_CDB_OPC_DFILL128:
- return 0;
- case DMA_CDB_OPC_DCHECK128:
- if (unlikely(src_idx)) {
- printk(KERN_ERR "%s: try to get %d source for"
- " DCHECK128\n", __func__, src_idx);
- BUG();
- }
- return le32_to_cpu(dma_hw_desc->sg1l);
- case DMA_CDB_OPC_MULTICAST:
- case DMA_CDB_OPC_MV_SG1_SG2:
- if (unlikely(src_idx > 2)) {
- printk(KERN_ERR "%s: try to get %d source from"
- " DMA descr\n", __func__, src_idx);
- BUG();
- }
- if (src_idx) {
- if (le32_to_cpu(dma_hw_desc->sg1u) &
- DMA_CUED_XOR_WIN_MSK) {
- u8 region;
-
- if (src_idx == 1)
- return le32_to_cpu(
- dma_hw_desc->sg1l) +
- desc->unmap_len;
-
- region = (le32_to_cpu(
- dma_hw_desc->sg1u)) >>
- DMA_CUED_REGION_OFF;
-
- region &= DMA_CUED_REGION_MSK;
- switch (region) {
- case DMA_RXOR123:
- return le32_to_cpu(
- dma_hw_desc->sg1l) +
- (desc->unmap_len << 1);
- case DMA_RXOR124:
- return le32_to_cpu(
- dma_hw_desc->sg1l) +
- (desc->unmap_len * 3);
- case DMA_RXOR125:
- return le32_to_cpu(
- dma_hw_desc->sg1l) +
- (desc->unmap_len << 2);
- default:
- printk(KERN_ERR
- "%s: try to"
- " get src3 for region %02x"
- "PPC440SPE_DESC_RXOR12?\n",
- __func__, region);
- BUG();
- }
- } else {
- printk(KERN_ERR
- "%s: try to get %d"
- " source for non-cued descr\n",
- __func__, src_idx);
- BUG();
- }
- }
- return le32_to_cpu(dma_hw_desc->sg1l);
- default:
- printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
- __func__, dma_hw_desc->opc);
- BUG();
- }
- return le32_to_cpu(dma_hw_desc->sg1l);
- case PPC440SPE_XOR_ID:
- /* May have up to 16 sources */
- xor_hw_desc = desc->hw_desc;
- return xor_hw_desc->ops[src_idx].l;
- }
- return 0;
-}
-
-/**
- * ppc440spe_desc_get_dest_addr - extract the destination address from the
- * descriptor
- */
-static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_adma_chan *chan, int idx)
-{
- struct dma_cdb *dma_hw_desc;
- struct xor_cb *xor_hw_desc;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- dma_hw_desc = desc->hw_desc;
-
- if (likely(!idx))
- return le32_to_cpu(dma_hw_desc->sg2l);
- return le32_to_cpu(dma_hw_desc->sg3l);
- case PPC440SPE_XOR_ID:
- xor_hw_desc = desc->hw_desc;
- return xor_hw_desc->cbtal;
- }
- return 0;
-}
-
-/**
- * ppc440spe_desc_get_src_num - extract the number of source addresses from
- * the descriptor
- */
-static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_adma_chan *chan)
-{
- struct dma_cdb *dma_hw_desc;
- struct xor_cb *xor_hw_desc;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- dma_hw_desc = desc->hw_desc;
-
- switch (dma_hw_desc->opc) {
- case DMA_CDB_OPC_NO_OP:
- case DMA_CDB_OPC_DFILL128:
- return 0;
- case DMA_CDB_OPC_DCHECK128:
- return 1;
- case DMA_CDB_OPC_MV_SG1_SG2:
- case DMA_CDB_OPC_MULTICAST:
- /*
- * Only for RXOR operations do we have
- * more than one source
- */
- if (le32_to_cpu(dma_hw_desc->sg1u) &
- DMA_CUED_XOR_WIN_MSK) {
- /* RXOR op, there are 2 or 3 sources */
- if (((le32_to_cpu(dma_hw_desc->sg1u) >>
- DMA_CUED_REGION_OFF) &
- DMA_CUED_REGION_MSK) == DMA_RXOR12) {
- /* RXOR 1-2 */
- return 2;
- } else {
- /* RXOR 1-2-3/1-2-4/1-2-5 */
- return 3;
- }
- }
- return 1;
- default:
- printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
- __func__, dma_hw_desc->opc);
- BUG();
- }
- case PPC440SPE_XOR_ID:
- /* up to 16 sources */
- xor_hw_desc = desc->hw_desc;
- return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK;
- default:
- BUG();
- }
- return 0;
-}
-
-/**
- * ppc440spe_desc_get_dst_num - get the number of destination addresses in
- * this descriptor
- */
-static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_adma_chan *chan)
-{
- struct dma_cdb *dma_hw_desc;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- /* May be 1 or 2 destinations */
- dma_hw_desc = desc->hw_desc;
- switch (dma_hw_desc->opc) {
- case DMA_CDB_OPC_NO_OP:
- case DMA_CDB_OPC_DCHECK128:
- return 0;
- case DMA_CDB_OPC_MV_SG1_SG2:
- case DMA_CDB_OPC_DFILL128:
- return 1;
- case DMA_CDB_OPC_MULTICAST:
- if (desc->dst_cnt == 2)
- return 2;
- else
- return 1;
- default:
- printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
- __func__, dma_hw_desc->opc);
- BUG();
- }
- case PPC440SPE_XOR_ID:
- /* Always only 1 destination */
- return 1;
- default:
- BUG();
- }
- return 0;
-}
-
-/**
- * ppc440spe_desc_get_link - get the address of the descriptor that
- * follows this one
- */
-static inline u32 ppc440spe_desc_get_link(struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_adma_chan *chan)
-{
- if (!desc->hw_next)
- return 0;
-
- return desc->hw_next->phys;
-}
-
-/**
- * ppc440spe_desc_is_aligned - check alignment
- */
-static inline int ppc440spe_desc_is_aligned(
- struct ppc440spe_adma_desc_slot *desc, int num_slots)
-{
- return (desc->idx & (num_slots - 1)) ? 0 : 1;
-}
-
-/**
- * ppc440spe_chan_xor_slot_count - get the number of slots necessary for
- * XOR operation
- */
-static int ppc440spe_chan_xor_slot_count(size_t len, int src_cnt,
- int *slots_per_op)
-{
- int slot_cnt;
-
- /* each XOR descriptor provides up to 16 source operands */
- slot_cnt = *slots_per_op = (src_cnt + XOR_MAX_OPS - 1)/XOR_MAX_OPS;
-
- if (likely(len <= PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT))
- return slot_cnt;
-
- printk(KERN_ERR "%s: len %d > max %d !!\n",
- __func__, len, PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
- BUG();
- return slot_cnt;
-}
-
-/**
- * ppc440spe_dma2_pq_slot_count - get the number of slots necessary for
- * DMA2 PQ operation
- */
-static int ppc440spe_dma2_pq_slot_count(dma_addr_t *srcs,
- int src_cnt, size_t len)
-{
- signed long long order = 0;
- int state = 0;
- int addr_count = 0;
- int i;
- for (i = 1; i < src_cnt; i++) {
- dma_addr_t cur_addr = srcs[i];
- dma_addr_t old_addr = srcs[i-1];
- switch (state) {
- case 0:
- if (cur_addr == old_addr + len) {
- /* direct RXOR */
- order = 1;
- state = 1;
- if (i == src_cnt-1)
- addr_count++;
- } else if (old_addr == cur_addr + len) {
- /* reverse RXOR */
- order = -1;
- state = 1;
- if (i == src_cnt-1)
- addr_count++;
- } else {
- state = 3;
- }
- break;
- case 1:
- if (i == src_cnt-2 || (order == -1
- && cur_addr != old_addr - len)) {
- order = 0;
- state = 0;
- addr_count++;
- } else if (cur_addr == old_addr + len*order) {
- state = 2;
- if (i == src_cnt-1)
- addr_count++;
- } else if (cur_addr == old_addr + 2*len) {
- state = 2;
- if (i == src_cnt-1)
- addr_count++;
- } else if (cur_addr == old_addr + 3*len) {
- state = 2;
- if (i == src_cnt-1)
- addr_count++;
- } else {
- order = 0;
- state = 0;
- addr_count++;
- }
- break;
- case 2:
- order = 0;
- state = 0;
- addr_count++;
- break;
- }
- if (state == 3)
- break;
- }
- if (src_cnt <= 1 || (state != 1 && state != 2)) {
- pr_err("%s: src_cnt=%d, state=%d, addr_count=%d, order=%lld\n",
- __func__, src_cnt, state, addr_count, order);
- for (i = 0; i < src_cnt; i++)
- pr_err("\t[%d] 0x%llx \n", i, srcs[i]);
- BUG();
- }
-
- return (addr_count + XOR_MAX_OPS - 1) / XOR_MAX_OPS;
-}
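/*
 * Editorial worked example, not part of the driver: with len = 0x1000 and
 * hypothetical sources at 0x10000000, 0x10001000, 0x10002000 and 0x10003000,
 * the first comparison above already takes the "direct RXOR" branch
 * (cur_addr == old_addr + len, order = 1), the contiguous run is folded into
 * RXOR address regions, and addr_count ends up well below src_cnt, so the
 * whole operation fits into a single 16-operand XOR CB slot.
 */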
-
-
-/******************************************************************************
- * ADMA channel low-level routines
- ******************************************************************************/
-
-static u32
-ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan);
-static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan);
-
-/**
- * ppc440spe_adma_device_clear_eot_status - interrupt ack to XOR or DMA engine
- */
-static void ppc440spe_adma_device_clear_eot_status(
- struct ppc440spe_adma_chan *chan)
-{
- struct dma_regs *dma_reg;
- struct xor_regs *xor_reg;
- u8 *p = chan->device->dma_desc_pool_virt;
- struct dma_cdb *cdb;
- u32 rv, i;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- /* read FIFO to ack */
- dma_reg = chan->device->dma_reg;
- while ((rv = ioread32(&dma_reg->csfpl))) {
- i = rv & DMA_CDB_ADDR_MSK;
- cdb = (struct dma_cdb *)&p[i -
- (u32)chan->device->dma_desc_pool];
-
- /* Clear opcode to ack. This is necessary for
- * ZeroSum operations only
- */
- cdb->opc = 0;
-
- if (test_bit(PPC440SPE_RXOR_RUN,
- &ppc440spe_rxor_state)) {
- /* probably this is a completed RXOR op,
- * get pointer to CDB using the fact that
- * physical and virtual addresses of CDB
- * in pools have the same offsets
- */
- if (le32_to_cpu(cdb->sg1u) &
- DMA_CUED_XOR_BASE) {
- /* this is a RXOR */
- clear_bit(PPC440SPE_RXOR_RUN,
- &ppc440spe_rxor_state);
- }
- }
-
- if (rv & DMA_CDB_STATUS_MSK) {
- /* ZeroSum check failed
- */
- struct ppc440spe_adma_desc_slot *iter;
- dma_addr_t phys = rv & ~DMA_CDB_MSK;
-
- /*
- * Update the status of corresponding
- * descriptor.
- */
- list_for_each_entry(iter, &chan->chain,
- chain_node) {
- if (iter->phys == phys)
- break;
- }
- /*
- * if we cannot find the corresponding
- * slot it's a bug
- */
- BUG_ON(&iter->chain_node == &chan->chain);
-
- if (iter->xor_check_result) {
- if (test_bit(PPC440SPE_DESC_PCHECK,
- &iter->flags)) {
- *iter->xor_check_result |=
- SUM_CHECK_P_RESULT;
- } else
- if (test_bit(PPC440SPE_DESC_QCHECK,
- &iter->flags)) {
- *iter->xor_check_result |=
- SUM_CHECK_Q_RESULT;
- } else
- BUG();
- }
- }
- }
-
- rv = ioread32(&dma_reg->dsts);
- if (rv) {
- pr_err("DMA%d err status: 0x%x\n",
- chan->device->id, rv);
- /* write back to clear */
- iowrite32(rv, &dma_reg->dsts);
- }
- break;
- case PPC440SPE_XOR_ID:
- /* reset status bits to ack */
- xor_reg = chan->device->xor_reg;
- rv = ioread32be(&xor_reg->sr);
- iowrite32be(rv, &xor_reg->sr);
-
- if (rv & (XOR_IE_ICBIE_BIT|XOR_IE_ICIE_BIT|XOR_IE_RPTIE_BIT)) {
- if (rv & XOR_IE_RPTIE_BIT) {
- /* Read PLB Timeout Error.
- * Try to resubmit the CB
- */
- u32 val = ioread32be(&xor_reg->ccbalr);
-
- iowrite32be(val, &xor_reg->cblalr);
-
- val = ioread32be(&xor_reg->crsr);
- iowrite32be(val | XOR_CRSR_XAE_BIT,
- &xor_reg->crsr);
- } else
- pr_err("XOR ERR 0x%x status\n", rv);
- break;
- }
-
- /* if the XORcore is idle, but there are unprocessed CBs
- * then refetch the s/w chain here
- */
- if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) &&
- do_xor_refetch)
- ppc440spe_chan_append(chan);
- break;
- }
-}
-
-/**
- * ppc440spe_chan_is_busy - get the channel status
- */
-static int ppc440spe_chan_is_busy(struct ppc440spe_adma_chan *chan)
-{
- struct dma_regs *dma_reg;
- struct xor_regs *xor_reg;
- int busy = 0;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- dma_reg = chan->device->dma_reg;
- /* if command FIFO's head and tail pointers are equal and
- * status tail is the same as command, then channel is free
- */
- if (ioread16(&dma_reg->cpfhp) != ioread16(&dma_reg->cpftp) ||
- ioread16(&dma_reg->cpftp) != ioread16(&dma_reg->csftp))
- busy = 1;
- break;
- case PPC440SPE_XOR_ID:
- /* use the special status bit for the XORcore
- */
- xor_reg = chan->device->xor_reg;
- busy = (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) ? 1 : 0;
- break;
- }
-
- return busy;
-}
-
-/**
- * ppc440spe_chan_set_first_xor_descriptor - init XORcore chain
- */
-static void ppc440spe_chan_set_first_xor_descriptor(
- struct ppc440spe_adma_chan *chan,
- struct ppc440spe_adma_desc_slot *next_desc)
-{
- struct xor_regs *xor_reg = chan->device->xor_reg;
-
- if (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)
- printk(KERN_INFO "%s: Warn: XORcore is running "
- "when try to set the first CDB!\n",
- __func__);
-
- xor_last_submit = xor_last_linked = next_desc;
-
- iowrite32be(XOR_CRSR_64BA_BIT, &xor_reg->crsr);
-
- iowrite32be(next_desc->phys, &xor_reg->cblalr);
- iowrite32be(0, &xor_reg->cblahr);
- iowrite32be(ioread32be(&xor_reg->cbcr) | XOR_CBCR_LNK_BIT,
- &xor_reg->cbcr);
-
- chan->hw_chain_inited = 1;
-}
-
-/**
- * ppc440spe_dma_put_desc - put DMA0,1 descriptor to FIFO.
- * called with irqs disabled
- */
-static void ppc440spe_dma_put_desc(struct ppc440spe_adma_chan *chan,
- struct ppc440spe_adma_desc_slot *desc)
-{
- u32 pcdb;
- struct dma_regs *dma_reg = chan->device->dma_reg;
-
- pcdb = desc->phys;
- if (!test_bit(PPC440SPE_DESC_INT, &desc->flags))
- pcdb |= DMA_CDB_NO_INT;
-
- chan_last_sub[chan->device->id] = desc;
-
- ADMA_LL_DBG(print_cb(chan, desc->hw_desc));
-
- iowrite32(pcdb, &dma_reg->cpfpl);
-}
-
-/**
- * ppc440spe_chan_append - update the h/w chain in the channel
- */
-static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan)
-{
- struct xor_regs *xor_reg;
- struct ppc440spe_adma_desc_slot *iter;
- struct xor_cb *xcb;
- u32 cur_desc;
- unsigned long flags;
-
- local_irq_save(flags);
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- cur_desc = ppc440spe_chan_get_current_descriptor(chan);
-
- if (likely(cur_desc)) {
- iter = chan_last_sub[chan->device->id];
- BUG_ON(!iter);
- } else {
- /* first peer */
- iter = chan_first_cdb[chan->device->id];
- BUG_ON(!iter);
- ppc440spe_dma_put_desc(chan, iter);
- chan->hw_chain_inited = 1;
- }
-
- /* is there something new to append */
- if (!iter->hw_next)
- break;
-
- /* flush descriptors from the s/w queue to fifo */
- list_for_each_entry_continue(iter, &chan->chain, chain_node) {
- ppc440spe_dma_put_desc(chan, iter);
- if (!iter->hw_next)
- break;
- }
- break;
- case PPC440SPE_XOR_ID:
- /* update h/w links and refetch */
- if (!xor_last_submit->hw_next)
- break;
-
- xor_reg = chan->device->xor_reg;
- /* the last linked CDB has to generate an interrupt
- * so that we'd be able to append the next lists to h/w
- * regardless of the XOR engine state at the moment of
- * appending of these next lists
- */
- xcb = xor_last_linked->hw_desc;
- xcb->cbc |= XOR_CBCR_CBCE_BIT;
-
- if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)) {
- /* XORcore is idle. Refetch now */
- do_xor_refetch = 0;
- ppc440spe_xor_set_link(xor_last_submit,
- xor_last_submit->hw_next);
-
- ADMA_LL_DBG(print_cb_list(chan,
- xor_last_submit->hw_next));
-
- xor_last_submit = xor_last_linked;
- iowrite32be(ioread32be(&xor_reg->crsr) |
- XOR_CRSR_RCBE_BIT | XOR_CRSR_64BA_BIT,
- &xor_reg->crsr);
- } else {
- /* XORcore is running. Refetch later in the handler */
- do_xor_refetch = 1;
- }
-
- break;
- }
-
- local_irq_restore(flags);
-}
-
-/**
- * ppc440spe_chan_get_current_descriptor - get the currently executed descriptor
- */
-static u32
-ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan)
-{
- struct dma_regs *dma_reg;
- struct xor_regs *xor_reg;
-
- if (unlikely(!chan->hw_chain_inited))
- /* h/w descriptor chain is not initialized yet */
- return 0;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- dma_reg = chan->device->dma_reg;
- return ioread32(&dma_reg->acpl) & (~DMA_CDB_MSK);
- case PPC440SPE_XOR_ID:
- xor_reg = chan->device->xor_reg;
- return ioread32be(&xor_reg->ccbalr);
- }
- return 0;
-}
-
-/**
- * ppc440spe_chan_run - enable the channel
- */
-static void ppc440spe_chan_run(struct ppc440spe_adma_chan *chan)
-{
- struct xor_regs *xor_reg;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- /* DMAs are always enabled, do nothing */
- break;
- case PPC440SPE_XOR_ID:
- /* drain write buffer */
- xor_reg = chan->device->xor_reg;
-
- /* fetch descriptor pointed to in */
- iowrite32be(XOR_CRSR_64BA_BIT | XOR_CRSR_XAE_BIT,
- &xor_reg->crsr);
- break;
- }
-}
-
-/******************************************************************************
- * ADMA device level
- ******************************************************************************/
-
-static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan);
-static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan);
-
-static dma_cookie_t
-ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx);
-
-static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *tx,
- dma_addr_t addr, int index);
-static void
-ppc440spe_adma_memcpy_xor_set_src(struct ppc440spe_adma_desc_slot *tx,
- dma_addr_t addr, int index);
-
-static void
-ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *tx,
- dma_addr_t *paddr, unsigned long flags);
-static void
-ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *tx,
- dma_addr_t addr, int index);
-static void
-ppc440spe_adma_pq_set_src_mult(struct ppc440spe_adma_desc_slot *tx,
- unsigned char mult, int index, int dst_pos);
-static void
-ppc440spe_adma_pqzero_sum_set_dest(struct ppc440spe_adma_desc_slot *tx,
- dma_addr_t paddr, dma_addr_t qaddr);
-
-static struct page *ppc440spe_rxor_srcs[32];
-
-/**
- * ppc440spe_can_rxor - check if the operands may be processed with RXOR
- */
-static int ppc440spe_can_rxor(struct page **srcs, int src_cnt, size_t len)
-{
- int i, order = 0, state = 0;
- int idx = 0;
-
- if (unlikely(!(src_cnt > 1)))
- return 0;
-
- BUG_ON(src_cnt > ARRAY_SIZE(ppc440spe_rxor_srcs));
-
- /* Skip holes in the source list before checking */
- for (i = 0; i < src_cnt; i++) {
- if (!srcs[i])
- continue;
- ppc440spe_rxor_srcs[idx++] = srcs[i];
- }
- src_cnt = idx;
-
- for (i = 1; i < src_cnt; i++) {
- char *cur_addr = page_address(ppc440spe_rxor_srcs[i]);
- char *old_addr = page_address(ppc440spe_rxor_srcs[i - 1]);
-
- switch (state) {
- case 0:
- if (cur_addr == old_addr + len) {
- /* direct RXOR */
- order = 1;
- state = 1;
- } else if (old_addr == cur_addr + len) {
- /* reverse RXOR */
- order = -1;
- state = 1;
- } else
- goto out;
- break;
- case 1:
- if ((i == src_cnt - 2) ||
- (order == -1 && cur_addr != old_addr - len)) {
- order = 0;
- state = 0;
- } else if ((cur_addr == old_addr + len * order) ||
- (cur_addr == old_addr + 2 * len) ||
- (cur_addr == old_addr + 3 * len)) {
- state = 2;
- } else {
- order = 0;
- state = 0;
- }
- break;
- case 2:
- order = 0;
- state = 0;
- break;
- }
- }
-
-out:
- if (state == 1 || state == 2)
- return 1;
-
- return 0;
-}
-
-/**
- * ppc440spe_adma_device_estimate - estimate the efficiency of processing
- * the operation given on this channel. It's assumed that 'chan' is
- * capable of processing the 'cap' type of operation.
- * @chan: channel to use
- * @cap: type of transaction
- * @dst_lst: array of destination pointers
- * @dst_cnt: number of destination operands
- * @src_lst: array of source pointers
- * @src_cnt: number of source operands
- * @src_sz: size of each source operand
- */
-static int ppc440spe_adma_estimate(struct dma_chan *chan,
- enum dma_transaction_type cap, struct page **dst_lst, int dst_cnt,
- struct page **src_lst, int src_cnt, size_t src_sz)
-{
- int ef = 1;
-
- if (cap == DMA_PQ || cap == DMA_PQ_VAL) {
- /* If RAID-6 capabilities were not activated don't try
- * to use them
- */
- if (unlikely(!ppc440spe_r6_enabled))
- return -1;
- }
- /* In the current implementation of ppc440spe ADMA driver it
- * makes sense to pick out only pq case, because it may be
- * processed:
- * (1) either using Biskup method on DMA2;
- * (2) or on DMA0/1.
- * Thus we favour (1) if the sources are suitable;
- * else let it be processed on one of the DMA0/1 engines.
- * In the sum_product case where destination is also the
- * source process it on DMA0/1 only.
- */
- if (cap == DMA_PQ && chan->chan_id == PPC440SPE_XOR_ID) {
-
- if (dst_cnt == 1 && src_cnt == 2 && dst_lst[0] == src_lst[1])
- ef = 0; /* sum_product case, process on DMA0/1 */
- else if (ppc440spe_can_rxor(src_lst, src_cnt, src_sz))
- ef = 3; /* override (DMA0/1 + idle) */
- else
- ef = 0; /* can't process on DMA2 if !rxor */
- }
-
- /* channel idleness increases the priority */
- if (likely(ef) &&
- !ppc440spe_chan_is_busy(to_ppc440spe_adma_chan(chan)))
- ef++;
-
- return ef;
-}
-
-struct dma_chan *
-ppc440spe_async_tx_find_best_channel(enum dma_transaction_type cap,
- struct page **dst_lst, int dst_cnt, struct page **src_lst,
- int src_cnt, size_t src_sz)
-{
- struct dma_chan *best_chan = NULL;
- struct ppc_dma_chan_ref *ref;
- int best_rank = -1;
-
- if (unlikely(!src_sz))
- return NULL;
- if (src_sz > PAGE_SIZE) {
- /*
- * should a user of the api ever pass > PAGE_SIZE requests,
- * we sort out cases where temporary page-sized buffers
- * are used.
- */
- switch (cap) {
- case DMA_PQ:
- if (src_cnt == 1 && dst_lst[1] == src_lst[0])
- return NULL;
- if (src_cnt == 2 && dst_lst[1] == src_lst[1])
- return NULL;
- break;
- case DMA_PQ_VAL:
- case DMA_XOR_VAL:
- return NULL;
- default:
- break;
- }
- }
-
- list_for_each_entry(ref, &ppc440spe_adma_chan_list, node) {
- if (dma_has_cap(cap, ref->chan->device->cap_mask)) {
- int rank;
-
- rank = ppc440spe_adma_estimate(ref->chan, cap, dst_lst,
- dst_cnt, src_lst, src_cnt, src_sz);
- if (rank > best_rank) {
- best_rank = rank;
- best_chan = ref->chan;
- }
- }
- }
-
- return best_chan;
-}
-EXPORT_SYMBOL_GPL(ppc440spe_async_tx_find_best_channel);
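/*
 * Hypothetical caller sketch (editorial, not from the kernel tree): picking
 * the best channel for a PQ operation over page-sized operands.  The page
 * arrays and their counts are assumed to be prepared by the caller.
 */
static struct dma_chan *example_pick_pq_chan(struct page **dst_pages,
					     struct page **src_pages,
					     int src_cnt)
{
	struct dma_chan *chan;

	chan = ppc440spe_async_tx_find_best_channel(DMA_PQ, dst_pages, 2,
						    src_pages, src_cnt,
						    PAGE_SIZE);
	if (!chan)
		pr_debug("no channel can process this PQ request\n");

	return chan;
}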
-
-/**
- * ppc440spe_get_group_entry - get group entry with index idx
- * @tdesc: is the last allocated slot in the group.
- */
-static struct ppc440spe_adma_desc_slot *
-ppc440spe_get_group_entry(struct ppc440spe_adma_desc_slot *tdesc, u32 entry_idx)
-{
- struct ppc440spe_adma_desc_slot *iter = tdesc->group_head;
- int i = 0;
-
- if (entry_idx < 0 || entry_idx >= (tdesc->src_cnt + tdesc->dst_cnt)) {
- printk("%s: entry_idx %d, src_cnt %d, dst_cnt %d\n",
- __func__, entry_idx, tdesc->src_cnt, tdesc->dst_cnt);
- BUG();
- }
-
- list_for_each_entry(iter, &tdesc->group_list, chain_node) {
- if (i++ == entry_idx)
- break;
- }
- return iter;
-}
-
-/**
- * ppc440spe_adma_free_slots - flags descriptor slots for reuse
- * @slot: Slot to free
- * Caller must hold &ppc440spe_chan->lock while calling this function
- */
-static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot,
- struct ppc440spe_adma_chan *chan)
-{
- int stride = slot->slots_per_op;
-
- while (stride--) {
- slot->slots_per_op = 0;
- slot = list_entry(slot->slot_node.next,
- struct ppc440spe_adma_desc_slot,
- slot_node);
- }
-}
-
-static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan,
- struct ppc440spe_adma_desc_slot *desc)
-{
- u32 src_cnt, dst_cnt;
- dma_addr_t addr;
-
- /*
- * get the number of sources & destinations
- * included in this descriptor and unmap
- * them all
- */
- src_cnt = ppc440spe_desc_get_src_num(desc, chan);
- dst_cnt = ppc440spe_desc_get_dst_num(desc, chan);
-
- /* unmap destinations */
- if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- while (dst_cnt--) {
- addr = ppc440spe_desc_get_dest_addr(
- desc, chan, dst_cnt);
- dma_unmap_page(chan->device->dev,
- addr, desc->unmap_len,
- DMA_FROM_DEVICE);
- }
- }
-
- /* unmap sources */
- if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- while (src_cnt--) {
- addr = ppc440spe_desc_get_src_addr(
- desc, chan, src_cnt);
- dma_unmap_page(chan->device->dev,
- addr, desc->unmap_len,
- DMA_TO_DEVICE);
- }
- }
-}
-
-/**
- * ppc440spe_adma_run_tx_complete_actions - call functions to be called
- * upon completion
- */
-static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
- struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_adma_chan *chan,
- dma_cookie_t cookie)
-{
- int i;
-
- BUG_ON(desc->async_tx.cookie < 0);
- if (desc->async_tx.cookie > 0) {
- cookie = desc->async_tx.cookie;
- desc->async_tx.cookie = 0;
-
- /* call the callback (must not sleep or submit new
- * operations to this channel)
- */
- if (desc->async_tx.callback)
- desc->async_tx.callback(
- desc->async_tx.callback_param);
-
- /* unmap dma addresses
- * (unmap_single vs unmap_page?)
- *
- * actually, ppc's dma_unmap_page() functions are empty, so
- * the following code is just for the sake of completeness
- */
- if (chan && chan->needs_unmap && desc->group_head &&
- desc->unmap_len) {
- struct ppc440spe_adma_desc_slot *unmap =
- desc->group_head;
- /* assume 1 slot per op always */
- u32 slot_count = unmap->slot_cnt;
-
- /* Run through the group list and unmap addresses */
- for (i = 0; i < slot_count; i++) {
- BUG_ON(!unmap);
- ppc440spe_adma_unmap(chan, unmap);
- unmap = unmap->hw_next;
- }
- }
- }
-
- /* run dependent operations */
- dma_run_dependencies(&desc->async_tx);
-
- return cookie;
-}
-
-/**
- * ppc440spe_adma_clean_slot - clean up CDB slot (if ack is set)
- */
-static int ppc440spe_adma_clean_slot(struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_adma_chan *chan)
-{
- /* the client is allowed to attach dependent operations
- * until 'ack' is set
- */
- if (!async_tx_test_ack(&desc->async_tx))
- return 0;
-
- /* leave the last descriptor in the chain
- * so we can append to it
- */
- if (list_is_last(&desc->chain_node, &chan->chain) ||
- desc->phys == ppc440spe_chan_get_current_descriptor(chan))
- return 1;
-
- if (chan->device->id != PPC440SPE_XOR_ID) {
- /* our DMA interrupt handler clears opc field of
- * each processed descriptor. For all types of
- * operations except for ZeroSum we do not actually
- * need ack from the interrupt handler. ZeroSum is a
- * special case since the result of this operation
- * is available from the handler only, so if we see
- * such type of descriptor (which is unprocessed yet)
- * then leave it in chain.
- */
- struct dma_cdb *cdb = desc->hw_desc;
- if (cdb->opc == DMA_CDB_OPC_DCHECK128)
- return 1;
- }
-
- dev_dbg(chan->device->common.dev, "\tfree slot %llx: %d stride: %d\n",
- desc->phys, desc->idx, desc->slots_per_op);
-
- list_del(&desc->chain_node);
- ppc440spe_adma_free_slots(desc, chan);
- return 0;
-}
-
-/**
- * __ppc440spe_adma_slot_cleanup - common clean-up routine which runs
- * through the channel CDB list until it reaches the descriptor currently
- * being processed. When the routine determines that all CDBs of a group
- * have completed, the corresponding callbacks (if any) are called and the
- * slots are freed.
- */
-static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
-{
- struct ppc440spe_adma_desc_slot *iter, *_iter, *group_start = NULL;
- dma_cookie_t cookie = 0;
- u32 current_desc = ppc440spe_chan_get_current_descriptor(chan);
- int busy = ppc440spe_chan_is_busy(chan);
- int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
-
- dev_dbg(chan->device->common.dev, "ppc440spe adma%d: %s\n",
- chan->device->id, __func__);
-
- if (!current_desc) {
- /* There were no transactions yet, so
- * nothing to clean
- */
- return;
- }
-
- /* free completed slots from the chain starting with
- * the oldest descriptor
- */
- list_for_each_entry_safe(iter, _iter, &chan->chain,
- chain_node) {
- dev_dbg(chan->device->common.dev, "\tcookie: %d slot: %d "
- "busy: %d this_desc: %#llx next_desc: %#x "
- "cur: %#x ack: %d\n",
- iter->async_tx.cookie, iter->idx, busy, iter->phys,
- ppc440spe_desc_get_link(iter, chan), current_desc,
- async_tx_test_ack(&iter->async_tx));
- prefetch(_iter);
- prefetch(&_iter->async_tx);
-
-		/* do not advance past the current descriptor loaded into the
-		 * hardware channel; subsequent descriptors are either in
-		 * progress or have not been submitted
-		 */
- if (seen_current)
- break;
-
- /* stop the search if we reach the current descriptor and the
- * channel is busy, or if it appears that the current descriptor
- * needs to be re-read (i.e. has been appended to)
- */
- if (iter->phys == current_desc) {
- BUG_ON(seen_current++);
- if (busy || ppc440spe_desc_get_link(iter, chan)) {
- /* not all descriptors of the group have
- * been completed; exit.
- */
- break;
- }
- }
-
- /* detect the start of a group transaction */
- if (!slot_cnt && !slots_per_op) {
- slot_cnt = iter->slot_cnt;
- slots_per_op = iter->slots_per_op;
- if (slot_cnt <= slots_per_op) {
- slot_cnt = 0;
- slots_per_op = 0;
- }
- }
-
- if (slot_cnt) {
- if (!group_start)
- group_start = iter;
- slot_cnt -= slots_per_op;
- }
-
- /* all the members of a group are complete */
- if (slots_per_op != 0 && slot_cnt == 0) {
- struct ppc440spe_adma_desc_slot *grp_iter, *_grp_iter;
- int end_of_chain = 0;
-
- /* clean up the group */
- slot_cnt = group_start->slot_cnt;
- grp_iter = group_start;
- list_for_each_entry_safe_from(grp_iter, _grp_iter,
- &chan->chain, chain_node) {
-
- cookie = ppc440spe_adma_run_tx_complete_actions(
- grp_iter, chan, cookie);
-
- slot_cnt -= slots_per_op;
- end_of_chain = ppc440spe_adma_clean_slot(
- grp_iter, chan);
- if (end_of_chain && slot_cnt) {
- /* Should wait for ZeroSum completion */
- if (cookie > 0)
- chan->completed_cookie = cookie;
- return;
- }
-
- if (slot_cnt == 0 || end_of_chain)
- break;
- }
-
- /* the group should be complete at this point */
- BUG_ON(slot_cnt);
-
- slots_per_op = 0;
- group_start = NULL;
- if (end_of_chain)
- break;
- else
- continue;
- } else if (slots_per_op) /* wait for group completion */
- continue;
-
- cookie = ppc440spe_adma_run_tx_complete_actions(iter, chan,
- cookie);
-
- if (ppc440spe_adma_clean_slot(iter, chan))
- break;
- }
-
- BUG_ON(!seen_current);
-
- if (cookie > 0) {
- chan->completed_cookie = cookie;
- pr_debug("\tcompleted cookie %d\n", cookie);
- }
-
-}
-
-/**
- * ppc440spe_adma_tasklet - clean up watch-dog initiator
- */
-static void ppc440spe_adma_tasklet(unsigned long data)
-{
- struct ppc440spe_adma_chan *chan = (struct ppc440spe_adma_chan *) data;
-
- spin_lock_nested(&chan->lock, SINGLE_DEPTH_NESTING);
- __ppc440spe_adma_slot_cleanup(chan);
- spin_unlock(&chan->lock);
-}
-
-/**
- * ppc440spe_adma_slot_cleanup - clean up scheduled initiator
- */
-static void ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
-{
- spin_lock_bh(&chan->lock);
- __ppc440spe_adma_slot_cleanup(chan);
- spin_unlock_bh(&chan->lock);
-}
-
-/**
- * ppc440spe_adma_alloc_slots - allocate free slots (if any)
- */
-static struct ppc440spe_adma_desc_slot *ppc440spe_adma_alloc_slots(
- struct ppc440spe_adma_chan *chan, int num_slots,
- int slots_per_op)
-{
- struct ppc440spe_adma_desc_slot *iter = NULL, *_iter;
- struct ppc440spe_adma_desc_slot *alloc_start = NULL;
- struct list_head chain = LIST_HEAD_INIT(chain);
- int slots_found, retry = 0;
-
-
- BUG_ON(!num_slots || !slots_per_op);
-	/* start the search from the last allocated descriptor;
-	 * if a contiguous allocation cannot be found, start searching
-	 * from the beginning of the list
-	 */
-retry:
- slots_found = 0;
- if (retry == 0)
- iter = chan->last_used;
- else
- iter = list_entry(&chan->all_slots,
- struct ppc440spe_adma_desc_slot,
- slot_node);
- list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots,
- slot_node) {
- prefetch(_iter);
- prefetch(&_iter->async_tx);
- if (iter->slots_per_op) {
- slots_found = 0;
- continue;
- }
-
- /* start the allocation if the slot is correctly aligned */
- if (!slots_found++)
- alloc_start = iter;
-
- if (slots_found == num_slots) {
- struct ppc440spe_adma_desc_slot *alloc_tail = NULL;
- struct ppc440spe_adma_desc_slot *last_used = NULL;
-
- iter = alloc_start;
- while (num_slots) {
- int i;
- /* pre-ack all but the last descriptor */
- if (num_slots != slots_per_op)
- async_tx_ack(&iter->async_tx);
-
- list_add_tail(&iter->chain_node, &chain);
- alloc_tail = iter;
- iter->async_tx.cookie = 0;
- iter->hw_next = NULL;
- iter->flags = 0;
- iter->slot_cnt = num_slots;
- iter->xor_check_result = NULL;
- for (i = 0; i < slots_per_op; i++) {
- iter->slots_per_op = slots_per_op - i;
- last_used = iter;
- iter = list_entry(iter->slot_node.next,
- struct ppc440spe_adma_desc_slot,
- slot_node);
- }
- num_slots -= slots_per_op;
- }
- alloc_tail->group_head = alloc_start;
- alloc_tail->async_tx.cookie = -EBUSY;
- list_splice(&chain, &alloc_tail->group_list);
- chan->last_used = last_used;
- return alloc_tail;
- }
- }
- if (!retry++)
- goto retry;
-
- /* try to free some slots if the allocation fails */
- tasklet_schedule(&chan->irq_tasklet);
- return NULL;
-}
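-
-/*
- * Summary of the allocation strategy above: a first-fit search for
- * 'num_slots' contiguous free slots starts at chan->last_used and is retried
- * once from the head of chan->all_slots if no contiguous run is found.  All
- * descriptors of the group except the last one are pre-acked, the group is
- * collected on alloc_tail->group_list, and the returned alloc_tail carries
- * the group_head pointer and a temporary -EBUSY cookie.  On failure the
- * clean-up tasklet is scheduled in the hope that completed slots get freed.
- */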
-
-/**
- * ppc440spe_adma_alloc_chan_resources - allocate pools for CDB slots
- */
-static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan)
-{
- struct ppc440spe_adma_chan *ppc440spe_chan;
- struct ppc440spe_adma_desc_slot *slot = NULL;
- char *hw_desc;
- int i, db_sz;
- int init;
-
- ppc440spe_chan = to_ppc440spe_adma_chan(chan);
- init = ppc440spe_chan->slots_allocated ? 0 : 1;
- chan->chan_id = ppc440spe_chan->device->id;
-
- /* Allocate descriptor slots */
- i = ppc440spe_chan->slots_allocated;
- if (ppc440spe_chan->device->id != PPC440SPE_XOR_ID)
- db_sz = sizeof(struct dma_cdb);
- else
- db_sz = sizeof(struct xor_cb);
-
- for (; i < (ppc440spe_chan->device->pool_size / db_sz); i++) {
- slot = kzalloc(sizeof(struct ppc440spe_adma_desc_slot),
- GFP_KERNEL);
- if (!slot) {
-			printk(KERN_INFO "SPE ADMA Channel only initialized"
-				" %d descriptor slots\n", i--);
- break;
- }
-
- hw_desc = (char *) ppc440spe_chan->device->dma_desc_pool_virt;
- slot->hw_desc = (void *) &hw_desc[i * db_sz];
- dma_async_tx_descriptor_init(&slot->async_tx, chan);
- slot->async_tx.tx_submit = ppc440spe_adma_tx_submit;
- INIT_LIST_HEAD(&slot->chain_node);
- INIT_LIST_HEAD(&slot->slot_node);
- INIT_LIST_HEAD(&slot->group_list);
- slot->phys = ppc440spe_chan->device->dma_desc_pool + i * db_sz;
- slot->idx = i;
-
- spin_lock_bh(&ppc440spe_chan->lock);
- ppc440spe_chan->slots_allocated++;
- list_add_tail(&slot->slot_node, &ppc440spe_chan->all_slots);
- spin_unlock_bh(&ppc440spe_chan->lock);
- }
-
- if (i && !ppc440spe_chan->last_used) {
- ppc440spe_chan->last_used =
- list_entry(ppc440spe_chan->all_slots.next,
- struct ppc440spe_adma_desc_slot,
- slot_node);
- }
-
- dev_dbg(ppc440spe_chan->device->common.dev,
- "ppc440spe adma%d: allocated %d descriptor slots\n",
- ppc440spe_chan->device->id, i);
-
- /* initialize the channel and the chain with a null operation */
- if (init) {
- switch (ppc440spe_chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- ppc440spe_chan->hw_chain_inited = 0;
- /* Use WXOR for self-testing */
- if (!ppc440spe_r6_tchan)
- ppc440spe_r6_tchan = ppc440spe_chan;
- break;
- case PPC440SPE_XOR_ID:
- ppc440spe_chan_start_null_xor(ppc440spe_chan);
- break;
- default:
- BUG();
- }
- ppc440spe_chan->needs_unmap = 1;
- }
-
- return (i > 0) ? i : -ENOMEM;
-}
-
-/**
- * ppc440spe_desc_assign_cookie - assign a cookie
- */
-static dma_cookie_t ppc440spe_desc_assign_cookie(
- struct ppc440spe_adma_chan *chan,
- struct ppc440spe_adma_desc_slot *desc)
-{
- dma_cookie_t cookie = chan->common.cookie;
-
- cookie++;
- if (cookie < 0)
- cookie = 1;
- chan->common.cookie = desc->async_tx.cookie = cookie;
- return cookie;
-}
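-
-/*
- * Example of the wrap-around handled above: when the incremented cookie goes
- * negative (counter wrap), it is reset to 1, so valid cookies stay positive;
- * zero and negative values are reserved for "not yet submitted" states such
- * as the -EBUSY assigned at slot-allocation time.
- */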
-
-/**
- * ppc440spe_rxor_set_region - set the RXOR region mask into a XOR CB operand
- */
-static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc,
- u8 xor_arg_no, u32 mask)
-{
- struct xor_cb *xcb = desc->hw_desc;
-
- xcb->ops[xor_arg_no].h |= mask;
-}
-
-/**
- * ppc440spe_rxor_set_src - set the source address into a XOR CB operand
- */
-static void ppc440spe_rxor_set_src(struct ppc440spe_adma_desc_slot *desc,
- u8 xor_arg_no, dma_addr_t addr)
-{
- struct xor_cb *xcb = desc->hw_desc;
-
- xcb->ops[xor_arg_no].h |= DMA_CUED_XOR_BASE;
- xcb->ops[xor_arg_no].l = addr;
-}
-
-/**
- * ppc440spe_rxor_set_mult - set a multiplier value into a XOR CB operand
- */
-static void ppc440spe_rxor_set_mult(struct ppc440spe_adma_desc_slot *desc,
- u8 xor_arg_no, u8 idx, u8 mult)
-{
- struct xor_cb *xcb = desc->hw_desc;
-
- xcb->ops[xor_arg_no].h |= mult << (DMA_CUED_MULT1_OFF + idx * 8);
-}
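-
-/*
- * Operand layout used by the three helpers above: each XOR CB operand is an
- * {h, l} pair where .l holds the operand address, while .h accumulates the
- * DMA_CUED_XOR_BASE flag, the RXOR region mask and the 8-bit multiplier
- * fields selected by 'idx' starting at DMA_CUED_MULT1_OFF.
- */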
-
-/**
- * ppc440spe_adma_check_threshold - append CDBs to the h/w chain once the
- * threshold has been reached
- */
-static void ppc440spe_adma_check_threshold(struct ppc440spe_adma_chan *chan)
-{
- dev_dbg(chan->device->common.dev, "ppc440spe adma%d: pending: %d\n",
- chan->device->id, chan->pending);
-
- if (chan->pending >= PPC440SPE_ADMA_THRESHOLD) {
- chan->pending = 0;
- ppc440spe_chan_append(chan);
- }
-}
-
-/**
- * ppc440spe_adma_tx_submit - submit a new descriptor group to the channel
- * (the descriptors are not necessarily appended to the h/w chain
- * right away)
- */
-static dma_cookie_t ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx)
-{
- struct ppc440spe_adma_desc_slot *sw_desc;
- struct ppc440spe_adma_chan *chan = to_ppc440spe_adma_chan(tx->chan);
- struct ppc440spe_adma_desc_slot *group_start, *old_chain_tail;
- int slot_cnt;
- int slots_per_op;
- dma_cookie_t cookie;
-
- sw_desc = tx_to_ppc440spe_adma_slot(tx);
-
- group_start = sw_desc->group_head;
- slot_cnt = group_start->slot_cnt;
- slots_per_op = group_start->slots_per_op;
-
- spin_lock_bh(&chan->lock);
-
- cookie = ppc440spe_desc_assign_cookie(chan, sw_desc);
-
- if (unlikely(list_empty(&chan->chain))) {
- /* first peer */
- list_splice_init(&sw_desc->group_list, &chan->chain);
- chan_first_cdb[chan->device->id] = group_start;
- } else {
- /* isn't first peer, bind CDBs to chain */
- old_chain_tail = list_entry(chan->chain.prev,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- list_splice_init(&sw_desc->group_list,
- &old_chain_tail->chain_node);
- /* fix up the hardware chain */
- ppc440spe_desc_set_link(chan, old_chain_tail, group_start);
- }
-
- /* increment the pending count by the number of operations */
- chan->pending += slot_cnt / slots_per_op;
- ppc440spe_adma_check_threshold(chan);
- spin_unlock_bh(&chan->lock);
-
- dev_dbg(chan->device->common.dev,
- "ppc440spe adma%d: %s cookie: %d slot: %d tx %p\n",
- chan->device->id, __func__,
- sw_desc->async_tx.cookie, sw_desc->idx, sw_desc);
-
- return cookie;
-}
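-
-/*
- * Summary of the submit path above: under the channel lock a cookie is
- * assigned, the software descriptor group is spliced onto chan->chain (and
- * the h/w chain is linked to the previous tail via ppc440spe_desc_set_link()
- * unless the chain was empty), and chan->pending grows by the number of
- * operations.  ppc440spe_adma_check_threshold() appends the CDBs to the h/w
- * channel only once PPC440SPE_ADMA_THRESHOLD operations have accumulated, so
- * descriptors may wait in the s/w chain until a later submit or
- * ppc440spe_adma_issue_pending() flushes them.
- */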
-
-/**
- * ppc440spe_adma_prep_dma_interrupt - prepare CDB for a pseudo DMA operation
- */
-static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_interrupt(
- struct dma_chan *chan, unsigned long flags)
-{
- struct ppc440spe_adma_chan *ppc440spe_chan;
- struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
- int slot_cnt, slots_per_op;
-
- ppc440spe_chan = to_ppc440spe_adma_chan(chan);
-
- dev_dbg(ppc440spe_chan->device->common.dev,
- "ppc440spe adma%d: %s\n", ppc440spe_chan->device->id,
- __func__);
-
- spin_lock_bh(&ppc440spe_chan->lock);
- slot_cnt = slots_per_op = 1;
- sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
- slots_per_op);
- if (sw_desc) {
- group_start = sw_desc->group_head;
- ppc440spe_desc_init_interrupt(group_start, ppc440spe_chan);
- group_start->unmap_len = 0;
- sw_desc->async_tx.flags = flags;
- }
- spin_unlock_bh(&ppc440spe_chan->lock);
-
- return sw_desc ? &sw_desc->async_tx : NULL;
-}
-
-/**
- * ppc440spe_adma_prep_dma_memcpy - prepare CDB for a MEMCPY operation
- */
-static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy(
- struct dma_chan *chan, dma_addr_t dma_dest,
- dma_addr_t dma_src, size_t len, unsigned long flags)
-{
- struct ppc440spe_adma_chan *ppc440spe_chan;
- struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
- int slot_cnt, slots_per_op;
-
- ppc440spe_chan = to_ppc440spe_adma_chan(chan);
-
- if (unlikely(!len))
- return NULL;
-
- BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
-
- spin_lock_bh(&ppc440spe_chan->lock);
-
- dev_dbg(ppc440spe_chan->device->common.dev,
- "ppc440spe adma%d: %s len: %u int_en %d\n",
- ppc440spe_chan->device->id, __func__, len,
- flags & DMA_PREP_INTERRUPT ? 1 : 0);
- slot_cnt = slots_per_op = 1;
- sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
- slots_per_op);
- if (sw_desc) {
- group_start = sw_desc->group_head;
- ppc440spe_desc_init_memcpy(group_start, flags);
- ppc440spe_adma_set_dest(group_start, dma_dest, 0);
- ppc440spe_adma_memcpy_xor_set_src(group_start, dma_src, 0);
- ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
- sw_desc->unmap_len = len;
- sw_desc->async_tx.flags = flags;
- }
- spin_unlock_bh(&ppc440spe_chan->lock);
-
- return sw_desc ? &sw_desc->async_tx : NULL;
-}
-
-/**
- * ppc440spe_adma_prep_dma_memset - prepare CDB for a MEMSET operation
- */
-static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset(
- struct dma_chan *chan, dma_addr_t dma_dest, int value,
- size_t len, unsigned long flags)
-{
- struct ppc440spe_adma_chan *ppc440spe_chan;
- struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
- int slot_cnt, slots_per_op;
-
- ppc440spe_chan = to_ppc440spe_adma_chan(chan);
-
- if (unlikely(!len))
- return NULL;
-
- BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
-
- spin_lock_bh(&ppc440spe_chan->lock);
-
- dev_dbg(ppc440spe_chan->device->common.dev,
- "ppc440spe adma%d: %s cal: %u len: %u int_en %d\n",
- ppc440spe_chan->device->id, __func__, value, len,
- flags & DMA_PREP_INTERRUPT ? 1 : 0);
-
- slot_cnt = slots_per_op = 1;
- sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
- slots_per_op);
- if (sw_desc) {
- group_start = sw_desc->group_head;
- ppc440spe_desc_init_memset(group_start, value, flags);
- ppc440spe_adma_set_dest(group_start, dma_dest, 0);
- ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
- sw_desc->unmap_len = len;
- sw_desc->async_tx.flags = flags;
- }
- spin_unlock_bh(&ppc440spe_chan->lock);
-
- return sw_desc ? &sw_desc->async_tx : NULL;
-}
-
-/**
- * ppc440spe_adma_prep_dma_xor - prepare CDB for a XOR operation
- */
-static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
- struct dma_chan *chan, dma_addr_t dma_dest,
- dma_addr_t *dma_src, u32 src_cnt, size_t len,
- unsigned long flags)
-{
- struct ppc440spe_adma_chan *ppc440spe_chan;
- struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
- int slot_cnt, slots_per_op;
-
- ppc440spe_chan = to_ppc440spe_adma_chan(chan);
-
- ADMA_LL_DBG(prep_dma_xor_dbg(ppc440spe_chan->device->id,
- dma_dest, dma_src, src_cnt));
- if (unlikely(!len))
- return NULL;
- BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
-
- dev_dbg(ppc440spe_chan->device->common.dev,
- "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
- ppc440spe_chan->device->id, __func__, src_cnt, len,
- flags & DMA_PREP_INTERRUPT ? 1 : 0);
-
- spin_lock_bh(&ppc440spe_chan->lock);
- slot_cnt = ppc440spe_chan_xor_slot_count(len, src_cnt, &slots_per_op);
- sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
- slots_per_op);
- if (sw_desc) {
- group_start = sw_desc->group_head;
- ppc440spe_desc_init_xor(group_start, src_cnt, flags);
- ppc440spe_adma_set_dest(group_start, dma_dest, 0);
- while (src_cnt--)
- ppc440spe_adma_memcpy_xor_set_src(group_start,
- dma_src[src_cnt], src_cnt);
- ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
- sw_desc->unmap_len = len;
- sw_desc->async_tx.flags = flags;
- }
- spin_unlock_bh(&ppc440spe_chan->lock);
-
- return sw_desc ? &sw_desc->async_tx : NULL;
-}
-
-static inline void
-ppc440spe_desc_set_xor_src_cnt(struct ppc440spe_adma_desc_slot *desc,
- int src_cnt);
-static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor);
-
-/**
- * ppc440spe_adma_init_dma2rxor_slot - prepare the RXOR CDBs for all sources
- * of a DMA2 RXOR slot
- */
-static void ppc440spe_adma_init_dma2rxor_slot(
- struct ppc440spe_adma_desc_slot *desc,
- dma_addr_t *src, int src_cnt)
-{
- int i;
-
- /* initialize CDB */
- for (i = 0; i < src_cnt; i++) {
- ppc440spe_adma_dma2rxor_prep_src(desc, &desc->rxor_cursor, i,
- desc->src_cnt, (u32)src[i]);
- }
-}
-
-/**
- * ppc440spe_dma01_prep_mult - prepare CDBs for a Q operation where the
- * destination is also a source
- */
-static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_mult(
- struct ppc440spe_adma_chan *ppc440spe_chan,
- dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
- const unsigned char *scf, size_t len, unsigned long flags)
-{
- struct ppc440spe_adma_desc_slot *sw_desc = NULL;
- unsigned long op = 0;
- int slot_cnt;
-
- set_bit(PPC440SPE_DESC_WXOR, &op);
- slot_cnt = 2;
-
- spin_lock_bh(&ppc440spe_chan->lock);
-
- /* use WXOR, each descriptor occupies one slot */
- sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
- if (sw_desc) {
- struct ppc440spe_adma_chan *chan;
- struct ppc440spe_adma_desc_slot *iter;
- struct dma_cdb *hw_desc;
-
- chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
- set_bits(op, &sw_desc->flags);
- sw_desc->src_cnt = src_cnt;
- sw_desc->dst_cnt = dst_cnt;
- /* First descriptor, zero data in the destination and copy it
- * to q page using MULTICAST transfer.
- */
- iter = list_first_entry(&sw_desc->group_list,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
- /* set 'next' pointer */
- iter->hw_next = list_entry(iter->chain_node.next,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- clear_bit(PPC440SPE_DESC_INT, &iter->flags);
- hw_desc = iter->hw_desc;
- hw_desc->opc = DMA_CDB_OPC_MULTICAST;
-
- ppc440spe_desc_set_dest_addr(iter, chan,
- DMA_CUED_XOR_BASE, dst[0], 0);
- ppc440spe_desc_set_dest_addr(iter, chan, 0, dst[1], 1);
- ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
- src[0]);
- ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
- iter->unmap_len = len;
-
- /*
- * Second descriptor, multiply data from the q page
- * and store the result in real destination.
- */
- iter = list_first_entry(&iter->chain_node,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
- iter->hw_next = NULL;
- if (flags & DMA_PREP_INTERRUPT)
- set_bit(PPC440SPE_DESC_INT, &iter->flags);
- else
- clear_bit(PPC440SPE_DESC_INT, &iter->flags);
-
- hw_desc = iter->hw_desc;
- hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
- ppc440spe_desc_set_src_addr(iter, chan, 0,
- DMA_CUED_XOR_HB, dst[1]);
- ppc440spe_desc_set_dest_addr(iter, chan,
- DMA_CUED_XOR_BASE, dst[0], 0);
-
- ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
- DMA_CDB_SG_DST1, scf[0]);
- ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
- iter->unmap_len = len;
- sw_desc->async_tx.flags = flags;
- }
-
- spin_unlock_bh(&ppc440spe_chan->lock);
-
- return sw_desc;
-}
-
-/**
- * ppc440spe_dma01_prep_sum_product - prepare CDBs for the
- * Dx = A*(P+Pxy) + B*(Q+Qxy) operation where the destination is also
- * a source.
- */
-static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_sum_product(
- struct ppc440spe_adma_chan *ppc440spe_chan,
- dma_addr_t *dst, dma_addr_t *src, int src_cnt,
- const unsigned char *scf, size_t len, unsigned long flags)
-{
- struct ppc440spe_adma_desc_slot *sw_desc = NULL;
- unsigned long op = 0;
- int slot_cnt;
-
- set_bit(PPC440SPE_DESC_WXOR, &op);
- slot_cnt = 3;
-
- spin_lock_bh(&ppc440spe_chan->lock);
-
- /* WXOR, each descriptor occupies one slot */
- sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
- if (sw_desc) {
- struct ppc440spe_adma_chan *chan;
- struct ppc440spe_adma_desc_slot *iter;
- struct dma_cdb *hw_desc;
-
- chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
- set_bits(op, &sw_desc->flags);
- sw_desc->src_cnt = src_cnt;
- sw_desc->dst_cnt = 1;
- /* 1st descriptor, src[1] data to q page and zero destination */
- iter = list_first_entry(&sw_desc->group_list,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
- iter->hw_next = list_entry(iter->chain_node.next,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- clear_bit(PPC440SPE_DESC_INT, &iter->flags);
- hw_desc = iter->hw_desc;
- hw_desc->opc = DMA_CDB_OPC_MULTICAST;
-
- ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
- *dst, 0);
- ppc440spe_desc_set_dest_addr(iter, chan, 0,
- ppc440spe_chan->qdest, 1);
- ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
- src[1]);
- ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
- iter->unmap_len = len;
-
- /* 2nd descriptor, multiply src[1] data and store the
- * result in destination */
- iter = list_first_entry(&iter->chain_node,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
- /* set 'next' pointer */
- iter->hw_next = list_entry(iter->chain_node.next,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- if (flags & DMA_PREP_INTERRUPT)
- set_bit(PPC440SPE_DESC_INT, &iter->flags);
- else
- clear_bit(PPC440SPE_DESC_INT, &iter->flags);
-
- hw_desc = iter->hw_desc;
- hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
- ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
- ppc440spe_chan->qdest);
- ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
- *dst, 0);
- ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
- DMA_CDB_SG_DST1, scf[1]);
- ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
- iter->unmap_len = len;
-
- /*
- * 3rd descriptor, multiply src[0] data and xor it
- * with destination
- */
- iter = list_first_entry(&iter->chain_node,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
- iter->hw_next = NULL;
- if (flags & DMA_PREP_INTERRUPT)
- set_bit(PPC440SPE_DESC_INT, &iter->flags);
- else
- clear_bit(PPC440SPE_DESC_INT, &iter->flags);
-
- hw_desc = iter->hw_desc;
- hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
- ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
- src[0]);
- ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
- *dst, 0);
- ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
- DMA_CDB_SG_DST1, scf[0]);
- ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
- iter->unmap_len = len;
- sw_desc->async_tx.flags = flags;
- }
-
- spin_unlock_bh(&ppc440spe_chan->lock);
-
- return sw_desc;
-}
-
-static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_pq(
- struct ppc440spe_adma_chan *ppc440spe_chan,
- dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
- const unsigned char *scf, size_t len, unsigned long flags)
-{
- int slot_cnt;
- struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
- unsigned long op = 0;
- unsigned char mult = 1;
-
- pr_debug("%s: dst_cnt %d, src_cnt %d, len %d\n",
- __func__, dst_cnt, src_cnt, len);
-	/* select WXOR/RXOR operation depending on the
-	 * source addresses of the operands and the number
-	 * of destinations (RXOR supports only Q-parity calculations)
-	 */
- set_bit(PPC440SPE_DESC_WXOR, &op);
- if (!test_and_set_bit(PPC440SPE_RXOR_RUN, &ppc440spe_rxor_state)) {
-		/* no active RXOR;
-		 * do RXOR if:
-		 * - there is more than one source,
-		 * - len is aligned on a 512-byte boundary,
-		 * - the source addresses fit one of the 4 possible regions.
-		 */
- if (src_cnt > 1 &&
- !(len & MQ0_CF2H_RXOR_BS_MASK) &&
- (src[0] + len) == src[1]) {
- /* may do RXOR R1 R2 */
- set_bit(PPC440SPE_DESC_RXOR, &op);
- if (src_cnt != 2) {
- /* may try to enhance region of RXOR */
- if ((src[1] + len) == src[2]) {
- /* do RXOR R1 R2 R3 */
- set_bit(PPC440SPE_DESC_RXOR123,
- &op);
- } else if ((src[1] + len * 2) == src[2]) {
- /* do RXOR R1 R2 R4 */
- set_bit(PPC440SPE_DESC_RXOR124, &op);
- } else if ((src[1] + len * 3) == src[2]) {
- /* do RXOR R1 R2 R5 */
- set_bit(PPC440SPE_DESC_RXOR125,
- &op);
- } else {
- /* do RXOR R1 R2 */
- set_bit(PPC440SPE_DESC_RXOR12,
- &op);
- }
- } else {
- /* do RXOR R1 R2 */
- set_bit(PPC440SPE_DESC_RXOR12, &op);
- }
- }
-
- if (!test_bit(PPC440SPE_DESC_RXOR, &op)) {
-			/* cannot do this operation with RXOR */
- clear_bit(PPC440SPE_RXOR_RUN,
- &ppc440spe_rxor_state);
- } else {
- /* can do; set block size right now */
- ppc440spe_desc_set_rxor_block_size(len);
- }
- }
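-
-	/*
-	 * Example of the region selection above, assuming
-	 * src[0] + len == src[1] and a suitably aligned len:
-	 *   src[2] == src[1] + len     selects RXOR123 (R1 R2 R3),
-	 *   src[2] == src[1] + 2*len   selects RXOR124 (R1 R2 R4),
-	 *   src[2] == src[1] + 3*len   selects RXOR125 (R1 R2 R5),
-	 * and any other third source falls back to RXOR12 (R1 R2 only).
-	 */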
-
- /* Number of necessary slots depends on operation type selected */
- if (!test_bit(PPC440SPE_DESC_RXOR, &op)) {
- /* This is a WXOR only chain. Need descriptors for each
- * source to GF-XOR them with WXOR, and need descriptors
- * for each destination to zero them with WXOR
- */
- slot_cnt = src_cnt;
-
- if (flags & DMA_PREP_ZERO_P) {
- slot_cnt++;
- set_bit(PPC440SPE_ZERO_P, &op);
- }
- if (flags & DMA_PREP_ZERO_Q) {
- slot_cnt++;
- set_bit(PPC440SPE_ZERO_Q, &op);
- }
- } else {
-		/* Need 1 or 2 descriptors for the RXOR operation, and
-		 * (src_cnt - (2 or 3)) descriptors for WXOR of the
-		 * remaining sources (if any)
-		 */
- slot_cnt = dst_cnt;
-
- if (flags & DMA_PREP_ZERO_P)
- set_bit(PPC440SPE_ZERO_P, &op);
- if (flags & DMA_PREP_ZERO_Q)
- set_bit(PPC440SPE_ZERO_Q, &op);
-
- if (test_bit(PPC440SPE_DESC_RXOR12, &op))
- slot_cnt += src_cnt - 2;
- else
- slot_cnt += src_cnt - 3;
-
-		/* Thus we have either an RXOR-only chain or a
-		 * mixed RXOR/WXOR one
-		 */
- if (slot_cnt == dst_cnt)
- /* RXOR only chain */
- clear_bit(PPC440SPE_DESC_WXOR, &op);
- }
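-
-	/*
-	 * For instance, a WXOR-only chain with src_cnt = 4 and both P and Q
-	 * zeroed needs 4 + 2 = 6 slots, while an RXOR12-based chain with
-	 * dst_cnt = 2 and the same 4 sources needs 2 + (4 - 2) = 4 slots:
-	 * one RXOR descriptor per destination plus two WXOR descriptors for
-	 * the leftover sources.
-	 */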
-
- spin_lock_bh(&ppc440spe_chan->lock);
- /* for both RXOR/WXOR each descriptor occupies one slot */
- sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
- if (sw_desc) {
- ppc440spe_desc_init_dma01pq(sw_desc, dst_cnt, src_cnt,
- flags, op);
-
- /* setup dst/src/mult */
- pr_debug("%s: set dst descriptor 0, 1: 0x%016llx, 0x%016llx\n",
- __func__, dst[0], dst[1]);
- ppc440spe_adma_pq_set_dest(sw_desc, dst, flags);
- while (src_cnt--) {
- ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt],
- src_cnt);
-
-			/* NOTE: "Multi = 0 is equivalent to = 1", as
-			 * stated in 440SPSPe_RAID6_Addendum_UM_1_17.pdf,
-			 * does not hold for RXOR with DMA0/1! Instead,
-			 * mult=0 leads to zeroing of the source data after
-			 * RXOR. So, for the P case set mult=1 explicitly.
-			 */
- if (!(flags & DMA_PREP_PQ_DISABLE_Q))
- mult = scf[src_cnt];
- ppc440spe_adma_pq_set_src_mult(sw_desc,
- mult, src_cnt, dst_cnt - 1);
- }
-
-		/* Set up the byte count for each slot just allocated */
- sw_desc->async_tx.flags = flags;
- list_for_each_entry(iter, &sw_desc->group_list,
- chain_node) {
- ppc440spe_desc_set_byte_count(iter,
- ppc440spe_chan, len);
- iter->unmap_len = len;
- }
- }
- spin_unlock_bh(&ppc440spe_chan->lock);
-
- return sw_desc;
-}
-
-static struct ppc440spe_adma_desc_slot *ppc440spe_dma2_prep_pq(
- struct ppc440spe_adma_chan *ppc440spe_chan,
- dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
- const unsigned char *scf, size_t len, unsigned long flags)
-{
- int slot_cnt, descs_per_op;
- struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
- unsigned long op = 0;
- unsigned char mult = 1;
-
- BUG_ON(!dst_cnt);
- /*pr_debug("%s: dst_cnt %d, src_cnt %d, len %d\n",
- __func__, dst_cnt, src_cnt, len);*/
-
- spin_lock_bh(&ppc440spe_chan->lock);
- descs_per_op = ppc440spe_dma2_pq_slot_count(src, src_cnt, len);
- if (descs_per_op < 0) {
- spin_unlock_bh(&ppc440spe_chan->lock);
- return NULL;
- }
-
- /* depending on number of sources we have 1 or 2 RXOR chains */
- slot_cnt = descs_per_op * dst_cnt;
-
- sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
- if (sw_desc) {
- op = slot_cnt;
- sw_desc->async_tx.flags = flags;
- list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
- ppc440spe_desc_init_dma2pq(iter, dst_cnt, src_cnt,
- --op ? 0 : flags);
- ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
- len);
- iter->unmap_len = len;
-
- ppc440spe_init_rxor_cursor(&(iter->rxor_cursor));
- iter->rxor_cursor.len = len;
- iter->descs_per_op = descs_per_op;
- }
- op = 0;
- list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
- op++;
- if (op % descs_per_op == 0)
- ppc440spe_adma_init_dma2rxor_slot(iter, src,
- src_cnt);
- if (likely(!list_is_last(&iter->chain_node,
- &sw_desc->group_list))) {
- /* set 'next' pointer */
- iter->hw_next =
- list_entry(iter->chain_node.next,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- ppc440spe_xor_set_link(iter, iter->hw_next);
- } else {
- /* this is the last descriptor. */
- iter->hw_next = NULL;
- }
- }
-
- /* fixup head descriptor */
- sw_desc->dst_cnt = dst_cnt;
- if (flags & DMA_PREP_ZERO_P)
- set_bit(PPC440SPE_ZERO_P, &sw_desc->flags);
- if (flags & DMA_PREP_ZERO_Q)
- set_bit(PPC440SPE_ZERO_Q, &sw_desc->flags);
-
- /* setup dst/src/mult */
- ppc440spe_adma_pq_set_dest(sw_desc, dst, flags);
-
- while (src_cnt--) {
- /* handle descriptors (if dst_cnt == 2) inside
- * the ppc440spe_adma_pq_set_srcxxx() functions
- */
- ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt],
- src_cnt);
- if (!(flags & DMA_PREP_PQ_DISABLE_Q))
- mult = scf[src_cnt];
- ppc440spe_adma_pq_set_src_mult(sw_desc,
- mult, src_cnt, dst_cnt - 1);
- }
- }
- spin_unlock_bh(&ppc440spe_chan->lock);
- ppc440spe_desc_set_rxor_block_size(len);
- return sw_desc;
-}
-
-/**
- * ppc440spe_adma_prep_dma_pq - prepare CDB (group) for a GF-XOR operation
- */
-static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq(
- struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
- unsigned int src_cnt, const unsigned char *scf,
- size_t len, unsigned long flags)
-{
- struct ppc440spe_adma_chan *ppc440spe_chan;
- struct ppc440spe_adma_desc_slot *sw_desc = NULL;
- int dst_cnt = 0;
-
- ppc440spe_chan = to_ppc440spe_adma_chan(chan);
-
- ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id,
- dst, src, src_cnt));
- BUG_ON(!len);
- BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
- BUG_ON(!src_cnt);
-
- if (src_cnt == 1 && dst[1] == src[0]) {
- dma_addr_t dest[2];
-
- /* dst[1] is real destination (Q) */
- dest[0] = dst[1];
- /* this is the page to multicast source data to */
- dest[1] = ppc440spe_chan->qdest;
- sw_desc = ppc440spe_dma01_prep_mult(ppc440spe_chan,
- dest, 2, src, src_cnt, scf, len, flags);
- return sw_desc ? &sw_desc->async_tx : NULL;
- }
-
- if (src_cnt == 2 && dst[1] == src[1]) {
- sw_desc = ppc440spe_dma01_prep_sum_product(ppc440spe_chan,
- &dst[1], src, 2, scf, len, flags);
- return sw_desc ? &sw_desc->async_tx : NULL;
- }
-
- if (!(flags & DMA_PREP_PQ_DISABLE_P)) {
- BUG_ON(!dst[0]);
- dst_cnt++;
- flags |= DMA_PREP_ZERO_P;
- }
-
- if (!(flags & DMA_PREP_PQ_DISABLE_Q)) {
- BUG_ON(!dst[1]);
- dst_cnt++;
- flags |= DMA_PREP_ZERO_Q;
- }
-
- BUG_ON(!dst_cnt);
-
- dev_dbg(ppc440spe_chan->device->common.dev,
- "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
- ppc440spe_chan->device->id, __func__, src_cnt, len,
- flags & DMA_PREP_INTERRUPT ? 1 : 0);
-
- switch (ppc440spe_chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- sw_desc = ppc440spe_dma01_prep_pq(ppc440spe_chan,
- dst, dst_cnt, src, src_cnt, scf,
- len, flags);
- break;
-
- case PPC440SPE_XOR_ID:
- sw_desc = ppc440spe_dma2_prep_pq(ppc440spe_chan,
- dst, dst_cnt, src, src_cnt, scf,
- len, flags);
- break;
- }
-
- return sw_desc ? &sw_desc->async_tx : NULL;
-}
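-
-/*
- * Dispatch summary for ppc440spe_adma_prep_dma_pq(): the two in-place cases
- * (a single source that is also the Q destination, or two sources where the
- * second one is the Q destination) are routed to ppc440spe_dma01_prep_mult()
- * and ppc440spe_dma01_prep_sum_product() respectively; everything else goes
- * to ppc440spe_dma01_prep_pq() on DMA0/DMA1 or ppc440spe_dma2_prep_pq() on
- * the XOR engine, with DMA_PREP_ZERO_P/Q added for every destination that is
- * not disabled.
- */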
-
-/**
- * ppc440spe_adma_prep_dma_pqzero_sum - prepare CDB group for
- * a PQ_ZERO_SUM operation
- */
-static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pqzero_sum(
- struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
- unsigned int src_cnt, const unsigned char *scf, size_t len,
- enum sum_check_flags *pqres, unsigned long flags)
-{
- struct ppc440spe_adma_chan *ppc440spe_chan;
- struct ppc440spe_adma_desc_slot *sw_desc, *iter;
- dma_addr_t pdest, qdest;
- int slot_cnt, slots_per_op, idst, dst_cnt;
-
- ppc440spe_chan = to_ppc440spe_adma_chan(chan);
-
- if (flags & DMA_PREP_PQ_DISABLE_P)
- pdest = 0;
- else
- pdest = pq[0];
-
- if (flags & DMA_PREP_PQ_DISABLE_Q)
- qdest = 0;
- else
- qdest = pq[1];
-
- ADMA_LL_DBG(prep_dma_pqzero_sum_dbg(ppc440spe_chan->device->id,
- src, src_cnt, scf));
-
- /* Always use WXOR for P/Q calculations (two destinations).
- * Need 1 or 2 extra slots to verify results are zero.
- */
- idst = dst_cnt = (pdest && qdest) ? 2 : 1;
-
- /* One additional slot per destination to clone P/Q
- * before calculation (we have to preserve destinations).
- */
- slot_cnt = src_cnt + dst_cnt * 2;
- slots_per_op = 1;
-
- spin_lock_bh(&ppc440spe_chan->lock);
- sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
- slots_per_op);
- if (sw_desc) {
- ppc440spe_desc_init_dma01pqzero_sum(sw_desc, dst_cnt, src_cnt);
-
- /* Setup byte count for each slot just allocated */
- sw_desc->async_tx.flags = flags;
- list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
- ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
- len);
- iter->unmap_len = len;
- }
-
- if (pdest) {
- struct dma_cdb *hw_desc;
- struct ppc440spe_adma_chan *chan;
-
- iter = sw_desc->group_head;
- chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
- memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
- iter->hw_next = list_entry(iter->chain_node.next,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- hw_desc = iter->hw_desc;
- hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
- iter->src_cnt = 0;
- iter->dst_cnt = 0;
- ppc440spe_desc_set_dest_addr(iter, chan, 0,
- ppc440spe_chan->pdest, 0);
- ppc440spe_desc_set_src_addr(iter, chan, 0, 0, pdest);
- ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
- len);
- iter->unmap_len = 0;
- /* override pdest to preserve original P */
- pdest = ppc440spe_chan->pdest;
- }
- if (qdest) {
- struct dma_cdb *hw_desc;
- struct ppc440spe_adma_chan *chan;
-
- iter = list_first_entry(&sw_desc->group_list,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
-
- if (pdest) {
- iter = list_entry(iter->chain_node.next,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- }
-
- memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
- iter->hw_next = list_entry(iter->chain_node.next,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- hw_desc = iter->hw_desc;
- hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
- iter->src_cnt = 0;
- iter->dst_cnt = 0;
- ppc440spe_desc_set_dest_addr(iter, chan, 0,
- ppc440spe_chan->qdest, 0);
- ppc440spe_desc_set_src_addr(iter, chan, 0, 0, qdest);
- ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
- len);
- iter->unmap_len = 0;
- /* override qdest to preserve original Q */
- qdest = ppc440spe_chan->qdest;
- }
-
- /* Setup destinations for P/Q ops */
- ppc440spe_adma_pqzero_sum_set_dest(sw_desc, pdest, qdest);
-
- /* Setup zero QWORDs into DCHECK CDBs */
- idst = dst_cnt;
- list_for_each_entry_reverse(iter, &sw_desc->group_list,
- chain_node) {
-			/*
-			 * The last CDB corresponds to the Q-parity check,
-			 * the one before the last corresponds to the
-			 * P-parity check
-			 */
- if (idst == DMA_DEST_MAX_NUM) {
- if (idst == dst_cnt) {
- set_bit(PPC440SPE_DESC_QCHECK,
- &iter->flags);
- } else {
- set_bit(PPC440SPE_DESC_PCHECK,
- &iter->flags);
- }
- } else {
- if (qdest) {
- set_bit(PPC440SPE_DESC_QCHECK,
- &iter->flags);
- } else {
- set_bit(PPC440SPE_DESC_PCHECK,
- &iter->flags);
- }
- }
- iter->xor_check_result = pqres;
-
-			/*
-			 * set it to zero; if the check fails, the result
-			 * will be updated
-			 */
- *iter->xor_check_result = 0;
- ppc440spe_desc_set_dcheck(iter, ppc440spe_chan,
- ppc440spe_qword);
-
- if (!(--dst_cnt))
- break;
- }
-
- /* Setup sources and mults for P/Q ops */
- list_for_each_entry_continue_reverse(iter, &sw_desc->group_list,
- chain_node) {
- struct ppc440spe_adma_chan *chan;
- u32 mult_dst;
-
- chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
- ppc440spe_desc_set_src_addr(iter, chan, 0,
- DMA_CUED_XOR_HB,
- src[src_cnt - 1]);
- if (qdest) {
- mult_dst = (dst_cnt - 1) ? DMA_CDB_SG_DST2 :
- DMA_CDB_SG_DST1;
- ppc440spe_desc_set_src_mult(iter, chan,
- DMA_CUED_MULT1_OFF,
- mult_dst,
- scf[src_cnt - 1]);
- }
- if (!(--src_cnt))
- break;
- }
- }
- spin_unlock_bh(&ppc440spe_chan->lock);
- return sw_desc ? &sw_desc->async_tx : NULL;
-}
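-
-/*
- * How the zero-sum check above works: the caller's P/Q pages are first
- * cloned into the channel's private pdest/qdest pages with MV_SG1_SG2 CDBs
- * (so the originals are preserved), the data sources are then GF-XORed into
- * those clones with WXOR, and the trailing DCHECK CDBs compare the result
- * against zero QWORDs; *pqres starts at 0 and is updated only if a check
- * fails.
- */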
-
-/**
- * ppc440spe_adma_prep_dma_xor_zero_sum - prepare CDB group for
- * XOR ZERO_SUM operation
- */
-static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor_zero_sum(
- struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
- size_t len, enum sum_check_flags *result, unsigned long flags)
-{
- struct dma_async_tx_descriptor *tx;
- dma_addr_t pq[2];
-
- /* validate P, disable Q */
- pq[0] = src[0];
- pq[1] = 0;
- flags |= DMA_PREP_PQ_DISABLE_Q;
-
- tx = ppc440spe_adma_prep_dma_pqzero_sum(chan, pq, &src[1],
- src_cnt - 1, 0, len,
- result, flags);
- return tx;
-}
-
-/**
- * ppc440spe_adma_set_dest - set destination address into descriptor
- */
-static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *sw_desc,
- dma_addr_t addr, int index)
-{
- struct ppc440spe_adma_chan *chan;
-
- BUG_ON(index >= sw_desc->dst_cnt);
-
- chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
-		/* to do: support transfer lengths >
-		 * PPC440SPE_ADMA_DMA/XOR_MAX_BYTE_COUNT
-		 */
- ppc440spe_desc_set_dest_addr(sw_desc->group_head,
- chan, 0, addr, index);
- break;
- case PPC440SPE_XOR_ID:
- sw_desc = ppc440spe_get_group_entry(sw_desc, index);
- ppc440spe_desc_set_dest_addr(sw_desc,
- chan, 0, addr, index);
- break;
- }
-}
-
-static void ppc440spe_adma_pq_zero_op(struct ppc440spe_adma_desc_slot *iter,
- struct ppc440spe_adma_chan *chan, dma_addr_t addr)
-{
-	/* To clear a destination, update the descriptor
-	 * (P or Q depending on the index) as follows:
-	 * addr is the destination (0 corresponds to SG2):
-	 */
- ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, addr, 0);
-
- /* ... and the addr is source: */
- ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, addr);
-
-	/* addr is always SG2, so the mult is always DST1 */
- ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
- DMA_CDB_SG_DST1, 1);
-}
-
-/**
- * ppc440spe_adma_pq_set_dest - set destination address into descriptor
- * for the PQXOR operation
- */
-static void ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *sw_desc,
- dma_addr_t *addrs, unsigned long flags)
-{
- struct ppc440spe_adma_desc_slot *iter;
- struct ppc440spe_adma_chan *chan;
- dma_addr_t paddr, qaddr;
- dma_addr_t addr = 0, ppath, qpath;
- int index = 0, i;
-
- chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
-
- if (flags & DMA_PREP_PQ_DISABLE_P)
- paddr = 0;
- else
- paddr = addrs[0];
-
- if (flags & DMA_PREP_PQ_DISABLE_Q)
- qaddr = 0;
- else
- qaddr = addrs[1];
-
- if (!paddr || !qaddr)
- addr = paddr ? paddr : qaddr;
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- /* walk through the WXOR source list and set P/Q-destinations
- * for each slot:
- */
- if (!test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
- /* This is WXOR-only chain; may have 1/2 zero descs */
- if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
- index++;
- if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
- index++;
-
- iter = ppc440spe_get_group_entry(sw_desc, index);
- if (addr) {
- /* one destination */
- list_for_each_entry_from(iter,
- &sw_desc->group_list, chain_node)
- ppc440spe_desc_set_dest_addr(iter, chan,
- DMA_CUED_XOR_BASE, addr, 0);
- } else {
- /* two destinations */
- list_for_each_entry_from(iter,
- &sw_desc->group_list, chain_node) {
- ppc440spe_desc_set_dest_addr(iter, chan,
- DMA_CUED_XOR_BASE, paddr, 0);
- ppc440spe_desc_set_dest_addr(iter, chan,
- DMA_CUED_XOR_BASE, qaddr, 1);
- }
- }
-
- if (index) {
- /* To clear destinations update the descriptor
- * (1st,2nd, or both depending on flags)
- */
- index = 0;
- if (test_bit(PPC440SPE_ZERO_P,
- &sw_desc->flags)) {
- iter = ppc440spe_get_group_entry(
- sw_desc, index++);
- ppc440spe_adma_pq_zero_op(iter, chan,
- paddr);
- }
-
- if (test_bit(PPC440SPE_ZERO_Q,
- &sw_desc->flags)) {
- iter = ppc440spe_get_group_entry(
- sw_desc, index++);
- ppc440spe_adma_pq_zero_op(iter, chan,
- qaddr);
- }
-
- return;
- }
- } else {
- /* This is RXOR-only or RXOR/WXOR mixed chain */
-
- /* If we want to include destination into calculations,
- * then make dest addresses cued with mult=1 (XOR).
- */
- ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ?
- DMA_CUED_XOR_HB :
- DMA_CUED_XOR_BASE |
- (1 << DMA_CUED_MULT1_OFF);
- qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ?
- DMA_CUED_XOR_HB :
- DMA_CUED_XOR_BASE |
- (1 << DMA_CUED_MULT1_OFF);
-
- /* Setup destination(s) in RXOR slot(s) */
- iter = ppc440spe_get_group_entry(sw_desc, index++);
- ppc440spe_desc_set_dest_addr(iter, chan,
- paddr ? ppath : qpath,
- paddr ? paddr : qaddr, 0);
- if (!addr) {
- /* two destinations */
- iter = ppc440spe_get_group_entry(sw_desc,
- index++);
- ppc440spe_desc_set_dest_addr(iter, chan,
- qpath, qaddr, 0);
- }
-
- if (test_bit(PPC440SPE_DESC_WXOR, &sw_desc->flags)) {
- /* Setup destination(s) in remaining WXOR
- * slots
- */
- iter = ppc440spe_get_group_entry(sw_desc,
- index);
- if (addr) {
- /* one destination */
- list_for_each_entry_from(iter,
- &sw_desc->group_list,
- chain_node)
- ppc440spe_desc_set_dest_addr(
- iter, chan,
- DMA_CUED_XOR_BASE,
- addr, 0);
-
- } else {
- /* two destinations */
- list_for_each_entry_from(iter,
- &sw_desc->group_list,
- chain_node) {
- ppc440spe_desc_set_dest_addr(
- iter, chan,
- DMA_CUED_XOR_BASE,
- paddr, 0);
- ppc440spe_desc_set_dest_addr(
- iter, chan,
- DMA_CUED_XOR_BASE,
- qaddr, 1);
- }
- }
- }
-
- }
- break;
-
- case PPC440SPE_XOR_ID:
- /* DMA2 descriptors have only 1 destination, so there are
- * two chains - one for each dest.
- * If we want to include destination into calculations,
- * then make dest addresses cued with mult=1 (XOR).
- */
- ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ?
- DMA_CUED_XOR_HB :
- DMA_CUED_XOR_BASE |
- (1 << DMA_CUED_MULT1_OFF);
-
- qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ?
- DMA_CUED_XOR_HB :
- DMA_CUED_XOR_BASE |
- (1 << DMA_CUED_MULT1_OFF);
-
- iter = ppc440spe_get_group_entry(sw_desc, 0);
- for (i = 0; i < sw_desc->descs_per_op; i++) {
- ppc440spe_desc_set_dest_addr(iter, chan,
- paddr ? ppath : qpath,
- paddr ? paddr : qaddr, 0);
- iter = list_entry(iter->chain_node.next,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- }
-
- if (!addr) {
- /* Two destinations; setup Q here */
- iter = ppc440spe_get_group_entry(sw_desc,
- sw_desc->descs_per_op);
- for (i = 0; i < sw_desc->descs_per_op; i++) {
- ppc440spe_desc_set_dest_addr(iter,
- chan, qpath, qaddr, 0);
- iter = list_entry(iter->chain_node.next,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- }
- }
-
- break;
- }
-}
-
-/**
- * ppc440spe_adma_pqzero_sum_set_dest - set destination address into descriptor
- * for the PQ_ZERO_SUM operation
- */
-static void ppc440spe_adma_pqzero_sum_set_dest(
- struct ppc440spe_adma_desc_slot *sw_desc,
- dma_addr_t paddr, dma_addr_t qaddr)
-{
- struct ppc440spe_adma_desc_slot *iter, *end;
- struct ppc440spe_adma_chan *chan;
- dma_addr_t addr = 0;
- int idx;
-
- chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
-
- /* walk through the WXOR source list and set P/Q-destinations
- * for each slot
- */
- idx = (paddr && qaddr) ? 2 : 1;
- /* set end */
- list_for_each_entry_reverse(end, &sw_desc->group_list,
- chain_node) {
- if (!(--idx))
- break;
- }
- /* set start */
- idx = (paddr && qaddr) ? 2 : 1;
- iter = ppc440spe_get_group_entry(sw_desc, idx);
-
- if (paddr && qaddr) {
- /* two destinations */
- list_for_each_entry_from(iter, &sw_desc->group_list,
- chain_node) {
- if (unlikely(iter == end))
- break;
- ppc440spe_desc_set_dest_addr(iter, chan,
- DMA_CUED_XOR_BASE, paddr, 0);
- ppc440spe_desc_set_dest_addr(iter, chan,
- DMA_CUED_XOR_BASE, qaddr, 1);
- }
- } else {
- /* one destination */
- addr = paddr ? paddr : qaddr;
- list_for_each_entry_from(iter, &sw_desc->group_list,
- chain_node) {
- if (unlikely(iter == end))
- break;
- ppc440spe_desc_set_dest_addr(iter, chan,
- DMA_CUED_XOR_BASE, addr, 0);
- }
- }
-
-	/* The remaining descriptors are DATACHECK. They do not need a
-	 * destination; the destination addresses are actually used as
-	 * sources for the check operation. So, set addr as a source.
-	 */
- ppc440spe_desc_set_src_addr(end, chan, 0, 0, addr ? addr : paddr);
-
- if (!addr) {
- end = list_entry(end->chain_node.next,
- struct ppc440spe_adma_desc_slot, chain_node);
- ppc440spe_desc_set_src_addr(end, chan, 0, 0, qaddr);
- }
-}
-
-/**
- * ppc440spe_desc_set_xor_src_cnt - set source count into descriptor
- */
-static inline void ppc440spe_desc_set_xor_src_cnt(
- struct ppc440spe_adma_desc_slot *desc,
- int src_cnt)
-{
- struct xor_cb *hw_desc = desc->hw_desc;
-
- hw_desc->cbc &= ~XOR_CDCR_OAC_MSK;
- hw_desc->cbc |= src_cnt;
-}
-
-/**
- * ppc440spe_adma_pq_set_src - set source address into descriptor
- */
-static void ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *sw_desc,
- dma_addr_t addr, int index)
-{
- struct ppc440spe_adma_chan *chan;
- dma_addr_t haddr = 0;
- struct ppc440spe_adma_desc_slot *iter = NULL;
-
- chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- /* DMA0,1 may do: WXOR, RXOR, RXOR+WXORs chain
- */
- if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
- /* RXOR-only or RXOR/WXOR operation */
- int iskip = test_bit(PPC440SPE_DESC_RXOR12,
- &sw_desc->flags) ? 2 : 3;
-
- if (index == 0) {
- /* 1st slot (RXOR) */
- /* setup sources region (R1-2-3, R1-2-4,
- * or R1-2-5)
- */
- if (test_bit(PPC440SPE_DESC_RXOR12,
- &sw_desc->flags))
- haddr = DMA_RXOR12 <<
- DMA_CUED_REGION_OFF;
- else if (test_bit(PPC440SPE_DESC_RXOR123,
- &sw_desc->flags))
- haddr = DMA_RXOR123 <<
- DMA_CUED_REGION_OFF;
- else if (test_bit(PPC440SPE_DESC_RXOR124,
- &sw_desc->flags))
- haddr = DMA_RXOR124 <<
- DMA_CUED_REGION_OFF;
- else if (test_bit(PPC440SPE_DESC_RXOR125,
- &sw_desc->flags))
- haddr = DMA_RXOR125 <<
- DMA_CUED_REGION_OFF;
- else
- BUG();
- haddr |= DMA_CUED_XOR_BASE;
- iter = ppc440spe_get_group_entry(sw_desc, 0);
- } else if (index < iskip) {
-				/* this source is covered by the RXOR region
-				 * programmed in the 1st slot at index 0, so
-				 * there is no address to set here
-				 */
- iter = NULL;
- } else {
-				/* 2nd/3rd and subsequent slots (WXOR);
-				 * skip the first slot holding the RXOR
-				 */
- haddr = DMA_CUED_XOR_HB;
- iter = ppc440spe_get_group_entry(sw_desc,
- index - iskip + sw_desc->dst_cnt);
- }
- } else {
- int znum = 0;
-
-			/* WXOR-only operation; skip the first slots that
-			 * zero the destinations
-			 */
- if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
- znum++;
- if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
- znum++;
-
- haddr = DMA_CUED_XOR_HB;
- iter = ppc440spe_get_group_entry(sw_desc,
- index + znum);
- }
-
- if (likely(iter)) {
- ppc440spe_desc_set_src_addr(iter, chan, 0, haddr, addr);
-
- if (!index &&
- test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags) &&
- sw_desc->dst_cnt == 2) {
- /* if we have two destinations for RXOR, then
- * setup source in the second descr too
- */
- iter = ppc440spe_get_group_entry(sw_desc, 1);
- ppc440spe_desc_set_src_addr(iter, chan, 0,
- haddr, addr);
- }
- }
- break;
-
- case PPC440SPE_XOR_ID:
- /* DMA2 may do Biskup */
- iter = sw_desc->group_head;
- if (iter->dst_cnt == 2) {
- /* both P & Q calculations required; set P src here */
- ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
-
- /* this is for Q */
- iter = ppc440spe_get_group_entry(sw_desc,
- sw_desc->descs_per_op);
- }
- ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
- break;
- }
-}
-
-/**
- * ppc440spe_adma_memcpy_xor_set_src - set source address into descriptor
- */
-static void ppc440spe_adma_memcpy_xor_set_src(
- struct ppc440spe_adma_desc_slot *sw_desc,
- dma_addr_t addr, int index)
-{
- struct ppc440spe_adma_chan *chan;
-
- chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
- sw_desc = sw_desc->group_head;
-
- if (likely(sw_desc))
- ppc440spe_desc_set_src_addr(sw_desc, chan, index, 0, addr);
-}
-
-/**
- * ppc440spe_adma_dma2rxor_inc_addr - account for one more operand address in
- * the RXOR cursor and update the CDB source count when needed
- */
-static void ppc440spe_adma_dma2rxor_inc_addr(
- struct ppc440spe_adma_desc_slot *desc,
- struct ppc440spe_rxor *cursor, int index, int src_cnt)
-{
- cursor->addr_count++;
- if (index == src_cnt - 1) {
- ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count);
- } else if (cursor->addr_count == XOR_MAX_OPS) {
- ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count);
- cursor->addr_count = 0;
- cursor->desc_count++;
- }
-}
-
-/**
- * ppc440spe_adma_dma2rxor_prep_src - setup RXOR types in DMA2 CDB
- */
-static int ppc440spe_adma_dma2rxor_prep_src(
- struct ppc440spe_adma_desc_slot *hdesc,
- struct ppc440spe_rxor *cursor, int index,
- int src_cnt, u32 addr)
-{
- int rval = 0;
- u32 sign;
- struct ppc440spe_adma_desc_slot *desc = hdesc;
- int i;
-
- for (i = 0; i < cursor->desc_count; i++) {
- desc = list_entry(hdesc->chain_node.next,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- }
-
- switch (cursor->state) {
- case 0:
- if (addr == cursor->addrl + cursor->len) {
- /* direct RXOR */
- cursor->state = 1;
- cursor->xor_count++;
- if (index == src_cnt-1) {
- ppc440spe_rxor_set_region(desc,
- cursor->addr_count,
- DMA_RXOR12 << DMA_CUED_REGION_OFF);
- ppc440spe_adma_dma2rxor_inc_addr(
- desc, cursor, index, src_cnt);
- }
- } else if (cursor->addrl == addr + cursor->len) {
- /* reverse RXOR */
- cursor->state = 1;
- cursor->xor_count++;
- set_bit(cursor->addr_count, &desc->reverse_flags[0]);
- if (index == src_cnt-1) {
- ppc440spe_rxor_set_region(desc,
- cursor->addr_count,
- DMA_RXOR12 << DMA_CUED_REGION_OFF);
- ppc440spe_adma_dma2rxor_inc_addr(
- desc, cursor, index, src_cnt);
- }
- } else {
- printk(KERN_ERR "Cannot build "
- "DMA2 RXOR command block.\n");
- BUG();
- }
- break;
- case 1:
- sign = test_bit(cursor->addr_count,
- desc->reverse_flags)
- ? -1 : 1;
- if (index == src_cnt-2 || (sign == -1
- && addr != cursor->addrl - 2*cursor->len)) {
- cursor->state = 0;
- cursor->xor_count = 1;
- cursor->addrl = addr;
- ppc440spe_rxor_set_region(desc,
- cursor->addr_count,
- DMA_RXOR12 << DMA_CUED_REGION_OFF);
- ppc440spe_adma_dma2rxor_inc_addr(
- desc, cursor, index, src_cnt);
- } else if (addr == cursor->addrl + 2*sign*cursor->len) {
- cursor->state = 2;
- cursor->xor_count = 0;
- ppc440spe_rxor_set_region(desc,
- cursor->addr_count,
- DMA_RXOR123 << DMA_CUED_REGION_OFF);
- if (index == src_cnt-1) {
- ppc440spe_adma_dma2rxor_inc_addr(
- desc, cursor, index, src_cnt);
- }
- } else if (addr == cursor->addrl + 3*cursor->len) {
- cursor->state = 2;
- cursor->xor_count = 0;
- ppc440spe_rxor_set_region(desc,
- cursor->addr_count,
- DMA_RXOR124 << DMA_CUED_REGION_OFF);
- if (index == src_cnt-1) {
- ppc440spe_adma_dma2rxor_inc_addr(
- desc, cursor, index, src_cnt);
- }
- } else if (addr == cursor->addrl + 4*cursor->len) {
- cursor->state = 2;
- cursor->xor_count = 0;
- ppc440spe_rxor_set_region(desc,
- cursor->addr_count,
- DMA_RXOR125 << DMA_CUED_REGION_OFF);
- if (index == src_cnt-1) {
- ppc440spe_adma_dma2rxor_inc_addr(
- desc, cursor, index, src_cnt);
- }
- } else {
- cursor->state = 0;
- cursor->xor_count = 1;
- cursor->addrl = addr;
- ppc440spe_rxor_set_region(desc,
- cursor->addr_count,
- DMA_RXOR12 << DMA_CUED_REGION_OFF);
- ppc440spe_adma_dma2rxor_inc_addr(
- desc, cursor, index, src_cnt);
- }
- break;
- case 2:
- cursor->state = 0;
- cursor->addrl = addr;
- cursor->xor_count++;
- if (index) {
- ppc440spe_adma_dma2rxor_inc_addr(
- desc, cursor, index, src_cnt);
- }
- break;
- }
-
- return rval;
-}
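-
-/*
- * Rough sketch of the cursor state machine above, as read from the code:
- * state 2 (also the value set by ppc440spe_init_rxor_cursor()) records the
- * incoming address as the start of a new region; state 0 compares the next
- * address against that start and chooses between direct and reverse RXOR;
- * state 1 then either widens the pair to R1-2-3/R1-2-4/R1-2-5 or closes it
- * as a plain R1-2 region.
- */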
-
-/**
- * ppc440spe_adma_dma2rxor_set_src - set RXOR source address; it is assumed
- * that ppc440spe_adma_dma2rxor_prep_src() has already been called before this
- */
-static void ppc440spe_adma_dma2rxor_set_src(
- struct ppc440spe_adma_desc_slot *desc,
- int index, dma_addr_t addr)
-{
- struct xor_cb *xcb = desc->hw_desc;
- int k = 0, op = 0, lop = 0;
-
- /* get the RXOR operand which corresponds to index addr */
- while (op <= index) {
- lop = op;
- if (k == XOR_MAX_OPS) {
- k = 0;
- desc = list_entry(desc->chain_node.next,
- struct ppc440spe_adma_desc_slot, chain_node);
- xcb = desc->hw_desc;
-
- }
- if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
- (DMA_RXOR12 << DMA_CUED_REGION_OFF))
- op += 2;
- else
- op += 3;
- }
-
- BUG_ON(k < 1);
-
- if (test_bit(k-1, desc->reverse_flags)) {
- /* reverse operand order; put last op in RXOR group */
- if (index == op - 1)
- ppc440spe_rxor_set_src(desc, k - 1, addr);
- } else {
- /* direct operand order; put first op in RXOR group */
- if (index == lop)
- ppc440spe_rxor_set_src(desc, k - 1, addr);
- }
-}
-
-/**
- * ppc440spe_adma_dma2rxor_set_mult - set RXOR multipliers; it is assumed
- * that ppc440spe_adma_dma2rxor_prep_src() has already been called before this
- */
-static void ppc440spe_adma_dma2rxor_set_mult(
- struct ppc440spe_adma_desc_slot *desc,
- int index, u8 mult)
-{
- struct xor_cb *xcb = desc->hw_desc;
- int k = 0, op = 0, lop = 0;
-
- /* get the RXOR operand which corresponds to index mult */
- while (op <= index) {
- lop = op;
- if (k == XOR_MAX_OPS) {
- k = 0;
- desc = list_entry(desc->chain_node.next,
- struct ppc440spe_adma_desc_slot,
- chain_node);
- xcb = desc->hw_desc;
-
- }
- if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
- (DMA_RXOR12 << DMA_CUED_REGION_OFF))
- op += 2;
- else
- op += 3;
- }
-
- BUG_ON(k < 1);
- if (test_bit(k-1, desc->reverse_flags)) {
- /* reverse order */
- ppc440spe_rxor_set_mult(desc, k - 1, op - index - 1, mult);
- } else {
- /* direct order */
- ppc440spe_rxor_set_mult(desc, k - 1, index - lop, mult);
- }
-}
-
-/**
- * ppc440spe_init_rxor_cursor - initialize an RXOR cursor
- */
-static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor)
-{
- memset(cursor, 0, sizeof(struct ppc440spe_rxor));
- cursor->state = 2;
-}
-
-/**
- * ppc440spe_adma_pq_set_src_mult - set multiplication coefficient into
- * descriptor for the PQXOR operation
- */
-static void ppc440spe_adma_pq_set_src_mult(
- struct ppc440spe_adma_desc_slot *sw_desc,
- unsigned char mult, int index, int dst_pos)
-{
- struct ppc440spe_adma_chan *chan;
- u32 mult_idx, mult_dst;
- struct ppc440spe_adma_desc_slot *iter = NULL, *iter1 = NULL;
-
- chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
-
- switch (chan->device->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
- int region = test_bit(PPC440SPE_DESC_RXOR12,
- &sw_desc->flags) ? 2 : 3;
-
- if (index < region) {
- /* RXOR multipliers */
- iter = ppc440spe_get_group_entry(sw_desc,
- sw_desc->dst_cnt - 1);
- if (sw_desc->dst_cnt == 2)
- iter1 = ppc440spe_get_group_entry(
- sw_desc, 0);
-
- mult_idx = DMA_CUED_MULT1_OFF + (index << 3);
- mult_dst = DMA_CDB_SG_SRC;
- } else {
- /* WXOR multiplier */
- iter = ppc440spe_get_group_entry(sw_desc,
- index - region +
- sw_desc->dst_cnt);
- mult_idx = DMA_CUED_MULT1_OFF;
- mult_dst = dst_pos ? DMA_CDB_SG_DST2 :
- DMA_CDB_SG_DST1;
- }
- } else {
- int znum = 0;
-
-			/* WXOR-only;
-			 * skip the first slots with destinations (if zeroing
-			 * of the destinations takes place)
-			 */
- if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
- znum++;
- if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
- znum++;
-
- iter = ppc440spe_get_group_entry(sw_desc, index + znum);
- mult_idx = DMA_CUED_MULT1_OFF;
- mult_dst = dst_pos ? DMA_CDB_SG_DST2 : DMA_CDB_SG_DST1;
- }
-
- if (likely(iter)) {
- ppc440spe_desc_set_src_mult(iter, chan,
- mult_idx, mult_dst, mult);
-
- if (unlikely(iter1)) {
- /* if we have two destinations for RXOR, then
- * we've just set Q mult. Set-up P now.
- */
- ppc440spe_desc_set_src_mult(iter1, chan,
- mult_idx, mult_dst, 1);
- }
-
- }
- break;
-
- case PPC440SPE_XOR_ID:
- iter = sw_desc->group_head;
- if (sw_desc->dst_cnt == 2) {
- /* both P & Q calculations required; set P mult here */
- ppc440spe_adma_dma2rxor_set_mult(iter, index, 1);
-
- /* and then set Q mult */
- iter = ppc440spe_get_group_entry(sw_desc,
- sw_desc->descs_per_op);
- }
- ppc440spe_adma_dma2rxor_set_mult(iter, index, mult);
- break;
- }
-}
-
-/**
- * ppc440spe_adma_free_chan_resources - free the resources allocated
- */
-static void ppc440spe_adma_free_chan_resources(struct dma_chan *chan)
-{
- struct ppc440spe_adma_chan *ppc440spe_chan;
- struct ppc440spe_adma_desc_slot *iter, *_iter;
- int in_use_descs = 0;
-
- ppc440spe_chan = to_ppc440spe_adma_chan(chan);
- ppc440spe_adma_slot_cleanup(ppc440spe_chan);
-
- spin_lock_bh(&ppc440spe_chan->lock);
- list_for_each_entry_safe(iter, _iter, &ppc440spe_chan->chain,
- chain_node) {
- in_use_descs++;
- list_del(&iter->chain_node);
- }
- list_for_each_entry_safe_reverse(iter, _iter,
- &ppc440spe_chan->all_slots, slot_node) {
- list_del(&iter->slot_node);
- kfree(iter);
- ppc440spe_chan->slots_allocated--;
- }
- ppc440spe_chan->last_used = NULL;
-
- dev_dbg(ppc440spe_chan->device->common.dev,
- "ppc440spe adma%d %s slots_allocated %d\n",
- ppc440spe_chan->device->id,
- __func__, ppc440spe_chan->slots_allocated);
- spin_unlock_bh(&ppc440spe_chan->lock);
-
- /* one is ok since we left it on there on purpose */
- if (in_use_descs > 1)
- printk(KERN_ERR "SPE: Freeing %d in use descriptors!\n",
- in_use_descs - 1);
-}
-
-/**
- * ppc440spe_adma_is_complete - poll the status of an ADMA transaction
- * @chan: ADMA channel handle
- * @cookie: ADMA transaction identifier
- */
-static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan,
- dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
-{
- struct ppc440spe_adma_chan *ppc440spe_chan;
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
- enum dma_status ret;
-
- ppc440spe_chan = to_ppc440spe_adma_chan(chan);
- last_used = chan->cookie;
- last_complete = ppc440spe_chan->completed_cookie;
-
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
-
- ret = dma_async_is_complete(cookie, last_complete, last_used);
- if (ret == DMA_SUCCESS)
- return ret;
-
- ppc440spe_adma_slot_cleanup(ppc440spe_chan);
-
- last_used = chan->cookie;
- last_complete = ppc440spe_chan->completed_cookie;
-
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
-
- return dma_async_is_complete(cookie, last_complete, last_used);
-}
-
-/**
- * ppc440spe_adma_eot_handler - end of transfer interrupt handler
- */
-static irqreturn_t ppc440spe_adma_eot_handler(int irq, void *data)
-{
- struct ppc440spe_adma_chan *chan = data;
-
- dev_dbg(chan->device->common.dev,
- "ppc440spe adma%d: %s\n", chan->device->id, __func__);
-
- tasklet_schedule(&chan->irq_tasklet);
- ppc440spe_adma_device_clear_eot_status(chan);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ppc440spe_adma_err_handler - DMA error interrupt handler;
- * do the same things as an EOT handler
- */
-static irqreturn_t ppc440spe_adma_err_handler(int irq, void *data)
-{
- struct ppc440spe_adma_chan *chan = data;
-
- dev_dbg(chan->device->common.dev,
- "ppc440spe adma%d: %s\n", chan->device->id, __func__);
-
- tasklet_schedule(&chan->irq_tasklet);
- ppc440spe_adma_device_clear_eot_status(chan);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ppc440spe_test_callback - called when the test operation has completed
- */
-static void ppc440spe_test_callback(void *unused)
-{
- complete(&ppc440spe_r6_test_comp);
-}
-
-/**
- * ppc440spe_adma_issue_pending - flush all pending descriptors to h/w
- */
-static void ppc440spe_adma_issue_pending(struct dma_chan *chan)
-{
- struct ppc440spe_adma_chan *ppc440spe_chan;
-
- ppc440spe_chan = to_ppc440spe_adma_chan(chan);
- dev_dbg(ppc440spe_chan->device->common.dev,
- "ppc440spe adma%d: %s %d \n", ppc440spe_chan->device->id,
- __func__, ppc440spe_chan->pending);
-
- if (ppc440spe_chan->pending) {
- ppc440spe_chan->pending = 0;
- ppc440spe_chan_append(ppc440spe_chan);
- }
-}
-
-/**
- * ppc440spe_chan_start_null_xor - initiate the first XOR operation (DMA engines
- * use FIFOs (as opposed to the chains used by the XOR engine), so this is an
- * XOR-specific operation)
- */
-static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan)
-{
- struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
- dma_cookie_t cookie;
- int slot_cnt, slots_per_op;
-
- dev_dbg(chan->device->common.dev,
- "ppc440spe adma%d: %s\n", chan->device->id, __func__);
-
- spin_lock_bh(&chan->lock);
- slot_cnt = ppc440spe_chan_xor_slot_count(0, 2, &slots_per_op);
- sw_desc = ppc440spe_adma_alloc_slots(chan, slot_cnt, slots_per_op);
- if (sw_desc) {
- group_start = sw_desc->group_head;
- list_splice_init(&sw_desc->group_list, &chan->chain);
- async_tx_ack(&sw_desc->async_tx);
- ppc440spe_desc_init_null_xor(group_start);
-
- cookie = chan->common.cookie;
- cookie++;
- if (cookie <= 1)
- cookie = 2;
-
- /* initialize the completed cookie to be less than
- * the most recently used cookie
- */
- chan->completed_cookie = cookie - 1;
- chan->common.cookie = sw_desc->async_tx.cookie = cookie;
-
- /* channel should not be busy */
- BUG_ON(ppc440spe_chan_is_busy(chan));
-
- /* set the descriptor address */
- ppc440spe_chan_set_first_xor_descriptor(chan, sw_desc);
-
- /* run the descriptor */
- ppc440spe_chan_run(chan);
- } else
- printk(KERN_ERR "ppc440spe adma%d"
- " failed to allocate null descriptor\n",
- chan->device->id);
- spin_unlock_bh(&chan->lock);
-}
-
-/**
- * ppc440spe_test_raid6 - test whether RAID-6 capabilities are enabled successfully.
- * For this we just perform one WXOR operation with the same source
- * and destination addresses, the GF-multiplier is 1; so if RAID-6
- * capabilities are enabled then we'll get src/dst filled with zero.
- */
-static int ppc440spe_test_raid6(struct ppc440spe_adma_chan *chan)
-{
- struct ppc440spe_adma_desc_slot *sw_desc, *iter;
- struct page *pg;
- char *a;
- dma_addr_t dma_addr, addrs[2];
- unsigned long op = 0;
- int rval = 0;
-
- set_bit(PPC440SPE_DESC_WXOR, &op);
-
- pg = alloc_page(GFP_KERNEL);
- if (!pg)
- return -ENOMEM;
-
- spin_lock_bh(&chan->lock);
- sw_desc = ppc440spe_adma_alloc_slots(chan, 1, 1);
- if (sw_desc) {
-		/* 1 src, 1 dst, int_ena, WXOR */
- ppc440spe_desc_init_dma01pq(sw_desc, 1, 1, 1, op);
- list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
- ppc440spe_desc_set_byte_count(iter, chan, PAGE_SIZE);
- iter->unmap_len = PAGE_SIZE;
- }
- } else {
- rval = -EFAULT;
- spin_unlock_bh(&chan->lock);
- goto exit;
- }
- spin_unlock_bh(&chan->lock);
-
- /* Fill the test page with ones */
- memset(page_address(pg), 0xFF, PAGE_SIZE);
- dma_addr = dma_map_page(chan->device->dev, pg, 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
-
- /* Setup addresses */
- ppc440spe_adma_pq_set_src(sw_desc, dma_addr, 0);
- ppc440spe_adma_pq_set_src_mult(sw_desc, 1, 0, 0);
- addrs[0] = dma_addr;
- addrs[1] = 0;
- ppc440spe_adma_pq_set_dest(sw_desc, addrs, DMA_PREP_PQ_DISABLE_Q);
-
- async_tx_ack(&sw_desc->async_tx);
- sw_desc->async_tx.callback = ppc440spe_test_callback;
- sw_desc->async_tx.callback_param = NULL;
-
- init_completion(&ppc440spe_r6_test_comp);
-
- ppc440spe_adma_tx_submit(&sw_desc->async_tx);
- ppc440spe_adma_issue_pending(&chan->common);
-
- wait_for_completion(&ppc440spe_r6_test_comp);
-
- /* Now check if the test page is zeroed */
- a = page_address(pg);
- if ((*(u32 *)a) == 0 && memcmp(a, a+4, PAGE_SIZE-4) == 0) {
- /* page is zero - RAID-6 enabled */
- rval = 0;
- } else {
- /* RAID-6 was not enabled */
- rval = -EINVAL;
- }
-exit:
- __free_page(pg);
- return rval;
-}
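As the comment above notes, multiplying by 1 in GF(2^8) is the identity, so a WXOR of a buffer with itself must leave it all-zero when the engine is working. The following host-side model of that check is a minimal sketch for illustration only; it is not part of the driver.

#include <errno.h>
#include <stdint.h>
#include <string.h>

/* Software model of the self-test WXOR: dst ^= 1 * src with dst == src,
 * which must leave the page all-zero if RAID-6 support is enabled.
 */
static int wxor_selftest_model(uint8_t *page, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		page[i] ^= page[i];	/* GF multiplier 1 is the identity */

	/* same check the driver performs on the DMA-mapped page */
	return (*(uint32_t *)page == 0 &&
		memcmp(page, page + 4, len - 4) == 0) ? 0 : -EINVAL;
}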
-
-static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
-{
- switch (adev->id) {
- case PPC440SPE_DMA0_ID:
- case PPC440SPE_DMA1_ID:
- dma_cap_set(DMA_MEMCPY, adev->common.cap_mask);
- dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
- dma_cap_set(DMA_MEMSET, adev->common.cap_mask);
- dma_cap_set(DMA_PQ, adev->common.cap_mask);
- dma_cap_set(DMA_PQ_VAL, adev->common.cap_mask);
- dma_cap_set(DMA_XOR_VAL, adev->common.cap_mask);
- break;
- case PPC440SPE_XOR_ID:
- dma_cap_set(DMA_XOR, adev->common.cap_mask);
- dma_cap_set(DMA_PQ, adev->common.cap_mask);
- dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
- adev->common.cap_mask = adev->common.cap_mask;
- break;
- }
-
- /* Set base routines */
- adev->common.device_alloc_chan_resources =
- ppc440spe_adma_alloc_chan_resources;
- adev->common.device_free_chan_resources =
- ppc440spe_adma_free_chan_resources;
- adev->common.device_is_tx_complete = ppc440spe_adma_is_complete;
- adev->common.device_issue_pending = ppc440spe_adma_issue_pending;
-
- /* Set prep routines based on capability */
- if (dma_has_cap(DMA_MEMCPY, adev->common.cap_mask)) {
- adev->common.device_prep_dma_memcpy =
- ppc440spe_adma_prep_dma_memcpy;
- }
- if (dma_has_cap(DMA_MEMSET, adev->common.cap_mask)) {
- adev->common.device_prep_dma_memset =
- ppc440spe_adma_prep_dma_memset;
- }
- if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) {
- adev->common.max_xor = XOR_MAX_OPS;
- adev->common.device_prep_dma_xor =
- ppc440spe_adma_prep_dma_xor;
- }
- if (dma_has_cap(DMA_PQ, adev->common.cap_mask)) {
- switch (adev->id) {
- case PPC440SPE_DMA0_ID:
- dma_set_maxpq(&adev->common,
- DMA0_FIFO_SIZE / sizeof(struct dma_cdb), 0);
- break;
- case PPC440SPE_DMA1_ID:
- dma_set_maxpq(&adev->common,
- DMA1_FIFO_SIZE / sizeof(struct dma_cdb), 0);
- break;
- case PPC440SPE_XOR_ID:
- adev->common.max_pq = XOR_MAX_OPS * 3;
- break;
- }
- adev->common.device_prep_dma_pq =
- ppc440spe_adma_prep_dma_pq;
- }
- if (dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask)) {
- switch (adev->id) {
- case PPC440SPE_DMA0_ID:
- adev->common.max_pq = DMA0_FIFO_SIZE /
- sizeof(struct dma_cdb);
- break;
- case PPC440SPE_DMA1_ID:
- adev->common.max_pq = DMA1_FIFO_SIZE /
- sizeof(struct dma_cdb);
- break;
- }
- adev->common.device_prep_dma_pq_val =
- ppc440spe_adma_prep_dma_pqzero_sum;
- }
- if (dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask)) {
- switch (adev->id) {
- case PPC440SPE_DMA0_ID:
- adev->common.max_xor = DMA0_FIFO_SIZE /
- sizeof(struct dma_cdb);
- break;
- case PPC440SPE_DMA1_ID:
- adev->common.max_xor = DMA1_FIFO_SIZE /
- sizeof(struct dma_cdb);
- break;
- }
- adev->common.device_prep_dma_xor_val =
- ppc440spe_adma_prep_dma_xor_zero_sum;
- }
- if (dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask)) {
- adev->common.device_prep_dma_interrupt =
- ppc440spe_adma_prep_dma_interrupt;
- }
- pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
- "( %s%s%s%s%s%s%s)\n",
- dev_name(adev->dev),
- dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
- dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",
- dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "",
- dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " : "",
- dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "",
- dma_has_cap(DMA_MEMSET, adev->common.cap_mask) ? "memset " : "",
- dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "intr " : "");
-}
-
-static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev,
- struct ppc440spe_adma_chan *chan,
- int *initcode)
-{
- struct device_node *np;
- int ret;
-
- np = container_of(adev->dev, struct of_device, dev)->node;
- if (adev->id != PPC440SPE_XOR_ID) {
- adev->err_irq = irq_of_parse_and_map(np, 1);
- if (adev->err_irq == NO_IRQ) {
- dev_warn(adev->dev, "no err irq resource?\n");
- *initcode = PPC_ADMA_INIT_IRQ2;
- adev->err_irq = -ENXIO;
- } else
- atomic_inc(&ppc440spe_adma_err_irq_ref);
- } else {
- adev->err_irq = -ENXIO;
- }
-
- adev->irq = irq_of_parse_and_map(np, 0);
- if (adev->irq == NO_IRQ) {
- dev_err(adev->dev, "no irq resource\n");
- *initcode = PPC_ADMA_INIT_IRQ1;
- ret = -ENXIO;
- goto err_irq_map;
- }
- dev_dbg(adev->dev, "irq %d, err irq %d\n",
- adev->irq, adev->err_irq);
-
- ret = request_irq(adev->irq, ppc440spe_adma_eot_handler,
- 0, dev_driver_string(adev->dev), chan);
- if (ret) {
- dev_err(adev->dev, "can't request irq %d\n",
- adev->irq);
- *initcode = PPC_ADMA_INIT_IRQ1;
- ret = -EIO;
- goto err_req1;
- }
-
- /* only DMA engines have a separate error IRQ
-	 * so it's OK if err_irq < 0 in the XOR engine case.
- */
- if (adev->err_irq > 0) {
- /* both DMA engines share common error IRQ */
- ret = request_irq(adev->err_irq,
- ppc440spe_adma_err_handler,
- IRQF_SHARED,
- dev_driver_string(adev->dev),
- chan);
- if (ret) {
- dev_err(adev->dev, "can't request irq %d\n",
- adev->err_irq);
- *initcode = PPC_ADMA_INIT_IRQ2;
- ret = -EIO;
- goto err_req2;
- }
- }
-
- if (adev->id == PPC440SPE_XOR_ID) {
- /* enable XOR engine interrupts */
- iowrite32be(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT |
- XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT,
- &adev->xor_reg->ier);
- } else {
- u32 mask, enable;
-
- np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe");
- if (!np) {
- pr_err("%s: can't find I2O device tree node\n",
- __func__);
- ret = -ENODEV;
- goto err_req2;
- }
- adev->i2o_reg = of_iomap(np, 0);
- if (!adev->i2o_reg) {
- pr_err("%s: failed to map I2O registers\n", __func__);
- of_node_put(np);
- ret = -EINVAL;
- goto err_req2;
- }
- of_node_put(np);
- /* Unmask 'CS FIFO Attention' interrupts and
- * enable generating interrupts on errors
- */
- enable = (adev->id == PPC440SPE_DMA0_ID) ?
- ~(I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) :
- ~(I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM);
- mask = ioread32(&adev->i2o_reg->iopim) & enable;
- iowrite32(mask, &adev->i2o_reg->iopim);
- }
- return 0;
-
-err_req2:
- free_irq(adev->irq, chan);
-err_req1:
- irq_dispose_mapping(adev->irq);
-err_irq_map:
- if (adev->err_irq > 0) {
- if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref))
- irq_dispose_mapping(adev->err_irq);
- }
- return ret;
-}
-
-static void ppc440spe_adma_release_irqs(struct ppc440spe_adma_device *adev,
- struct ppc440spe_adma_chan *chan)
-{
- u32 mask, disable;
-
- if (adev->id == PPC440SPE_XOR_ID) {
- /* disable XOR engine interrupts */
- mask = ioread32be(&adev->xor_reg->ier);
- mask &= ~(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT |
- XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT);
- iowrite32be(mask, &adev->xor_reg->ier);
- } else {
- /* disable DMAx engine interrupts */
- disable = (adev->id == PPC440SPE_DMA0_ID) ?
- (I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) :
- (I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM);
- mask = ioread32(&adev->i2o_reg->iopim) | disable;
- iowrite32(mask, &adev->i2o_reg->iopim);
- }
- free_irq(adev->irq, chan);
- irq_dispose_mapping(adev->irq);
- if (adev->err_irq > 0) {
- free_irq(adev->err_irq, chan);
- if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref)) {
- irq_dispose_mapping(adev->err_irq);
- iounmap(adev->i2o_reg);
- }
- }
-}
-
-/**
- * ppc440spe_adma_probe - probe the asynch device
- */
-static int __devinit ppc440spe_adma_probe(struct of_device *ofdev,
- const struct of_device_id *match)
-{
- struct device_node *np = ofdev->node;
- struct resource res;
- struct ppc440spe_adma_device *adev;
- struct ppc440spe_adma_chan *chan;
- struct ppc_dma_chan_ref *ref, *_ref;
- int ret = 0, initcode = PPC_ADMA_INIT_OK;
- const u32 *idx;
- int len;
- void *regs;
- u32 id, pool_size;
-
- if (of_device_is_compatible(np, "amcc,xor-accelerator")) {
- id = PPC440SPE_XOR_ID;
-		/* As far as the XOR engine is concerned, it does not
-		 * use FIFOs but a linked list, so the pool size to allocate
-		 * does not depend on the engine configuration.
- */
- pool_size = PAGE_SIZE << 1;
- } else {
- /* it is DMA0 or DMA1 */
- idx = of_get_property(np, "cell-index", &len);
- if (!idx || (len != sizeof(u32))) {
- dev_err(&ofdev->dev, "Device node %s has missing "
- "or invalid cell-index property\n",
- np->full_name);
- return -EINVAL;
- }
- id = *idx;
-		/* The DMA0/1 engines use a FIFO to maintain CDBs, so we
-		 * should size the pool according to this FIFO: the pool
-		 * must provide as many CDBs as the FIFO can hold pointers
-		 * to.
- * That is
- * CDB size = 32B;
- * CDBs number = (DMA0_FIFO_SIZE >> 3);
- * Pool size = CDBs number * CDB size =
- * = (DMA0_FIFO_SIZE >> 3) << 5 = DMA0_FIFO_SIZE << 2.
- */
- pool_size = (id == PPC440SPE_DMA0_ID) ?
- DMA0_FIFO_SIZE : DMA1_FIFO_SIZE;
- pool_size <<= 2;
- }
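To make the arithmetic in the comment above concrete: with DMA0_FIFO_SIZE = 0x1000 (as defined in dma.h later in this patch), the FIFO holds 0x1000 / 8 = 512 CDB pointers, so the pool needs 512 * 32 B = 16 KiB, i.e. DMA0_FIFO_SIZE << 2. A minimal sketch of the same calculation:

/* Worked example of the CDB pool sizing described above. */
static size_t cdb_pool_bytes(size_t fifo_size)
{
	size_t ncdbs = fifo_size >> 3;	/* FIFO holds 8-byte CDB pointers */

	return ncdbs * 32;		/* 32-byte CDBs: 0x1000 -> 16 KiB */
}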
-
- if (of_address_to_resource(np, 0, &res)) {
- dev_err(&ofdev->dev, "failed to get memory resource\n");
- initcode = PPC_ADMA_INIT_MEMRES;
- ret = -ENODEV;
- goto out;
- }
-
- if (!request_mem_region(res.start, resource_size(&res),
- dev_driver_string(&ofdev->dev))) {
- dev_err(&ofdev->dev, "failed to request memory region "
- "(0x%016llx-0x%016llx)\n",
- (u64)res.start, (u64)res.end);
- initcode = PPC_ADMA_INIT_MEMREG;
- ret = -EBUSY;
- goto out;
- }
-
- /* create a device */
- adev = kzalloc(sizeof(*adev), GFP_KERNEL);
- if (!adev) {
- dev_err(&ofdev->dev, "failed to allocate device\n");
- initcode = PPC_ADMA_INIT_ALLOC;
- ret = -ENOMEM;
- goto err_adev_alloc;
- }
-
- adev->id = id;
- adev->pool_size = pool_size;
- /* allocate coherent memory for hardware descriptors */
- adev->dma_desc_pool_virt = dma_alloc_coherent(&ofdev->dev,
- adev->pool_size, &adev->dma_desc_pool,
- GFP_KERNEL);
- if (adev->dma_desc_pool_virt == NULL) {
- dev_err(&ofdev->dev, "failed to allocate %d bytes of coherent "
- "memory for hardware descriptors\n",
- adev->pool_size);
- initcode = PPC_ADMA_INIT_COHERENT;
- ret = -ENOMEM;
- goto err_dma_alloc;
- }
-	dev_dbg(&ofdev->dev, "allocated descriptor pool virt 0x%p phys 0x%llx\n",
- adev->dma_desc_pool_virt, (u64)adev->dma_desc_pool);
-
- regs = ioremap(res.start, resource_size(&res));
- if (!regs) {
- dev_err(&ofdev->dev, "failed to ioremap regs!\n");
- goto err_regs_alloc;
- }
-
- if (adev->id == PPC440SPE_XOR_ID) {
- adev->xor_reg = regs;
- /* Reset XOR */
- iowrite32be(XOR_CRSR_XASR_BIT, &adev->xor_reg->crsr);
- iowrite32be(XOR_CRSR_64BA_BIT, &adev->xor_reg->crrr);
- } else {
- size_t fifo_size = (adev->id == PPC440SPE_DMA0_ID) ?
- DMA0_FIFO_SIZE : DMA1_FIFO_SIZE;
- adev->dma_reg = regs;
- /* DMAx_FIFO_SIZE is defined in bytes,
-		 * <fsiz> is defined in number of CDB pointers (8 byte).
- * DMA FIFO Length = CSlength + CPlength, where
- * CSlength = CPlength = (fsiz + 1) * 8.
- */
- iowrite32(DMA_FIFO_ENABLE | ((fifo_size >> 3) - 2),
- &adev->dma_reg->fsiz);
- /* Configure DMA engine */
- iowrite32(DMA_CFG_DXEPR_HP | DMA_CFG_DFMPP_HP | DMA_CFG_FALGN,
- &adev->dma_reg->cfg);
- /* Clear Status */
- iowrite32(~0, &adev->dma_reg->dsts);
- }
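Given the formula in the comment above and the value programmed into fsiz here, the CP and CS FIFOs together consume roughly twice fifo_size bytes, which is why ppc440spe_configure_raid_devices() allocates the FIFO buffer with a << 1. A small, illustrative sketch of that relationship:

/* CSlength = CPlength = (fsiz + 1) * 8, with fsiz = (fifo_size >> 3) - 2 */
static unsigned int dma_fifo_total_bytes(unsigned int fifo_size)
{
	unsigned int fsiz = (fifo_size >> 3) - 2;

	return 2 * (fsiz + 1) * 8;	/* ~2 * fifo_size (8176 for 0x1000) */
}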
-
- adev->dev = &ofdev->dev;
- adev->common.dev = &ofdev->dev;
- INIT_LIST_HEAD(&adev->common.channels);
- dev_set_drvdata(&ofdev->dev, adev);
-
- /* create a channel */
- chan = kzalloc(sizeof(*chan), GFP_KERNEL);
- if (!chan) {
- dev_err(&ofdev->dev, "can't allocate channel structure\n");
- initcode = PPC_ADMA_INIT_CHANNEL;
- ret = -ENOMEM;
- goto err_chan_alloc;
- }
-
- spin_lock_init(&chan->lock);
- INIT_LIST_HEAD(&chan->chain);
- INIT_LIST_HEAD(&chan->all_slots);
- chan->device = adev;
- chan->common.device = &adev->common;
- list_add_tail(&chan->common.device_node, &adev->common.channels);
- tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet,
- (unsigned long)chan);
-
- /* allocate and map helper pages for async validation or
- * async_mult/async_sum_product operations on DMA0/1.
- */
- if (adev->id != PPC440SPE_XOR_ID) {
- chan->pdest_page = alloc_page(GFP_KERNEL);
- chan->qdest_page = alloc_page(GFP_KERNEL);
- if (!chan->pdest_page ||
- !chan->qdest_page) {
- if (chan->pdest_page)
- __free_page(chan->pdest_page);
- if (chan->qdest_page)
- __free_page(chan->qdest_page);
- ret = -ENOMEM;
- goto err_page_alloc;
- }
- chan->pdest = dma_map_page(&ofdev->dev, chan->pdest_page, 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- chan->qdest = dma_map_page(&ofdev->dev, chan->qdest_page, 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- }
-
- ref = kmalloc(sizeof(*ref), GFP_KERNEL);
- if (ref) {
- ref->chan = &chan->common;
- INIT_LIST_HEAD(&ref->node);
- list_add_tail(&ref->node, &ppc440spe_adma_chan_list);
- } else {
- dev_err(&ofdev->dev, "failed to allocate channel reference!\n");
- ret = -ENOMEM;
- goto err_ref_alloc;
- }
-
- ret = ppc440spe_adma_setup_irqs(adev, chan, &initcode);
- if (ret)
- goto err_irq;
-
- ppc440spe_adma_init_capabilities(adev);
-
- ret = dma_async_device_register(&adev->common);
- if (ret) {
- initcode = PPC_ADMA_INIT_REGISTER;
- dev_err(&ofdev->dev, "failed to register dma device\n");
- goto err_dev_reg;
- }
-
- goto out;
-
-err_dev_reg:
- ppc440spe_adma_release_irqs(adev, chan);
-err_irq:
- list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list, node) {
- if (chan == to_ppc440spe_adma_chan(ref->chan)) {
- list_del(&ref->node);
- kfree(ref);
- }
- }
-err_ref_alloc:
- if (adev->id != PPC440SPE_XOR_ID) {
- dma_unmap_page(&ofdev->dev, chan->pdest,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- dma_unmap_page(&ofdev->dev, chan->qdest,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(chan->pdest_page);
- __free_page(chan->qdest_page);
- }
-err_page_alloc:
- kfree(chan);
-err_chan_alloc:
- if (adev->id == PPC440SPE_XOR_ID)
- iounmap(adev->xor_reg);
- else
- iounmap(adev->dma_reg);
-err_regs_alloc:
- dma_free_coherent(adev->dev, adev->pool_size,
- adev->dma_desc_pool_virt,
- adev->dma_desc_pool);
-err_dma_alloc:
- kfree(adev);
-err_adev_alloc:
- release_mem_region(res.start, resource_size(&res));
-out:
- if (id < PPC440SPE_ADMA_ENGINES_NUM)
- ppc440spe_adma_devices[id] = initcode;
-
- return ret;
-}
-
-/**
- * ppc440spe_adma_remove - remove the asynch device
- */
-static int __devexit ppc440spe_adma_remove(struct of_device *ofdev)
-{
- struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev);
- struct device_node *np = ofdev->node;
- struct resource res;
- struct dma_chan *chan, *_chan;
- struct ppc_dma_chan_ref *ref, *_ref;
- struct ppc440spe_adma_chan *ppc440spe_chan;
-
- dev_set_drvdata(&ofdev->dev, NULL);
- if (adev->id < PPC440SPE_ADMA_ENGINES_NUM)
- ppc440spe_adma_devices[adev->id] = -1;
-
- dma_async_device_unregister(&adev->common);
-
- list_for_each_entry_safe(chan, _chan, &adev->common.channels,
- device_node) {
- ppc440spe_chan = to_ppc440spe_adma_chan(chan);
- ppc440spe_adma_release_irqs(adev, ppc440spe_chan);
- tasklet_kill(&ppc440spe_chan->irq_tasklet);
- if (adev->id != PPC440SPE_XOR_ID) {
- dma_unmap_page(&ofdev->dev, ppc440spe_chan->pdest,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- dma_unmap_page(&ofdev->dev, ppc440spe_chan->qdest,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(ppc440spe_chan->pdest_page);
- __free_page(ppc440spe_chan->qdest_page);
- }
- list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list,
- node) {
- if (ppc440spe_chan ==
- to_ppc440spe_adma_chan(ref->chan)) {
- list_del(&ref->node);
- kfree(ref);
- }
- }
- list_del(&chan->device_node);
- kfree(ppc440spe_chan);
- }
-
- dma_free_coherent(adev->dev, adev->pool_size,
- adev->dma_desc_pool_virt, adev->dma_desc_pool);
- if (adev->id == PPC440SPE_XOR_ID)
- iounmap(adev->xor_reg);
- else
- iounmap(adev->dma_reg);
- of_address_to_resource(np, 0, &res);
- release_mem_region(res.start, resource_size(&res));
- kfree(adev);
- return 0;
-}
-
-/*
- * /sys driver interface to enable h/w RAID-6 capabilities
- * Files created in e.g. /sys/devices/plb.0/400100100.dma0/driver/
- * directory are "devices", "enable" and "poly".
- * "devices" shows available engines.
- * "enable" is used to enable RAID-6 capabilities or to check
- * whether they have been activated.
- * "poly" allows setting/checking the polynomial used (PPC440SPe only).
- */
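A user-space program would typically write the key to the "enable" attribute in the driver directory described above; the sketch below is illustrative only, and the attribute path depends on the platform bus layout.

#include <stdio.h>

/* Write the XORBA key to the driver's "enable" attribute (example path)
 * so the driver runs its RAID-6 self-test and reports the result.
 */
static int ppc440spe_r6_enable(const char *enable_attr, unsigned long key)
{
	FILE *f = fopen(enable_attr, "w");

	if (!f)
		return -1;
	fprintf(f, "%lx\n", key);
	return fclose(f);
}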
-
-static ssize_t show_ppc440spe_devices(struct device_driver *dev, char *buf)
-{
- ssize_t size = 0;
- int i;
-
- for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) {
- if (ppc440spe_adma_devices[i] == -1)
- continue;
- size += snprintf(buf + size, PAGE_SIZE - size,
- "PPC440SP(E)-ADMA.%d: %s\n", i,
- ppc_adma_errors[ppc440spe_adma_devices[i]]);
- }
- return size;
-}
-
-static ssize_t show_ppc440spe_r6enable(struct device_driver *dev, char *buf)
-{
- return snprintf(buf, PAGE_SIZE,
- "PPC440SP(e) RAID-6 capabilities are %sABLED.\n",
- ppc440spe_r6_enabled ? "EN" : "DIS");
-}
-
-static ssize_t store_ppc440spe_r6enable(struct device_driver *dev,
- const char *buf, size_t count)
-{
- unsigned long val;
-
- if (!count || count > 11)
- return -EINVAL;
-
- if (!ppc440spe_r6_tchan)
- return -EFAULT;
-
- /* Write a key */
- sscanf(buf, "%lx", &val);
- dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_XORBA, val);
- isync();
-
- /* Verify whether it really works now */
- if (ppc440spe_test_raid6(ppc440spe_r6_tchan) == 0) {
- pr_info("PPC440SP(e) RAID-6 has been activated "
- "successfully\n");
- ppc440spe_r6_enabled = 1;
- } else {
- pr_info("PPC440SP(e) RAID-6 hasn't been activated!"
- " Error key ?\n");
- ppc440spe_r6_enabled = 0;
- }
- return count;
-}
-
-static ssize_t show_ppc440spe_r6poly(struct device_driver *dev, char *buf)
-{
- ssize_t size = 0;
- u32 reg;
-
-#ifdef CONFIG_440SP
- /* 440SP has fixed polynomial */
- reg = 0x4d;
-#else
- reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL);
- reg >>= MQ0_CFBHL_POLY;
- reg &= 0xFF;
-#endif
-
- size = snprintf(buf, PAGE_SIZE, "PPC440SP(e) RAID-6 driver "
- "uses 0x1%02x polynomial.\n", reg);
- return size;
-}
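The "0x1%02x" format above reflects that only the low eight coefficients of the GF(2^8) generator polynomial are kept in the register; the x^8 term is implicit. For example, reg = 0x4d corresponds to the full polynomial 0x14d. The helper below is an illustrative sketch, not part of the driver:

/* Reconstruct the full 9-bit generator polynomial from the stored byte. */
static unsigned int full_gf_poly(unsigned int reg_byte)
{
	return 0x100 | (reg_byte & 0xff);	/* 0x4d -> 0x14d */
}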
-
-static ssize_t store_ppc440spe_r6poly(struct device_driver *dev,
- const char *buf, size_t count)
-{
- unsigned long reg, val;
-
-#ifdef CONFIG_440SP
- /* 440SP uses default 0x14D polynomial only */
- return -EINVAL;
-#endif
-
- if (!count || count > 6)
- return -EINVAL;
-
- /* e.g., 0x14D or 0x11D */
- sscanf(buf, "%lx", &val);
-
- if (val & ~0x1FF)
- return -EINVAL;
-
- val &= 0xFF;
- reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL);
- reg &= ~(0xFF << MQ0_CFBHL_POLY);
- reg |= val << MQ0_CFBHL_POLY;
- dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL, reg);
-
- return count;
-}
-
-static DRIVER_ATTR(devices, S_IRUGO, show_ppc440spe_devices, NULL);
-static DRIVER_ATTR(enable, S_IRUGO | S_IWUSR, show_ppc440spe_r6enable,
- store_ppc440spe_r6enable);
-static DRIVER_ATTR(poly, S_IRUGO | S_IWUSR, show_ppc440spe_r6poly,
- store_ppc440spe_r6poly);
-
-/*
- * Common initialisation for RAID engines; allocate memory for
- * DMAx FIFOs, perform configuration common for all DMA engines.
- * Further DMA engine specific configuration is done at probe time.
- */
-static int ppc440spe_configure_raid_devices(void)
-{
- struct device_node *np;
- struct resource i2o_res;
- struct i2o_regs __iomem *i2o_reg;
- dcr_host_t i2o_dcr_host;
- unsigned int dcr_base, dcr_len;
- int i, ret;
-
- np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe");
- if (!np) {
- pr_err("%s: can't find I2O device tree node\n",
- __func__);
- return -ENODEV;
- }
-
- if (of_address_to_resource(np, 0, &i2o_res)) {
- of_node_put(np);
- return -EINVAL;
- }
-
- i2o_reg = of_iomap(np, 0);
- if (!i2o_reg) {
- pr_err("%s: failed to map I2O registers\n", __func__);
- of_node_put(np);
- return -EINVAL;
- }
-
- /* Get I2O DCRs base */
- dcr_base = dcr_resource_start(np, 0);
- dcr_len = dcr_resource_len(np, 0);
- if (!dcr_base && !dcr_len) {
- pr_err("%s: can't get DCR registers base/len!\n",
- np->full_name);
- of_node_put(np);
- iounmap(i2o_reg);
- return -ENODEV;
- }
-
- i2o_dcr_host = dcr_map(np, dcr_base, dcr_len);
- if (!DCR_MAP_OK(i2o_dcr_host)) {
- pr_err("%s: failed to map DCRs!\n", np->full_name);
- of_node_put(np);
- iounmap(i2o_reg);
- return -ENODEV;
- }
- of_node_put(np);
-
- /* Provide memory regions for DMA's FIFOs: I2O, DMA0 and DMA1 share
- * the base address of FIFO memory space.
-	 * Actually we need twice as much physical memory as programmed in the
- * register (because there are two FIFOs for each DMA: CP and CS)
- */
- ppc440spe_dma_fifo_buf = kmalloc((DMA0_FIFO_SIZE + DMA1_FIFO_SIZE) << 1,
- GFP_KERNEL);
- if (!ppc440spe_dma_fifo_buf) {
- pr_err("%s: DMA FIFO buffer allocation failed.\n", __func__);
- iounmap(i2o_reg);
- dcr_unmap(i2o_dcr_host, dcr_len);
- return -ENOMEM;
- }
-
- /*
- * Configure h/w
- */
- /* Reset I2O/DMA */
- mtdcri(SDR0, DCRN_SDR0_SRST, DCRN_SDR0_SRST_I2ODMA);
- mtdcri(SDR0, DCRN_SDR0_SRST, 0);
-
- /* Setup the base address of mmaped registers */
- dcr_write(i2o_dcr_host, DCRN_I2O0_IBAH, (u32)(i2o_res.start >> 32));
- dcr_write(i2o_dcr_host, DCRN_I2O0_IBAL, (u32)(i2o_res.start) |
- I2O_REG_ENABLE);
- dcr_unmap(i2o_dcr_host, dcr_len);
-
- /* Setup FIFO memory space base address */
- iowrite32(0, &i2o_reg->ifbah);
- iowrite32(((u32)__pa(ppc440spe_dma_fifo_buf)), &i2o_reg->ifbal);
-
- /* set zero FIFO size for I2O, so the whole
- * ppc440spe_dma_fifo_buf is used by DMAs.
-	 * DMAx FIFOs will be configured at probe time.
- */
- iowrite32(0, &i2o_reg->ifsiz);
- iounmap(i2o_reg);
-
- /* To prepare WXOR/RXOR functionality we need access to
-	 * Memory Queue Module DCRs (RAID-6 will eventually be enabled
-	 * via the /sys interface of the ppc440spe ADMA driver).
- */
- np = of_find_compatible_node(NULL, NULL, "ibm,mq-440spe");
- if (!np) {
- pr_err("%s: can't find MQ device tree node\n",
- __func__);
- ret = -ENODEV;
- goto out_free;
- }
-
- /* Get MQ DCRs base */
- dcr_base = dcr_resource_start(np, 0);
- dcr_len = dcr_resource_len(np, 0);
- if (!dcr_base && !dcr_len) {
- pr_err("%s: can't get DCR registers base/len!\n",
- np->full_name);
- ret = -ENODEV;
- goto out_mq;
- }
-
- ppc440spe_mq_dcr_host = dcr_map(np, dcr_base, dcr_len);
- if (!DCR_MAP_OK(ppc440spe_mq_dcr_host)) {
- pr_err("%s: failed to map DCRs!\n", np->full_name);
- ret = -ENODEV;
- goto out_mq;
- }
- of_node_put(np);
- ppc440spe_mq_dcr_len = dcr_len;
-
- /* Set HB alias */
- dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_BAUH, DMA_CUED_XOR_HB);
-
- /* Set:
- * - LL transaction passing limit to 1;
- * - Memory controller cycle limit to 1;
- * - Galois Polynomial to 0x14d (default)
- */
- dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL,
- (1 << MQ0_CFBHL_TPLM) | (1 << MQ0_CFBHL_HBCL) |
- (PPC440SPE_DEFAULT_POLY << MQ0_CFBHL_POLY));
-
- atomic_set(&ppc440spe_adma_err_irq_ref, 0);
- for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++)
- ppc440spe_adma_devices[i] = -1;
-
- return 0;
-
-out_mq:
- of_node_put(np);
-out_free:
- kfree(ppc440spe_dma_fifo_buf);
- return ret;
-}
-
-static struct of_device_id __devinitdata ppc440spe_adma_of_match[] = {
- { .compatible = "ibm,dma-440spe", },
- { .compatible = "amcc,xor-accelerator", },
- {},
-};
-MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match);
-
-static struct of_platform_driver ppc440spe_adma_driver = {
- .match_table = ppc440spe_adma_of_match,
- .probe = ppc440spe_adma_probe,
- .remove = __devexit_p(ppc440spe_adma_remove),
- .driver = {
- .name = "PPC440SP(E)-ADMA",
- .owner = THIS_MODULE,
- },
-};
-
-static __init int ppc440spe_adma_init(void)
-{
- int ret;
-
- ret = ppc440spe_configure_raid_devices();
- if (ret)
- return ret;
-
- ret = of_register_platform_driver(&ppc440spe_adma_driver);
- if (ret) {
- pr_err("%s: failed to register platform driver\n",
- __func__);
- goto out_reg;
- }
-
- /* Initialization status */
- ret = driver_create_file(&ppc440spe_adma_driver.driver,
- &driver_attr_devices);
- if (ret)
- goto out_dev;
-
- /* RAID-6 h/w enable entry */
- ret = driver_create_file(&ppc440spe_adma_driver.driver,
- &driver_attr_enable);
- if (ret)
- goto out_en;
-
- /* GF polynomial to use */
- ret = driver_create_file(&ppc440spe_adma_driver.driver,
- &driver_attr_poly);
- if (!ret)
- return ret;
-
- driver_remove_file(&ppc440spe_adma_driver.driver,
- &driver_attr_enable);
-out_en:
- driver_remove_file(&ppc440spe_adma_driver.driver,
- &driver_attr_devices);
-out_dev:
- /* User will not be able to enable h/w RAID-6 */
- pr_err("%s: failed to create RAID-6 driver interface\n",
- __func__);
- of_unregister_platform_driver(&ppc440spe_adma_driver);
-out_reg:
- dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
- kfree(ppc440spe_dma_fifo_buf);
- return ret;
-}
-
-static void __exit ppc440spe_adma_exit(void)
-{
- driver_remove_file(&ppc440spe_adma_driver.driver,
- &driver_attr_poly);
- driver_remove_file(&ppc440spe_adma_driver.driver,
- &driver_attr_enable);
- driver_remove_file(&ppc440spe_adma_driver.driver,
- &driver_attr_devices);
- of_unregister_platform_driver(&ppc440spe_adma_driver);
- dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
- kfree(ppc440spe_dma_fifo_buf);
-}
-
-arch_initcall(ppc440spe_adma_init);
-module_exit(ppc440spe_adma_exit);
-
-MODULE_AUTHOR("Yuri Tikhonov ");
-MODULE_DESCRIPTION("PPC440SPE ADMA Engine Driver");
-MODULE_LICENSE("GPL");
diff --git a/trunk/drivers/dma/ppc4xx/adma.h b/trunk/drivers/dma/ppc4xx/adma.h
deleted file mode 100644
index 8ada5a812e3b..000000000000
--- a/trunk/drivers/dma/ppc4xx/adma.h
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * 2006-2009 (C) DENX Software Engineering.
- *
- * Author: Yuri Tikhonov
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of
- * any kind, whether express or implied.
- */
-
-#ifndef _PPC440SPE_ADMA_H
-#define _PPC440SPE_ADMA_H
-
-#include <linux/types.h>
-#include "dma.h"
-#include "xor.h"
-
-#define to_ppc440spe_adma_chan(chan) \
- container_of(chan, struct ppc440spe_adma_chan, common)
-#define to_ppc440spe_adma_device(dev) \
- container_of(dev, struct ppc440spe_adma_device, common)
-#define tx_to_ppc440spe_adma_slot(tx) \
- container_of(tx, struct ppc440spe_adma_desc_slot, async_tx)
-
-/* Default polynomial (the only one available on 440SP) */
-#define PPC440SPE_DEFAULT_POLY 0x4d
-
-#define PPC440SPE_ADMA_ENGINES_NUM (XOR_ENGINES_NUM + DMA_ENGINES_NUM)
-
-#define PPC440SPE_ADMA_WATCHDOG_MSEC 3
-#define PPC440SPE_ADMA_THRESHOLD 1
-
-#define PPC440SPE_DMA0_ID 0
-#define PPC440SPE_DMA1_ID 1
-#define PPC440SPE_XOR_ID 2
-
-#define PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT 0xFFFFFFUL
-/* this is the XOR_CBBCR width */
-#define PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT (1 << 31)
-#define PPC440SPE_ADMA_ZERO_SUM_MAX_BYTE_COUNT PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT
-
-#define PPC440SPE_RXOR_RUN 0
-
-#define MQ0_CF2H_RXOR_BS_MASK 0x1FF
-
-#undef ADMA_LL_DEBUG
-
-/**
- * struct ppc440spe_adma_device - internal representation of an ADMA device
- * @dev: device
- * @dma_reg: base for DMAx register access
- * @xor_reg: base for XOR register access
- * @i2o_reg: base for I2O register access
- * @id: HW ADMA Device selector
- * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
- * @dma_desc_pool: base of DMA descriptor region (DMA address)
- * @pool_size: size of the pool
- * @irq: DMAx or XOR irq number
- * @err_irq: DMAx error irq number
- * @common: embedded struct dma_device
- */
-struct ppc440spe_adma_device {
- struct device *dev;
- struct dma_regs __iomem *dma_reg;
- struct xor_regs __iomem *xor_reg;
- struct i2o_regs __iomem *i2o_reg;
- int id;
- void *dma_desc_pool_virt;
- dma_addr_t dma_desc_pool;
- size_t pool_size;
- int irq;
- int err_irq;
- struct dma_device common;
-};
-
-/**
- * struct ppc440spe_adma_chan - internal representation of an ADMA channel
- * @lock: serializes enqueue/dequeue operations to the slot pool
- * @device: parent device
- * @chain: device chain view of the descriptors
- * @common: common dmaengine channel object members
- * @all_slots: complete domain of slots usable by the channel
- * @pending: allows batching of hardware operations
- * @completed_cookie: identifier for the most recently completed operation
- * @slots_allocated: records the actual size of the descriptor slot pool
- * @hw_chain_inited: h/w descriptor chain initialization flag
- * @irq_tasklet: bottom half where ppc440spe_adma_slot_cleanup runs
- * @needs_unmap: if buffers should not be unmapped upon final processing
- * @pdest_page: P destination page for async validate operation
- * @qdest_page: Q destination page for async validate operation
- * @pdest: P dma addr for async validate operation
- * @qdest: Q dma addr for async validate operation
- */
-struct ppc440spe_adma_chan {
- spinlock_t lock;
- struct ppc440spe_adma_device *device;
- struct list_head chain;
- struct dma_chan common;
- struct list_head all_slots;
- struct ppc440spe_adma_desc_slot *last_used;
- int pending;
- dma_cookie_t completed_cookie;
- int slots_allocated;
- int hw_chain_inited;
- struct tasklet_struct irq_tasklet;
- u8 needs_unmap;
- struct page *pdest_page;
- struct page *qdest_page;
- dma_addr_t pdest;
- dma_addr_t qdest;
-};
-
-struct ppc440spe_rxor {
- u32 addrl;
- u32 addrh;
- int len;
- int xor_count;
- int addr_count;
- int desc_count;
- int state;
-};
-
-/**
- * struct ppc440spe_adma_desc_slot - PPC440SPE-ADMA software descriptor
- * @phys: hardware address of the hardware descriptor chain
- * @group_head: first operation in a transaction
- * @hw_next: pointer to the next descriptor in chain
- * @async_tx: support for the async_tx api
- * @slot_node: node on the ppc440spe_adma_chan.all_slots list
- * @chain_node: node on the ppc440spe_adma_chan.chain list
- * @group_list: list of slots that make up a multi-descriptor transaction
- * for example transfer lengths larger than the supported hw max
- * @unmap_len: transaction bytecount
- * @hw_desc: virtual address of the hardware descriptor chain
- * @stride: currently chained or not
- * @idx: pool index
- * @slot_cnt: total slots used in a transaction (group of operations)
- * @src_cnt: number of sources set in this descriptor
- * @dst_cnt: number of destinations set in the descriptor
- * @slots_per_op: number of slots per operation
- * @descs_per_op: number of slots per P/Q operation; see the comment
- * for the ppc440spe_prep_dma_pqxor function
- * @flags: desc state/type
- * @reverse_flags: 1 if a corresponding rxor address uses reversed address order
- * @xor_check_result: result of zero sum
- * @crc32_result: result of the CRC calculation
- */
-struct ppc440spe_adma_desc_slot {
- dma_addr_t phys;
- struct ppc440spe_adma_desc_slot *group_head;
- struct ppc440spe_adma_desc_slot *hw_next;
- struct dma_async_tx_descriptor async_tx;
- struct list_head slot_node;
- struct list_head chain_node; /* node in channel ops list */
- struct list_head group_list; /* list */
- unsigned int unmap_len;
- void *hw_desc;
- u16 stride;
- u16 idx;
- u16 slot_cnt;
- u8 src_cnt;
- u8 dst_cnt;
- u8 slots_per_op;
- u8 descs_per_op;
- unsigned long flags;
- unsigned long reverse_flags[8];
-
-#define PPC440SPE_DESC_INT 0 /* generate interrupt on complete */
-#define PPC440SPE_ZERO_P 1 /* clear P destination */
-#define PPC440SPE_ZERO_Q 2 /* clear Q destination */
-#define PPC440SPE_COHERENT 3 /* src/dst are coherent */
-
-#define PPC440SPE_DESC_WXOR 4 /* WXORs are in chain */
-#define PPC440SPE_DESC_RXOR 5 /* RXOR is in chain */
-
-#define PPC440SPE_DESC_RXOR123 8 /* CDB for RXOR123 operation */
-#define PPC440SPE_DESC_RXOR124 9 /* CDB for RXOR124 operation */
-#define PPC440SPE_DESC_RXOR125 10 /* CDB for RXOR125 operation */
-#define PPC440SPE_DESC_RXOR12 11 /* CDB for RXOR12 operation */
-#define PPC440SPE_DESC_RXOR_REV 12 /* CDB has srcs in reversed order */
-
-#define PPC440SPE_DESC_PCHECK 13
-#define PPC440SPE_DESC_QCHECK 14
-
-#define PPC440SPE_DESC_RXOR_MSK 0x3
-
- struct ppc440spe_rxor rxor_cursor;
-
- union {
- u32 *xor_check_result;
- u32 *crc32_result;
- };
-};
-
-#endif /* _PPC440SPE_ADMA_H */
diff --git a/trunk/drivers/dma/ppc4xx/dma.h b/trunk/drivers/dma/ppc4xx/dma.h
deleted file mode 100644
index bcde2df2f373..000000000000
--- a/trunk/drivers/dma/ppc4xx/dma.h
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * 440SPe's DMA engines support header file
- *
- * 2006-2009 (C) DENX Software Engineering.
- *
- * Author: Yuri Tikhonov
- *
- * This file is licensed under the term of the GNU General Public License
- * version 2. The program licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef _PPC440SPE_DMA_H
-#define _PPC440SPE_DMA_H
-
-#include <linux/types.h>
-
-/* Number of elements in the array with static CDBs */
-#define MAX_STAT_DMA_CDBS 16
-/* Number of DMA engines available on the controller */
-#define DMA_ENGINES_NUM 2
-
-/* Maximum h/w supported number of destinations */
-#define DMA_DEST_MAX_NUM 2
-
-/* FIFO's params */
-#define DMA0_FIFO_SIZE 0x1000
-#define DMA1_FIFO_SIZE 0x1000
-#define DMA_FIFO_ENABLE (1<<12)
-
-/* DMA Configuration Register. Data Transfer Engine PLB Priority: */
-#define DMA_CFG_DXEPR_LP (0<<26)
-#define DMA_CFG_DXEPR_HP (3<<26)
-#define DMA_CFG_DXEPR_HHP (2<<26)
-#define DMA_CFG_DXEPR_HHHP (1<<26)
-
-/* DMA Configuration Register. DMA FIFO Manager PLB Priority: */
-#define DMA_CFG_DFMPP_LP (0<<23)
-#define DMA_CFG_DFMPP_HP (3<<23)
-#define DMA_CFG_DFMPP_HHP (2<<23)
-#define DMA_CFG_DFMPP_HHHP (1<<23)
-
-/* DMA Configuration Register. Force 64-byte Alignment */
-#define DMA_CFG_FALGN (1 << 19)
-
-/*UIC0:*/
-#define D0CPF_INT (1<<12)
-#define D0CSF_INT (1<<11)
-#define D1CPF_INT (1<<10)
-#define D1CSF_INT (1<<9)
-/*UIC1:*/
-#define DMAE_INT (1<<9)
-
-/* I2O IOP Interrupt Mask Register */
-#define I2O_IOPIM_P0SNE (1<<3)
-#define I2O_IOPIM_P0EM (1<<5)
-#define I2O_IOPIM_P1SNE (1<<6)
-#define I2O_IOPIM_P1EM (1<<8)
-
-/* DMA CDB fields */
-#define DMA_CDB_MSK (0xF)
-#define DMA_CDB_64B_ADDR (1<<2)
-#define DMA_CDB_NO_INT (1<<3)
-#define DMA_CDB_STATUS_MSK (0x3)
-#define DMA_CDB_ADDR_MSK (0xFFFFFFF0)
-
-/* DMA CDB OpCodes */
-#define DMA_CDB_OPC_NO_OP (0x00)
-#define DMA_CDB_OPC_MV_SG1_SG2 (0x01)
-#define DMA_CDB_OPC_MULTICAST (0x05)
-#define DMA_CDB_OPC_DFILL128 (0x24)
-#define DMA_CDB_OPC_DCHECK128 (0x23)
-
-#define DMA_CUED_XOR_BASE (0x10000000)
-#define DMA_CUED_XOR_HB (0x00000008)
-
-#ifdef CONFIG_440SP
-#define DMA_CUED_MULT1_OFF 0
-#define DMA_CUED_MULT2_OFF 8
-#define DMA_CUED_MULT3_OFF 16
-#define DMA_CUED_REGION_OFF 24
-#define DMA_CUED_XOR_WIN_MSK (0xFC000000)
-#else
-#define DMA_CUED_MULT1_OFF 2
-#define DMA_CUED_MULT2_OFF 10
-#define DMA_CUED_MULT3_OFF 18
-#define DMA_CUED_REGION_OFF 26
-#define DMA_CUED_XOR_WIN_MSK (0xF0000000)
-#endif
-
-#define DMA_CUED_REGION_MSK 0x3
-#define DMA_RXOR123 0x0
-#define DMA_RXOR124 0x1
-#define DMA_RXOR125 0x2
-#define DMA_RXOR12 0x3
-
-/* S/G addresses */
-#define DMA_CDB_SG_SRC 1
-#define DMA_CDB_SG_DST1 2
-#define DMA_CDB_SG_DST2 3
-
-/*
- * DMAx engines Command Descriptor Block Type
- */
-struct dma_cdb {
- /*
- * Basic CDB structure (Table 20-17, p.499, 440spe_um_1_22.pdf)
- */
- u8 pad0[2]; /* reserved */
- u8 attr; /* attributes */
- u8 opc; /* opcode */
- u32 sg1u; /* upper SG1 address */
- u32 sg1l; /* lower SG1 address */
- u32 cnt; /* SG count, 3B used */
- u32 sg2u; /* upper SG2 address */
- u32 sg2l; /* lower SG2 address */
- u32 sg3u; /* upper SG3 address */
- u32 sg3l; /* lower SG3 address */
-};
-
-/*
- * DMAx hardware registers (p.515 in 440SPe UM 1.22)
- */
-struct dma_regs {
- u32 cpfpl;
- u32 cpfph;
- u32 csfpl;
- u32 csfph;
- u32 dsts;
- u32 cfg;
- u8 pad0[0x8];
- u16 cpfhp;
- u16 cpftp;
- u16 csfhp;
- u16 csftp;
- u8 pad1[0x8];
- u32 acpl;
- u32 acph;
- u32 s1bpl;
- u32 s1bph;
- u32 s2bpl;
- u32 s2bph;
- u32 s3bpl;
- u32 s3bph;
- u8 pad2[0x10];
- u32 earl;
- u32 earh;
- u8 pad3[0x8];
- u32 seat;
- u32 sead;
- u32 op;
- u32 fsiz;
-};
-
-/*
- * I2O hardware registers (p.528 in 440SPe UM 1.22)
- */
-struct i2o_regs {
- u32 ists;
- u32 iseat;
- u32 isead;
- u8 pad0[0x14];
- u32 idbel;
- u8 pad1[0xc];
- u32 ihis;
- u32 ihim;
- u8 pad2[0x8];
- u32 ihiq;
- u32 ihoq;
- u8 pad3[0x8];
- u32 iopis;
- u32 iopim;
- u32 iopiq;
- u8 iopoq;
- u8 pad4[3];
- u16 iiflh;
- u16 iiflt;
- u16 iiplh;
- u16 iiplt;
- u16 ioflh;
- u16 ioflt;
- u16 ioplh;
- u16 ioplt;
- u32 iidc;
- u32 ictl;
- u32 ifcpp;
- u8 pad5[0x4];
- u16 mfac0;
- u16 mfac1;
- u16 mfac2;
- u16 mfac3;
- u16 mfac4;
- u16 mfac5;
- u16 mfac6;
- u16 mfac7;
- u16 ifcfh;
- u16 ifcht;
- u8 pad6[0x4];
- u32 iifmc;
- u32 iodb;
- u32 iodbc;
- u32 ifbal;
- u32 ifbah;
- u32 ifsiz;
- u32 ispd0;
- u32 ispd1;
- u32 ispd2;
- u32 ispd3;
- u32 ihipl;
- u32 ihiph;
- u32 ihopl;
- u32 ihoph;
- u32 iiipl;
- u32 iiiph;
- u32 iiopl;
- u32 iioph;
- u32 ifcpl;
- u32 ifcph;
- u8 pad7[0x8];
- u32 iopt;
-};
-
-#endif /* _PPC440SPE_DMA_H */
diff --git a/trunk/drivers/dma/ppc4xx/xor.h b/trunk/drivers/dma/ppc4xx/xor.h
deleted file mode 100644
index daed7384daac..000000000000
--- a/trunk/drivers/dma/ppc4xx/xor.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * 440SPe's XOR engines support header file
- *
- * 2006-2009 (C) DENX Software Engineering.
- *
- * Author: Yuri Tikhonov
- *
- * This file is licensed under the term of the GNU General Public License
- * version 2. The program licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef _PPC440SPE_XOR_H
-#define _PPC440SPE_XOR_H
-
-#include <linux/types.h>
-
-/* Number of XOR engines available on the controller */
-#define XOR_ENGINES_NUM 1
-
-/* Number of operands supported in the h/w */
-#define XOR_MAX_OPS 16
-
-/*
- * XOR Command Block Control Register bits
- */
-#define XOR_CBCR_LNK_BIT (1<<31) /* link present */
-#define XOR_CBCR_TGT_BIT (1<<30) /* target present */
-#define XOR_CBCR_CBCE_BIT (1<<29) /* command block complete enable */
-#define XOR_CBCR_RNZE_BIT (1<<28) /* result not zero enable */
-#define XOR_CBCR_XNOR_BIT (1<<15) /* XOR/XNOR */
-#define XOR_CDCR_OAC_MSK (0x7F) /* operand address count */
-
-/*
- * XORCore Status Register bits
- */
-#define XOR_SR_XCP_BIT (1<<31) /* core processing */
-#define XOR_SR_ICB_BIT (1<<17) /* invalid CB */
-#define XOR_SR_IC_BIT (1<<16) /* invalid command */
-#define XOR_SR_IPE_BIT (1<<15) /* internal parity error */
-#define XOR_SR_RNZ_BIT (1<<2) /* result not Zero */
-#define XOR_SR_CBC_BIT (1<<1) /* CB complete */
-#define XOR_SR_CBLC_BIT (1<<0) /* CB list complete */
-
-/*
- * XORCore Control Set and Reset Register bits
- */
-#define XOR_CRSR_XASR_BIT (1<<31) /* soft reset */
-#define XOR_CRSR_XAE_BIT (1<<30) /* enable */
-#define XOR_CRSR_RCBE_BIT (1<<29) /* refetch CB enable */
-#define XOR_CRSR_PAUS_BIT (1<<28) /* pause */
-#define XOR_CRSR_64BA_BIT (1<<27) /* 64/32 CB format */
-#define XOR_CRSR_CLP_BIT (1<<25) /* continue list processing */
-
-/*
- * XORCore Interrupt Enable Register
- */
-#define XOR_IE_ICBIE_BIT (1<<17) /* Invalid Command Block IRQ Enable */
-#define XOR_IE_ICIE_BIT (1<<16) /* Invalid Command IRQ Enable */
-#define XOR_IE_RPTIE_BIT (1<<14) /* Read PLB Timeout Error IRQ Enable */
-#define XOR_IE_CBCIE_BIT (1<<1) /* CB complete interrupt enable */
-#define XOR_IE_CBLCI_BIT (1<<0) /* CB list complete interrupt enable */
-
-/*
- * XOR Accelerator engine Command Block Type
- */
-struct xor_cb {
- /*
- * Basic 64-bit format XOR CB (Table 19-1, p.463, 440spe_um_1_22.pdf)
- */
- u32 cbc; /* control */
- u32 cbbc; /* byte count */
- u32 cbs; /* status */
- u8 pad0[4]; /* reserved */
- u32 cbtah; /* target address high */
- u32 cbtal; /* target address low */
- u32 cblah; /* link address high */
- u32 cblal; /* link address low */
- struct {
- u32 h;
- u32 l;
- } __attribute__ ((packed)) ops[16];
-} __attribute__ ((packed));
-
-/*
- * XOR hardware registers Table 19-3, UM 1.22
- */
-struct xor_regs {
- u32 op_ar[16][2]; /* operand address[0]-high,[1]-low registers */
- u8 pad0[352]; /* reserved */
- u32 cbcr; /* CB control register */
- u32 cbbcr; /* CB byte count register */
- u32 cbsr; /* CB status register */
- u8 pad1[4]; /* reserved */
- u32 cbtahr; /* operand target address high register */
- u32 cbtalr; /* operand target address low register */
- u32 cblahr; /* CB link address high register */
- u32 cblalr; /* CB link address low register */
- u32 crsr; /* control set register */
- u32 crrr; /* control reset register */
- u32 ccbahr; /* current CB address high register */
- u32 ccbalr; /* current CB address low register */
- u32 plbr; /* PLB configuration register */
- u32 ier; /* interrupt enable register */
- u32 pecr; /* parity error count register */
- u32 sr; /* status register */
- u32 revidr; /* revision ID register */
-};
-
-#endif /* _PPC440SPE_XOR_H */
diff --git a/trunk/drivers/dma/shdma.c b/trunk/drivers/dma/shdma.c
index 2e4a54c8afeb..034ecf0ace03 100644
--- a/trunk/drivers/dma/shdma.c
+++ b/trunk/drivers/dma/shdma.c
@@ -80,17 +80,17 @@ static int sh_dmae_rst(int id)
unsigned short dmaor;
sh_dmae_ctl_stop(id);
- dmaor = dmaor_read_reg(id) | DMAOR_INIT;
+ dmaor = (dmaor_read_reg(id)|DMAOR_INIT);
dmaor_write_reg(id, dmaor);
- if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
+ if ((dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF))) {
pr_warning(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
return -EINVAL;
}
return 0;
}
-static int dmae_is_busy(struct sh_dmae_chan *sh_chan)
+static int dmae_is_idle(struct sh_dmae_chan *sh_chan)
{
u32 chcr = sh_dmae_readl(sh_chan, CHCR);
if (chcr & CHCR_DE) {
@@ -110,14 +110,15 @@ static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw)
{
sh_dmae_writel(sh_chan, hw.sar, SAR);
sh_dmae_writel(sh_chan, hw.dar, DAR);
- sh_dmae_writel(sh_chan, hw.tcr >> calc_xmit_shift(sh_chan), TCR);
+ sh_dmae_writel(sh_chan,
+ (hw.tcr >> calc_xmit_shift(sh_chan)), TCR);
}
static void dmae_start(struct sh_dmae_chan *sh_chan)
{
u32 chcr = sh_dmae_readl(sh_chan, CHCR);
- chcr |= CHCR_DE | CHCR_IE;
+ chcr |= (CHCR_DE|CHCR_IE);
sh_dmae_writel(sh_chan, chcr, CHCR);
}
@@ -131,7 +132,7 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan)
static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
- int ret = dmae_is_busy(sh_chan);
+ int ret = dmae_is_idle(sh_chan);
/* When DMA was working, can not set data to CHCR */
if (ret)
return ret;
@@ -148,7 +149,7 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
u32 addr;
int shift = 0;
- int ret = dmae_is_busy(sh_chan);
+ int ret = dmae_is_idle(sh_chan);
if (ret)
return ret;
@@ -306,7 +307,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
new = sh_dmae_get_desc(sh_chan);
if (!new) {
dev_err(sh_chan->dev,
- "No free memory for link descriptor\n");
+ "No free memory for link descriptor\n");
goto err_get_desc;
}
@@ -387,7 +388,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
struct sh_dmae_regs hw;
/* DMA work check */
- if (dmae_is_busy(sh_chan))
+ if (dmae_is_idle(sh_chan))
return;
 	/* Find the first un-transferred descriptor */
@@ -496,9 +497,8 @@ static void dmae_do_tasklet(unsigned long data)
struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
struct sh_desc *desc, *_desc, *cur_desc = NULL;
u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
-
list_for_each_entry_safe(desc, _desc,
- &sh_chan->ld_queue, node) {
+ &sh_chan->ld_queue, node) {
if ((desc->hw.sar + desc->hw.tcr) == sar_buf) {
cur_desc = desc;
break;
@@ -543,8 +543,8 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
/* alloc channel */
new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
if (!new_sh_chan) {
- dev_err(shdev->common.dev,
- "No free memory for allocating dma channels!\n");
+ dev_err(shdev->common.dev, "No free memory for allocating "
+ "dma channels!\n");
return -ENOMEM;
}
@@ -586,8 +586,8 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
"sh-dmae%d", new_sh_chan->id);
/* set up channel irq */
- err = request_irq(irq, &sh_dmae_interrupt, irqflags,
- new_sh_chan->dev_id, new_sh_chan);
+ err = request_irq(irq, &sh_dmae_interrupt,
+ irqflags, new_sh_chan->dev_id, new_sh_chan);
if (err) {
dev_err(shdev->common.dev, "DMA channel %d request_irq error "
"with return %d\n", id, err);
@@ -676,8 +676,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
shdev->common.device_is_tx_complete = sh_dmae_is_complete;
shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
shdev->common.dev = &pdev->dev;
- /* Default transfer size of 32 bytes requires 32-byte alignment */
- shdev->common.copy_align = 5;
#if defined(CONFIG_CPU_SH4)
/* Non Mix IRQ mode SH7722/SH7730 etc... */
@@ -690,8 +688,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
}
for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
- err = request_irq(eirq[ecnt], sh_dmae_err, irqflags,
- "DMAC Address Error", shdev);
+ err = request_irq(eirq[ecnt], sh_dmae_err,
+ irqflags, "DMAC Address Error", shdev);
if (err) {
dev_err(&pdev->dev, "DMA device request_irq"
"error (irq %d) with return %d\n",
diff --git a/trunk/drivers/dma/shdma.h b/trunk/drivers/dma/shdma.h
index 60b81e529b42..2b4bc15a2c0a 100644
--- a/trunk/drivers/dma/shdma.h
+++ b/trunk/drivers/dma/shdma.h
@@ -35,15 +35,15 @@ struct sh_desc {
struct sh_dmae_chan {
dma_cookie_t completed_cookie; /* The maximum cookie completed */
- spinlock_t desc_lock; /* Descriptor operation lock */
- struct list_head ld_queue; /* Link descriptors queue */
- struct list_head ld_free; /* Link descriptors free */
- struct dma_chan common; /* DMA common channel */
- struct device *dev; /* Channel device */
+ spinlock_t desc_lock; /* Descriptor operation lock */
+ struct list_head ld_queue; /* Link descriptors queue */
+ struct list_head ld_free; /* Link descriptors free */
+ struct dma_chan common; /* DMA common channel */
+ struct device *dev; /* Channel device */
struct tasklet_struct tasklet; /* Tasklet */
- int descs_allocated; /* desc count */
+ int descs_allocated; /* desc count */
int id; /* Raw id of this channel */
- char dev_id[16]; /* unique name per DMAC of channel */
+ char dev_id[16]; /* unique name per DMAC of channel */
/* Set chcr */
int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs);
diff --git a/trunk/drivers/edac/edac_mce_amd.c b/trunk/drivers/edac/edac_mce_amd.c
index 8fc91a019620..c693fcc2213c 100644
--- a/trunk/drivers/edac/edac_mce_amd.c
+++ b/trunk/drivers/edac/edac_mce_amd.c
@@ -299,12 +299,6 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
if (!handle_errors)
return;
- /*
- * GART TLB error reporting is disabled by default. Bail out early.
- */
- if (TLB_ERROR(ec) && !report_gart_errors)
- return;
-
pr_emerg(" Northbridge Error, node %d", node_id);
/*
@@ -316,9 +310,10 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
if (regs->nbsh & K8_NBSH_ERR_CPU_VAL)
pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf));
} else {
- pr_cont(", core: %d\n", fls((regs->nbsh & 0xf) - 1));
+ pr_cont(", core: %d\n", ilog2((regs->nbsh & 0xf)));
}
+
pr_emerg("%s.\n", EXT_ERR_MSG(xec));
if (BUS_ERROR(ec) && nb_bus_decoder)
@@ -338,6 +333,21 @@ static void amd_decode_fr_mce(u64 mc5_status)
static inline void amd_decode_err_code(unsigned int ec)
{
if (TLB_ERROR(ec)) {
+ /*
+ * GART errors are intended to help graphics driver developers
+ * to detect bad GART PTEs. It is recommended by AMD to disable
+ * GART table walk error reporting by default[1] (currently
+ * being disabled in mce_cpu_quirks()) and according to the
+ * comment in mce_cpu_quirks(), such GART errors can be
+ * incorrectly triggered. We may see these errors anyway and
+ * unless requested by the user, they won't be reported.
+ *
+ * [1] section 13.10.1 on BIOS and Kernel Developers Guide for
+ * AMD NPT family 0Fh processors
+ */
+ if (!report_gart_errors)
+ return;
+
pr_emerg(" Transaction: %s, Cache Level %s\n",
TT_MSG(ec), LL_MSG(ec));
} else if (MEM_ERROR(ec)) {
diff --git a/trunk/drivers/edac/i5100_edac.c b/trunk/drivers/edac/i5100_edac.c
index 7785d8ffa404..22db05a67bfb 100644
--- a/trunk/drivers/edac/i5100_edac.c
+++ b/trunk/drivers/edac/i5100_edac.c
@@ -9,11 +9,6 @@
* Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet
* http://download.intel.com/design/chipsets/datashts/318378.pdf
*
- * The intel 5100 has two independent channels. EDAC core currently
- * can not reflect this configuration so instead the chip-select
- * rows for each respective channel are layed out one after another,
- * the first half belonging to channel 0, the second half belonging
- * to channel 1.
*/
#include
#include
@@ -30,8 +25,6 @@
/* device 16, func 1 */
#define I5100_MC 0x40 /* Memory Control Register */
-#define I5100_MC_SCRBEN_MASK (1 << 7)
-#define I5100_MC_SCRBDONE_MASK (1 << 4)
#define I5100_MS 0x44 /* Memory Status Register */
#define I5100_SPDDATA 0x48 /* Serial Presence Detect Status Reg */
#define I5100_SPDCMD 0x4c /* Serial Presence Detect Command Reg */
@@ -79,21 +72,11 @@
/* bit field accessors */
-static inline u32 i5100_mc_scrben(u32 mc)
-{
- return mc >> 7 & 1;
-}
-
static inline u32 i5100_mc_errdeten(u32 mc)
{
return mc >> 5 & 1;
}
-static inline u32 i5100_mc_scrbdone(u32 mc)
-{
- return mc >> 4 & 1;
-}
-
static inline u16 i5100_spddata_rdo(u16 a)
{
return a >> 15 & 1;
@@ -282,43 +265,42 @@ static inline u32 i5100_recmemb_ras(u32 a)
}
/* some generic limits */
-#define I5100_MAX_RANKS_PER_CHAN 6
-#define I5100_CHANNELS 2
+#define I5100_MAX_RANKS_PER_CTLR 6
+#define I5100_MAX_CTLRS 2
#define I5100_MAX_RANKS_PER_DIMM 4
#define I5100_DIMM_ADDR_LINES (6 - 3) /* 64 bits / 8 bits per byte */
-#define I5100_MAX_DIMM_SLOTS_PER_CHAN 4
+#define I5100_MAX_DIMM_SLOTS_PER_CTLR 4
#define I5100_MAX_RANK_INTERLEAVE 4
#define I5100_MAX_DMIRS 5
-#define I5100_SCRUB_REFRESH_RATE (5 * 60 * HZ)
struct i5100_priv {
/* ranks on each dimm -- 0 maps to not present -- obtained via SPD */
- int dimm_numrank[I5100_CHANNELS][I5100_MAX_DIMM_SLOTS_PER_CHAN];
+ int dimm_numrank[I5100_MAX_CTLRS][I5100_MAX_DIMM_SLOTS_PER_CTLR];
/*
* mainboard chip select map -- maps i5100 chip selects to
* DIMM slot chip selects. In the case of only 4 ranks per
- * channel, the mapping is fairly obvious but not unique.
- * we map -1 -> NC and assume both channels use the same
+ * controller, the mapping is fairly obvious but not unique.
+ * we map -1 -> NC and assume both controllers use the same
* map...
*
*/
- int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CHAN][I5100_MAX_RANKS_PER_DIMM];
+ int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CTLR][I5100_MAX_RANKS_PER_DIMM];
/* memory interleave range */
struct {
u64 limit;
unsigned way[2];
- } mir[I5100_CHANNELS];
+ } mir[I5100_MAX_CTLRS];
/* adjusted memory interleave range register */
- unsigned amir[I5100_CHANNELS];
+ unsigned amir[I5100_MAX_CTLRS];
/* dimm interleave range */
struct {
unsigned rank[I5100_MAX_RANK_INTERLEAVE];
u64 limit;
- } dmir[I5100_CHANNELS][I5100_MAX_DMIRS];
+ } dmir[I5100_MAX_CTLRS][I5100_MAX_DMIRS];
/* memory technology registers... */
struct {
@@ -328,33 +310,30 @@ struct i5100_priv {
unsigned numbank; /* 2 or 3 lines */
unsigned numrow; /* 13 .. 16 lines */
unsigned numcol; /* 11 .. 12 lines */
- } mtr[I5100_CHANNELS][I5100_MAX_RANKS_PER_CHAN];
+ } mtr[I5100_MAX_CTLRS][I5100_MAX_RANKS_PER_CTLR];
u64 tolm; /* top of low memory in bytes */
- unsigned ranksperchan; /* number of ranks per channel */
+ unsigned ranksperctlr; /* number of ranks per controller */
struct pci_dev *mc; /* device 16 func 1 */
struct pci_dev *ch0mm; /* device 21 func 0 */
struct pci_dev *ch1mm; /* device 22 func 0 */
-
- struct delayed_work i5100_scrubbing;
- int scrub_enable;
};
-/* map a rank/chan to a slot number on the mainboard */
+/* map a rank/ctlr to a slot number on the mainboard */
static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
- int chan, int rank)
+ int ctlr, int rank)
{
const struct i5100_priv *priv = mci->pvt_info;
int i;
- for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
+ for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
int j;
- const int numrank = priv->dimm_numrank[chan][i];
+ const int numrank = priv->dimm_numrank[ctlr][i];
for (j = 0; j < numrank; j++)
if (priv->dimm_csmap[i][j] == rank)
- return i * 2 + chan;
+ return i * 2 + ctlr;
}
return -1;
@@ -395,32 +374,32 @@ static const char *i5100_err_msg(unsigned err)
return "none";
}
-/* convert csrow index into a rank (per channel -- 0..5) */
+/* convert csrow index into a rank (per controller -- 0..5) */
static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
{
const struct i5100_priv *priv = mci->pvt_info;
- return csrow % priv->ranksperchan;
+ return csrow % priv->ranksperctlr;
}
-/* convert csrow index into a channel (0..1) */
-static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
+/* convert csrow index into a controller (0..1) */
+static int i5100_csrow_to_cntlr(const struct mem_ctl_info *mci, int csrow)
{
const struct i5100_priv *priv = mci->pvt_info;
- return csrow / priv->ranksperchan;
+ return csrow / priv->ranksperctlr;
}
static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
- int chan, int rank)
+ int ctlr, int rank)
{
const struct i5100_priv *priv = mci->pvt_info;
- return chan * priv->ranksperchan + rank;
+ return ctlr * priv->ranksperctlr + rank;
}
static void i5100_handle_ce(struct mem_ctl_info *mci,
- int chan,
+ int ctlr,
unsigned bank,
unsigned rank,
unsigned long syndrome,
@@ -428,12 +407,12 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
unsigned ras,
const char *msg)
{
- const int csrow = i5100_rank_to_csrow(mci, chan, rank);
+ const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);
printk(KERN_ERR
- "CE chan %d, bank %u, rank %u, syndrome 0x%lx, "
+ "CE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
- chan, bank, rank, syndrome, cas, ras,
+ ctlr, bank, rank, syndrome, cas, ras,
csrow, mci->csrows[csrow].channels[0].label, msg);
mci->ce_count++;
@@ -442,7 +421,7 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
}
static void i5100_handle_ue(struct mem_ctl_info *mci,
- int chan,
+ int ctlr,
unsigned bank,
unsigned rank,
unsigned long syndrome,
@@ -450,23 +429,23 @@ static void i5100_handle_ue(struct mem_ctl_info *mci,
unsigned ras,
const char *msg)
{
- const int csrow = i5100_rank_to_csrow(mci, chan, rank);
+ const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);
printk(KERN_ERR
- "UE chan %d, bank %u, rank %u, syndrome 0x%lx, "
+ "UE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
- chan, bank, rank, syndrome, cas, ras,
+ ctlr, bank, rank, syndrome, cas, ras,
csrow, mci->csrows[csrow].channels[0].label, msg);
mci->ue_count++;
mci->csrows[csrow].ue_count++;
}
-static void i5100_read_log(struct mem_ctl_info *mci, int chan,
+static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
u32 ferr, u32 nerr)
{
struct i5100_priv *priv = mci->pvt_info;
- struct pci_dev *pdev = (chan) ? priv->ch1mm : priv->ch0mm;
+ struct pci_dev *pdev = (ctlr) ? priv->ch1mm : priv->ch0mm;
u32 dw;
u32 dw2;
unsigned syndrome = 0;
@@ -505,7 +484,7 @@ static void i5100_read_log(struct mem_ctl_info *mci, int chan,
else
msg = i5100_err_msg(nerr);
- i5100_handle_ce(mci, chan, bank, rank, syndrome, cas, ras, msg);
+ i5100_handle_ce(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
}
if (i5100_validlog_nrecmemvalid(dw)) {
@@ -527,7 +506,7 @@ static void i5100_read_log(struct mem_ctl_info *mci, int chan,
else
msg = i5100_err_msg(nerr);
- i5100_handle_ue(mci, chan, bank, rank, syndrome, cas, ras, msg);
+ i5100_handle_ue(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
}
pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
@@ -555,80 +534,6 @@ static void i5100_check_error(struct mem_ctl_info *mci)
}
}
-/* The i5100 chipset will scrub the entire memory once, then
- * set a done bit. Continuous scrubbing is achieved by enqueing
- * delayed work to a workqueue, checking every few minutes if
- * the scrubbing has completed and if so reinitiating it.
- */
-
-static void i5100_refresh_scrubbing(struct work_struct *work)
-{
- struct delayed_work *i5100_scrubbing = container_of(work,
- struct delayed_work,
- work);
- struct i5100_priv *priv = container_of(i5100_scrubbing,
- struct i5100_priv,
- i5100_scrubbing);
- u32 dw;
-
- pci_read_config_dword(priv->mc, I5100_MC, &dw);
-
- if (priv->scrub_enable) {
-
- pci_read_config_dword(priv->mc, I5100_MC, &dw);
-
- if (i5100_mc_scrbdone(dw)) {
- dw |= I5100_MC_SCRBEN_MASK;
- pci_write_config_dword(priv->mc, I5100_MC, dw);
- pci_read_config_dword(priv->mc, I5100_MC, &dw);
- }
-
- schedule_delayed_work(&(priv->i5100_scrubbing),
- I5100_SCRUB_REFRESH_RATE);
- }
-}
-/*
- * The bandwidth is based on experimentation, feel free to refine it.
- */
-static int i5100_set_scrub_rate(struct mem_ctl_info *mci,
- u32 *bandwidth)
-{
- struct i5100_priv *priv = mci->pvt_info;
- u32 dw;
-
- pci_read_config_dword(priv->mc, I5100_MC, &dw);
- if (*bandwidth) {
- priv->scrub_enable = 1;
- dw |= I5100_MC_SCRBEN_MASK;
- schedule_delayed_work(&(priv->i5100_scrubbing),
- I5100_SCRUB_REFRESH_RATE);
- } else {
- priv->scrub_enable = 0;
- dw &= ~I5100_MC_SCRBEN_MASK;
- cancel_delayed_work(&(priv->i5100_scrubbing));
- }
- pci_write_config_dword(priv->mc, I5100_MC, dw);
-
- pci_read_config_dword(priv->mc, I5100_MC, &dw);
-
- *bandwidth = 5900000 * i5100_mc_scrben(dw);
-
- return 0;
-}
-
-static int i5100_get_scrub_rate(struct mem_ctl_info *mci,
- u32 *bandwidth)
-{
- struct i5100_priv *priv = mci->pvt_info;
- u32 dw;
-
- pci_read_config_dword(priv->mc, I5100_MC, &dw);
-
- *bandwidth = 5900000 * i5100_mc_scrben(dw);
-
- return 0;
-}
-
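As the comment in the removed block explains, the i5100 scrubs all of memory once and sets a done bit, so continuous scrubbing is emulated by re-queueing delayed work. A stripped-down kernel-style sketch of that re-arm pattern, with the PCI register access reduced to a stub (names are illustrative):

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define SCRUB_REFRESH_RATE	(5 * 60 * HZ)	/* re-check every five minutes */

struct scrub_state {
	struct delayed_work work;
	int enabled;
};

/*
 * Stand-in for the register poking (read I5100_MC, re-set SCRBEN once
 * SCRBDONE is set); the real driver did this via PCI config space.
 */
static void restart_hw_scrub_if_done(void)
{
}

/*
 * The chipset scrubs all of memory once and then sets a done bit, so
 * continuous scrubbing means periodically re-arming it from software.
 */
static void scrub_refresh(struct work_struct *work)
{
	struct scrub_state *s = container_of(to_delayed_work(work),
					     struct scrub_state, work);

	if (!s->enabled)
		return;

	restart_hw_scrub_if_done();
	schedule_delayed_work(&s->work, SCRUB_REFRESH_RATE);
}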
static struct pci_dev *pci_get_device_func(unsigned vendor,
unsigned device,
unsigned func)
@@ -652,19 +557,19 @@ static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci,
int csrow)
{
struct i5100_priv *priv = mci->pvt_info;
- const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow);
- const unsigned chan = i5100_csrow_to_chan(mci, csrow);
+ const unsigned ctlr_rank = i5100_csrow_to_rank(mci, csrow);
+ const unsigned ctlr = i5100_csrow_to_cntlr(mci, csrow);
unsigned addr_lines;
/* dimm present? */
- if (!priv->mtr[chan][chan_rank].present)
+ if (!priv->mtr[ctlr][ctlr_rank].present)
return 0ULL;
addr_lines =
I5100_DIMM_ADDR_LINES +
- priv->mtr[chan][chan_rank].numcol +
- priv->mtr[chan][chan_rank].numrow +
- priv->mtr[chan][chan_rank].numbank;
+ priv->mtr[ctlr][ctlr_rank].numcol +
+ priv->mtr[ctlr][ctlr_rank].numrow +
+ priv->mtr[ctlr][ctlr_rank].numbank;
return (unsigned long)
((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
@@ -676,11 +581,11 @@ static void __devinit i5100_init_mtr(struct mem_ctl_info *mci)
struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
int i;
- for (i = 0; i < I5100_CHANNELS; i++) {
+ for (i = 0; i < I5100_MAX_CTLRS; i++) {
int j;
struct pci_dev *pdev = mms[i];
- for (j = 0; j < I5100_MAX_RANKS_PER_CHAN; j++) {
+ for (j = 0; j < I5100_MAX_RANKS_PER_CTLR; j++) {
const unsigned addr =
(j < 4) ? I5100_MTR_0 + j * 2 :
I5100_MTR_4 + (j - 4) * 2;
@@ -739,6 +644,7 @@ static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
* fill dimm chip select map
*
* FIXME:
+ * o only valid for 4 ranks per controller
* o not the only way to may chip selects to dimm slots
* o investigate if there is some way to obtain this map from the bios
*/
@@ -747,7 +653,9 @@ static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
struct i5100_priv *priv = mci->pvt_info;
int i;
- for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
+ WARN_ON(priv->ranksperctlr != 4);
+
+ for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
int j;
for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++)
@@ -755,21 +663,12 @@ static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
}
/* only 2 chip selects per slot... */
- if (priv->ranksperchan == 4) {
- priv->dimm_csmap[0][0] = 0;
- priv->dimm_csmap[0][1] = 3;
- priv->dimm_csmap[1][0] = 1;
- priv->dimm_csmap[1][1] = 2;
- priv->dimm_csmap[2][0] = 2;
- priv->dimm_csmap[3][0] = 3;
- } else {
- priv->dimm_csmap[0][0] = 0;
- priv->dimm_csmap[0][1] = 1;
- priv->dimm_csmap[1][0] = 2;
- priv->dimm_csmap[1][1] = 3;
- priv->dimm_csmap[2][0] = 4;
- priv->dimm_csmap[2][1] = 5;
- }
+ priv->dimm_csmap[0][0] = 0;
+ priv->dimm_csmap[0][1] = 3;
+ priv->dimm_csmap[1][0] = 1;
+ priv->dimm_csmap[1][1] = 2;
+ priv->dimm_csmap[2][0] = 2;
+ priv->dimm_csmap[3][0] = 3;
}
static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
@@ -778,10 +677,10 @@ static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
struct i5100_priv *priv = mci->pvt_info;
int i;
- for (i = 0; i < I5100_CHANNELS; i++) {
+ for (i = 0; i < I5100_MAX_CTLRS; i++) {
int j;
- for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CHAN; j++) {
+ for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CTLR; j++) {
u8 rank;
if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0)
@@ -821,7 +720,7 @@ static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
pci_read_config_word(pdev, I5100_AMIR_1, &w);
priv->amir[1] = w;
- for (i = 0; i < I5100_CHANNELS; i++) {
+ for (i = 0; i < I5100_MAX_CTLRS; i++) {
int j;
for (j = 0; j < 5; j++) {
@@ -848,7 +747,7 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
for (i = 0; i < mci->nr_csrows; i++) {
const unsigned long npages = i5100_npages(mci, i);
- const unsigned chan = i5100_csrow_to_chan(mci, i);
+ const unsigned cntlr = i5100_csrow_to_cntlr(mci, i);
const unsigned rank = i5100_csrow_to_rank(mci, i);
if (!npages)
@@ -866,7 +765,7 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
mci->csrows[i].grain = 32;
mci->csrows[i].csrow_idx = i;
mci->csrows[i].dtype =
- (priv->mtr[chan][rank].width == 4) ? DEV_X4 : DEV_X8;
+ (priv->mtr[cntlr][rank].width == 4) ? DEV_X4 : DEV_X8;
mci->csrows[i].ue_count = 0;
mci->csrows[i].ce_count = 0;
mci->csrows[i].mtype = MEM_RDDR2;
@@ -878,7 +777,7 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
mci->csrows[i].channels[0].csrow = mci->csrows + i;
snprintf(mci->csrows[i].channels[0].label,
sizeof(mci->csrows[i].channels[0].label),
- "DIMM%u", i5100_rank_to_slot(mci, chan, rank));
+ "DIMM%u", i5100_rank_to_slot(mci, cntlr, rank));
total_pages += npages;
}
@@ -916,6 +815,13 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
pci_read_config_dword(pdev, I5100_MS, &dw);
ranksperch = !!(dw & (1 << 8)) * 2 + 4;
+ if (ranksperch != 4) {
+ /* FIXME: get 6 ranks / controller to work - need hw... */
+ printk(KERN_INFO "i5100_edac: unsupported configuration.\n");
+ ret = -ENODEV;
+ goto bail_pdev;
+ }
+
/* enable error reporting... */
pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw);
dw &= ~I5100_FERR_NF_MEM_ANY_MASK;
@@ -958,21 +864,11 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
mci->dev = &pdev->dev;
priv = mci->pvt_info;
- priv->ranksperchan = ranksperch;
+ priv->ranksperctlr = ranksperch;
priv->mc = pdev;
priv->ch0mm = ch0mm;
priv->ch1mm = ch1mm;
- INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing);
-
- /* If scrubbing was already enabled by the bios, start maintaining it */
- pci_read_config_dword(pdev, I5100_MC, &dw);
- if (i5100_mc_scrben(dw)) {
- priv->scrub_enable = 1;
- schedule_delayed_work(&(priv->i5100_scrubbing),
- I5100_SCRUB_REFRESH_RATE);
- }
-
i5100_init_dimm_layout(pdev, mci);
i5100_init_interleaving(pdev, mci);
@@ -986,8 +882,6 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
mci->ctl_page_to_phys = NULL;
mci->edac_check = i5100_check_error;
- mci->set_sdram_scrub_rate = i5100_set_scrub_rate;
- mci->get_sdram_scrub_rate = i5100_get_scrub_rate;
i5100_init_csrows(mci);
@@ -1003,14 +897,12 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
if (edac_mc_add_mc(mci)) {
ret = -ENODEV;
- goto bail_scrub;
+ goto bail_mc;
}
return ret;
-bail_scrub:
- priv->scrub_enable = 0;
- cancel_delayed_work_sync(&(priv->i5100_scrubbing));
+bail_mc:
edac_mc_free(mci);
bail_disable_ch1:
@@ -1043,10 +935,6 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
return;
priv = mci->pvt_info;
-
- priv->scrub_enable = 0;
- cancel_delayed_work_sync(&(priv->i5100_scrubbing));
-
pci_disable_device(pdev);
pci_disable_device(priv->ch0mm);
pci_disable_device(priv->ch1mm);
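The rank/controller helpers renamed above keep the linear chip-select layout: the first ranksperctlr csrows belong to controller 0, the rest to controller 1. A small standalone sketch of that index arithmetic (the constant is illustrative; the probe path above refuses anything other than 4 ranks per controller):

#include <stdio.h>

#define RANKS_PER_CTLR 4	/* the probe path bails out unless it sees 4 */

static int csrow_to_rank(int csrow) { return csrow % RANKS_PER_CTLR; }
static int csrow_to_ctlr(int csrow) { return csrow / RANKS_PER_CTLR; }
static int rank_to_csrow(int ctlr, int rank) { return ctlr * RANKS_PER_CTLR + rank; }

int main(void)
{
	int csrow;

	/*
	 * Round-trip every csrow through the mapping: 0..3 land on
	 * controller 0, 4..7 on controller 1.
	 */
	for (csrow = 0; csrow < 2 * RANKS_PER_CTLR; csrow++)
		printf("csrow %d -> ctlr %d, rank %d -> csrow %d\n",
		       csrow, csrow_to_ctlr(csrow), csrow_to_rank(csrow),
		       rank_to_csrow(csrow_to_ctlr(csrow), csrow_to_rank(csrow)));
	return 0;
}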
diff --git a/trunk/drivers/gpio/Kconfig b/trunk/drivers/gpio/Kconfig
index a019b49ecc9b..57ca339924ef 100644
--- a/trunk/drivers/gpio/Kconfig
+++ b/trunk/drivers/gpio/Kconfig
@@ -206,12 +206,6 @@ config GPIO_LANGWELL
help
Say Y here to support Intel Moorestown platform GPIO.
-config GPIO_TIMBERDALE
- bool "Support for timberdale GPIO IP"
- depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM
- ---help---
- Add support for the GPIO IP in the timberdale FPGA.
-
comment "SPI GPIO expanders:"
config GPIO_MAX7301
diff --git a/trunk/drivers/gpio/Makefile b/trunk/drivers/gpio/Makefile
index 52fe4cf734c7..270b6d7839f5 100644
--- a/trunk/drivers/gpio/Makefile
+++ b/trunk/drivers/gpio/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
obj-$(CONFIG_GPIO_PL061) += pl061.o
-obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o
obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o
obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o
diff --git a/trunk/drivers/gpio/gpiolib.c b/trunk/drivers/gpio/gpiolib.c
index a25ad284a272..50de0f5750d8 100644
--- a/trunk/drivers/gpio/gpiolib.c
+++ b/trunk/drivers/gpio/gpiolib.c
@@ -53,7 +53,6 @@ struct gpio_desc {
#define FLAG_SYSFS 4 /* exported via /sys/class/gpio/control */
#define FLAG_TRIG_FALL 5 /* trigger on falling edge */
#define FLAG_TRIG_RISE 6 /* trigger on rising edge */
-#define FLAG_ACTIVE_LOW 7 /* sysfs value has active low */
#define PDESC_ID_SHIFT 16 /* add new flags before this one */
@@ -211,11 +210,6 @@ static DEFINE_MUTEX(sysfs_lock);
* * configures behavior of poll(2) on /value
* * available only if pin can generate IRQs on input
* * is read/write as "none", "falling", "rising", or "both"
- * /active_low
- * * configures polarity of /value
- * * is read/write as zero/nonzero
- * * also affects existing and subsequent "falling" and "rising"
- * /edge configuration
*/
static ssize_t gpio_direction_show(struct device *dev,
@@ -261,7 +255,7 @@ static ssize_t gpio_direction_store(struct device *dev,
return status ? : size;
}
-static /* const */ DEVICE_ATTR(direction, 0644,
+static const DEVICE_ATTR(direction, 0644,
gpio_direction_show, gpio_direction_store);
static ssize_t gpio_value_show(struct device *dev,
@@ -273,17 +267,10 @@ static ssize_t gpio_value_show(struct device *dev,
mutex_lock(&sysfs_lock);
- if (!test_bit(FLAG_EXPORT, &desc->flags)) {
+ if (!test_bit(FLAG_EXPORT, &desc->flags))
status = -EIO;
- } else {
- int value;
-
- value = !!gpio_get_value_cansleep(gpio);
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
- value = !value;
-
- status = sprintf(buf, "%d\n", value);
- }
+ else
+ status = sprintf(buf, "%d\n", !!gpio_get_value_cansleep(gpio));
mutex_unlock(&sysfs_lock);
return status;
@@ -307,8 +294,6 @@ static ssize_t gpio_value_store(struct device *dev,
status = strict_strtol(buf, 0, &value);
if (status == 0) {
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
- value = !value;
gpio_set_value_cansleep(gpio, value != 0);
status = size;
}
@@ -318,7 +303,7 @@ static ssize_t gpio_value_store(struct device *dev,
return status;
}
-static const DEVICE_ATTR(value, 0644,
+static /*const*/ DEVICE_ATTR(value, 0644,
gpio_value_show, gpio_value_store);
static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
@@ -367,11 +352,9 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
irq_flags = IRQF_SHARED;
if (test_bit(FLAG_TRIG_FALL, &gpio_flags))
- irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
- IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
+ irq_flags |= IRQF_TRIGGER_FALLING;
if (test_bit(FLAG_TRIG_RISE, &gpio_flags))
- irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
- IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
+ irq_flags |= IRQF_TRIGGER_RISING;
if (!pdesc) {
pdesc = kmalloc(sizeof(*pdesc), GFP_KERNEL);
@@ -492,79 +475,9 @@ static ssize_t gpio_edge_store(struct device *dev,
static DEVICE_ATTR(edge, 0644, gpio_edge_show, gpio_edge_store);
-static int sysfs_set_active_low(struct gpio_desc *desc, struct device *dev,
- int value)
-{
- int status = 0;
-
- if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value)
- return 0;
-
- if (value)
- set_bit(FLAG_ACTIVE_LOW, &desc->flags);
- else
- clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
-
- /* reconfigure poll(2) support if enabled on one edge only */
- if (dev != NULL && (!!test_bit(FLAG_TRIG_RISE, &desc->flags) ^
- !!test_bit(FLAG_TRIG_FALL, &desc->flags))) {
- unsigned long trigger_flags = desc->flags & GPIO_TRIGGER_MASK;
-
- gpio_setup_irq(desc, dev, 0);
- status = gpio_setup_irq(desc, dev, trigger_flags);
- }
-
- return status;
-}
-
-static ssize_t gpio_active_low_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- const struct gpio_desc *desc = dev_get_drvdata(dev);
- ssize_t status;
-
- mutex_lock(&sysfs_lock);
-
- if (!test_bit(FLAG_EXPORT, &desc->flags))
- status = -EIO;
- else
- status = sprintf(buf, "%d\n",
- !!test_bit(FLAG_ACTIVE_LOW, &desc->flags));
-
- mutex_unlock(&sysfs_lock);
-
- return status;
-}
-
-static ssize_t gpio_active_low_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- struct gpio_desc *desc = dev_get_drvdata(dev);
- ssize_t status;
-
- mutex_lock(&sysfs_lock);
-
- if (!test_bit(FLAG_EXPORT, &desc->flags)) {
- status = -EIO;
- } else {
- long value;
-
- status = strict_strtol(buf, 0, &value);
- if (status == 0)
- status = sysfs_set_active_low(desc, dev, value != 0);
- }
-
- mutex_unlock(&sysfs_lock);
-
- return status ? : size;
-}
-
-static const DEVICE_ATTR(active_low, 0644,
- gpio_active_low_show, gpio_active_low_store);
-
static const struct attribute *gpio_attrs[] = {
+ &dev_attr_direction.attr,
&dev_attr_value.attr,
- &dev_attr_active_low.attr,
NULL,
};
@@ -749,12 +662,12 @@ int gpio_export(unsigned gpio, bool direction_may_change)
dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
desc, ioname ? ioname : "gpio%d", gpio);
if (!IS_ERR(dev)) {
- status = sysfs_create_group(&dev->kobj,
+ if (direction_may_change)
+ status = sysfs_create_group(&dev->kobj,
&gpio_attr_group);
-
- if (!status && direction_may_change)
+ else
status = device_create_file(dev,
- &dev_attr_direction);
+ &dev_attr_value);
if (!status && gpio_to_irq(gpio) >= 0
&& (direction_may_change
@@ -831,55 +744,6 @@ int gpio_export_link(struct device *dev, const char *name, unsigned gpio)
}
EXPORT_SYMBOL_GPL(gpio_export_link);
-
-/**
- * gpio_sysfs_set_active_low - set the polarity of gpio sysfs value
- * @gpio: gpio to change
- * @value: non-zero to use active low, i.e. inverted values
- *
- * Set the polarity of /sys/class/gpio/gpioN/value sysfs attribute.
- * The GPIO does not have to be exported yet. If poll(2) support has
- * been enabled for either rising or falling edge, it will be
- * reconfigured to follow the new polarity.
- *
- * Returns zero on success, else an error.
- */
-int gpio_sysfs_set_active_low(unsigned gpio, int value)
-{
- struct gpio_desc *desc;
- struct device *dev = NULL;
- int status = -EINVAL;
-
- if (!gpio_is_valid(gpio))
- goto done;
-
- mutex_lock(&sysfs_lock);
-
- desc = &gpio_desc[gpio];
-
- if (test_bit(FLAG_EXPORT, &desc->flags)) {
- struct device *dev;
-
- dev = class_find_device(&gpio_class, NULL, desc, match_export);
- if (dev == NULL) {
- status = -ENODEV;
- goto unlock;
- }
- }
-
- status = sysfs_set_active_low(desc, dev, value);
-
-unlock:
- mutex_unlock(&sysfs_lock);
-
-done:
- if (status)
- pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);
-
- return status;
-}
-EXPORT_SYMBOL_GPL(gpio_sysfs_set_active_low);
-
/**
* gpio_unexport - reverse effect of gpio_export()
* @gpio: gpio to make unavailable
@@ -1230,7 +1094,6 @@ void gpio_free(unsigned gpio)
}
desc_set_label(desc, NULL);
module_put(desc->chip->owner);
- clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
clear_bit(FLAG_REQUESTED, &desc->flags);
} else
WARN_ON(extra_checks);
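For reference, the active_low support removed above reduces to one conditional inversion of the raw line value before it is shown through sysfs. A minimal standalone sketch of that logic, detached from the sysfs plumbing:

#include <stdbool.h>
#include <stdio.h>

/*
 * Invert the raw GPIO reading when the line is flagged active-low,
 * which is what the removed FLAG_ACTIVE_LOW handling did for /value.
 */
static int logical_value(int raw, bool active_low)
{
	int value = !!raw;

	return active_low ? !value : value;
}

int main(void)
{
	printf("raw=1 active_low=0 -> %d\n", logical_value(1, false));
	printf("raw=1 active_low=1 -> %d\n", logical_value(1, true));
	return 0;
}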
diff --git a/trunk/drivers/gpio/langwell_gpio.c b/trunk/drivers/gpio/langwell_gpio.c
index 6c0ebbdc659e..4baf3d7d0f8e 100644
--- a/trunk/drivers/gpio/langwell_gpio.c
+++ b/trunk/drivers/gpio/langwell_gpio.c
@@ -123,7 +123,7 @@ static int lnw_irq_type(unsigned irq, unsigned type)
void __iomem *grer = (void __iomem *)(&lnw->reg_base->GRER[reg]);
void __iomem *gfer = (void __iomem *)(&lnw->reg_base->GFER[reg]);
- if (gpio >= lnw->chip.ngpio)
+ if (gpio < 0 || gpio > lnw->chip.ngpio)
return -EINVAL;
spin_lock_irqsave(&lnw->lock, flags);
if (type & IRQ_TYPE_EDGE_RISING)
diff --git a/trunk/drivers/gpio/timbgpio.c b/trunk/drivers/gpio/timbgpio.c
deleted file mode 100644
index a4d344ba8e5c..000000000000
--- a/trunk/drivers/gpio/timbgpio.c
+++ /dev/null
@@ -1,342 +0,0 @@
-/*
- * timbgpio.c timberdale FPGA GPIO driver
- * Copyright (c) 2009 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-/* Supports:
- * Timberdale FPGA GPIO
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-#define DRIVER_NAME "timb-gpio"
-
-#define TGPIOVAL 0x00
-#define TGPIODIR 0x04
-#define TGPIO_IER 0x08
-#define TGPIO_ISR 0x0c
-#define TGPIO_IPR 0x10
-#define TGPIO_ICR 0x14
-#define TGPIO_FLR 0x18
-#define TGPIO_LVR 0x1c
-
-struct timbgpio {
- void __iomem *membase;
- spinlock_t lock; /* mutual exclusion */
- struct gpio_chip gpio;
- int irq_base;
-};
-
-static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
- unsigned offset, bool enabled)
-{
- struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
- u32 reg;
-
- spin_lock(&tgpio->lock);
- reg = ioread32(tgpio->membase + offset);
-
- if (enabled)
- reg |= (1 << index);
- else
- reg &= ~(1 << index);
-
- iowrite32(reg, tgpio->membase + offset);
- spin_unlock(&tgpio->lock);
-
- return 0;
-}
-
-static int timbgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
-{
- return timbgpio_update_bit(gpio, nr, TGPIODIR, true);
-}
-
-static int timbgpio_gpio_get(struct gpio_chip *gpio, unsigned nr)
-{
- struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
- u32 value;
-
- value = ioread32(tgpio->membase + TGPIOVAL);
- return (value & (1 << nr)) ? 1 : 0;
-}
-
-static int timbgpio_gpio_direction_output(struct gpio_chip *gpio,
- unsigned nr, int val)
-{
- return timbgpio_update_bit(gpio, nr, TGPIODIR, false);
-}
-
-static void timbgpio_gpio_set(struct gpio_chip *gpio,
- unsigned nr, int val)
-{
- timbgpio_update_bit(gpio, nr, TGPIOVAL, val != 0);
-}
-
-static int timbgpio_to_irq(struct gpio_chip *gpio, unsigned offset)
-{
- struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
-
- if (tgpio->irq_base <= 0)
- return -EINVAL;
-
- return tgpio->irq_base + offset;
-}
-
-/*
- * GPIO IRQ
- */
-static void timbgpio_irq_disable(unsigned irq)
-{
- struct timbgpio *tgpio = get_irq_chip_data(irq);
- int offset = irq - tgpio->irq_base;
-
- timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 0);
-}
-
-static void timbgpio_irq_enable(unsigned irq)
-{
- struct timbgpio *tgpio = get_irq_chip_data(irq);
- int offset = irq - tgpio->irq_base;
-
- timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 1);
-}
-
-static int timbgpio_irq_type(unsigned irq, unsigned trigger)
-{
- struct timbgpio *tgpio = get_irq_chip_data(irq);
- int offset = irq - tgpio->irq_base;
- unsigned long flags;
- u32 lvr, flr;
-
- if (offset < 0 || offset > tgpio->gpio.ngpio)
- return -EINVAL;
-
- spin_lock_irqsave(&tgpio->lock, flags);
-
- lvr = ioread32(tgpio->membase + TGPIO_LVR);
- flr = ioread32(tgpio->membase + TGPIO_FLR);
-
- if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
- flr &= ~(1 << offset);
- if (trigger & IRQ_TYPE_LEVEL_HIGH)
- lvr |= 1 << offset;
- else
- lvr &= ~(1 << offset);
- }
-
- if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
- return -EINVAL;
- else {
- flr |= 1 << offset;
- /* opposite compared to the datasheet, but it mirrors the
- * reality
- */
- if (trigger & IRQ_TYPE_EDGE_FALLING)
- lvr |= 1 << offset;
- else
- lvr &= ~(1 << offset);
- }
-
- iowrite32(lvr, tgpio->membase + TGPIO_LVR);
- iowrite32(flr, tgpio->membase + TGPIO_FLR);
- iowrite32(1 << offset, tgpio->membase + TGPIO_ICR);
- spin_unlock_irqrestore(&tgpio->lock, flags);
-
- return 0;
-}
-
-static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
-{
- struct timbgpio *tgpio = get_irq_data(irq);
- unsigned long ipr;
- int offset;
-
- desc->chip->ack(irq);
- ipr = ioread32(tgpio->membase + TGPIO_IPR);
- iowrite32(ipr, tgpio->membase + TGPIO_ICR);
-
- for_each_bit(offset, &ipr, tgpio->gpio.ngpio)
- generic_handle_irq(timbgpio_to_irq(&tgpio->gpio, offset));
-}
-
-static struct irq_chip timbgpio_irqchip = {
- .name = "GPIO",
- .enable = timbgpio_irq_enable,
- .disable = timbgpio_irq_disable,
- .set_type = timbgpio_irq_type,
-};
-
-static int __devinit timbgpio_probe(struct platform_device *pdev)
-{
- int err, i;
- struct gpio_chip *gc;
- struct timbgpio *tgpio;
- struct resource *iomem;
- struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
- int irq = platform_get_irq(pdev, 0);
-
- if (!pdata || pdata->nr_pins > 32) {
- err = -EINVAL;
- goto err_mem;
- }
-
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iomem) {
- err = -EINVAL;
- goto err_mem;
- }
-
- tgpio = kzalloc(sizeof(*tgpio), GFP_KERNEL);
- if (!tgpio) {
- err = -EINVAL;
- goto err_mem;
- }
- tgpio->irq_base = pdata->irq_base;
-
- spin_lock_init(&tgpio->lock);
-
- if (!request_mem_region(iomem->start, resource_size(iomem),
- DRIVER_NAME)) {
- err = -EBUSY;
- goto err_request;
- }
-
- tgpio->membase = ioremap(iomem->start, resource_size(iomem));
- if (!tgpio->membase) {
- err = -ENOMEM;
- goto err_ioremap;
- }
-
- gc = &tgpio->gpio;
-
- gc->label = dev_name(&pdev->dev);
- gc->owner = THIS_MODULE;
- gc->dev = &pdev->dev;
- gc->direction_input = timbgpio_gpio_direction_input;
- gc->get = timbgpio_gpio_get;
- gc->direction_output = timbgpio_gpio_direction_output;
- gc->set = timbgpio_gpio_set;
- gc->to_irq = (irq >= 0 && tgpio->irq_base > 0) ? timbgpio_to_irq : NULL;
- gc->dbg_show = NULL;
- gc->base = pdata->gpio_base;
- gc->ngpio = pdata->nr_pins;
- gc->can_sleep = 0;
-
- err = gpiochip_add(gc);
- if (err)
- goto err_chipadd;
-
- platform_set_drvdata(pdev, tgpio);
-
- /* make sure to disable interrupts */
- iowrite32(0x0, tgpio->membase + TGPIO_IER);
-
- if (irq < 0 || tgpio->irq_base <= 0)
- return 0;
-
- for (i = 0; i < pdata->nr_pins; i++) {
- set_irq_chip_and_handler_name(tgpio->irq_base + i,
- &timbgpio_irqchip, handle_simple_irq, "mux");
- set_irq_chip_data(tgpio->irq_base + i, tgpio);
-#ifdef CONFIG_ARM
- set_irq_flags(tgpio->irq_base + i, IRQF_VALID | IRQF_PROBE);
-#endif
- }
-
- set_irq_data(irq, tgpio);
- set_irq_chained_handler(irq, timbgpio_irq);
-
- return 0;
-
-err_chipadd:
- iounmap(tgpio->membase);
-err_ioremap:
- release_mem_region(iomem->start, resource_size(iomem));
-err_request:
- kfree(tgpio);
-err_mem:
- printk(KERN_ERR DRIVER_NAME": Failed to register GPIOs: %d\n", err);
-
- return err;
-}
-
-static int __devexit timbgpio_remove(struct platform_device *pdev)
-{
- int err;
- struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
- struct timbgpio *tgpio = platform_get_drvdata(pdev);
- struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- int irq = platform_get_irq(pdev, 0);
-
- if (irq >= 0 && tgpio->irq_base > 0) {
- int i;
- for (i = 0; i < pdata->nr_pins; i++) {
- set_irq_chip(tgpio->irq_base + i, NULL);
- set_irq_chip_data(tgpio->irq_base + i, NULL);
- }
-
- set_irq_handler(irq, NULL);
- set_irq_data(irq, NULL);
- }
-
- err = gpiochip_remove(&tgpio->gpio);
- if (err)
- printk(KERN_ERR DRIVER_NAME": failed to remove gpio_chip\n");
-
- iounmap(tgpio->membase);
- release_mem_region(iomem->start, resource_size(iomem));
- kfree(tgpio);
-
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-static struct platform_driver timbgpio_platform_driver = {
- .driver = {
- .name = DRIVER_NAME,
- .owner = THIS_MODULE,
- },
- .probe = timbgpio_probe,
- .remove = timbgpio_remove,
-};
-
-/*--------------------------------------------------------------------------*/
-
-static int __init timbgpio_init(void)
-{
- return platform_driver_register(&timbgpio_platform_driver);
-}
-
-static void __exit timbgpio_exit(void)
-{
- platform_driver_unregister(&timbgpio_platform_driver);
-}
-
-module_init(timbgpio_init);
-module_exit(timbgpio_exit);
-
-MODULE_DESCRIPTION("Timberdale GPIO driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Mocean Laboratories");
-MODULE_ALIAS("platform:"DRIVER_NAME);
-
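The deleted driver's timbgpio_update_bit() is a locked read-modify-write on a memory-mapped register. The same pattern in isolation, as a kernel-style sketch (the register pointer and lock are assumed to come from the surrounding driver state):

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/*
 * Set or clear one bit in a memory-mapped register without racing
 * other writers -- the pattern behind the removed timbgpio_update_bit().
 */
static void mmio_update_bit(void __iomem *reg, spinlock_t *lock,
			    unsigned int bit, bool set)
{
	u32 val;

	spin_lock(lock);
	val = ioread32(reg);
	if (set)
		val |= 1 << bit;
	else
		val &= ~(1 << bit);
	iowrite32(val, reg);
	spin_unlock(lock);
}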
diff --git a/trunk/drivers/gpu/drm/nouveau/Kconfig b/trunk/drivers/gpu/drm/nouveau/Kconfig
index b1bc1ea182b8..d823e6319516 100644
--- a/trunk/drivers/gpu/drm/nouveau/Kconfig
+++ b/trunk/drivers/gpu/drm/nouveau/Kconfig
@@ -30,12 +30,11 @@ config DRM_NOUVEAU_DEBUG
via debugfs.
menu "I2C encoder or helper chips"
- depends on DRM && I2C
+ depends on DRM
config DRM_I2C_CH7006
tristate "Chrontel ch7006 TV encoder"
- depends on DRM_NOUVEAU
- default m
+ default m if DRM_NOUVEAU
help
Support for Chrontel ch7006 and similar TV encoders, found
on some nVidia video cards.
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_bo.c b/trunk/drivers/gpu/drm/nouveau/nouveau_bo.c
index aa2dfbc3e351..320a14bceb99 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -311,10 +311,8 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
struct drm_device *dev = dev_priv->dev;
switch (dev_priv->gart_info.type) {
-#if __OS_HAS_AGP
case NOUVEAU_GART_AGP:
return ttm_agp_backend_init(bdev, dev->agp->bridge);
-#endif
case NOUVEAU_GART_SGDMA:
return nouveau_sgdma_init_ttm(dev);
default:
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_fence.c b/trunk/drivers/gpu/drm/nouveau/nouveau_fence.c
index dacac9a0842a..0cff7eb3690a 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -205,7 +205,7 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
schedule_timeout(1);
if (intr && signal_pending(current)) {
- ret = -ERESTARTSYS;
+ ret = -ERESTART;
break;
}
}
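The fence-wait hunk above only swaps the error code returned when a signal interrupts the polling loop. For context, the general shape of such an interruptible poll, as a minimal kernel-style sketch (the done() callback and the timeout handling are illustrative, not nouveau's actual code):

#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/*
 * Poll a completion condition, giving up the CPU between checks.
 * Returning a restart code such as -ERESTARTSYS when a signal is
 * pending lets upper layers restart or fail the call cleanly.
 */
static int wait_for_done(bool (*done)(void *), void *arg, bool intr,
			 unsigned long timeout_jiffies)
{
	unsigned long end = jiffies + timeout_jiffies;

	while (!done(arg)) {
		if (time_after(jiffies, end))
			return -EBUSY;
		if (intr && signal_pending(current))
			return -ERESTARTSYS;
		schedule_timeout_interruptible(1);
	}
	return 0;
}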
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_gem.c b/trunk/drivers/gpu/drm/nouveau/nouveau_gem.c
index 18fd8ac9fca7..11f831f0ddc5 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -342,6 +342,8 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
}
ret = ttm_bo_wait_cpu(&nvbo->bo, false);
+ if (ret == -ERESTART)
+ ret = -EAGAIN;
if (ret)
return ret;
goto retry;
@@ -913,6 +915,8 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
goto out;
ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
+ if (ret == -ERESTART)
+ ret = -EAGAIN;
if (ret)
goto out;
}
@@ -921,6 +925,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
} else {
ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
+ if (ret == -ERESTART)
+ ret = -EAGAIN;
+ else
if (ret == 0)
nvbo->cpu_filp = file_priv;
}
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_mem.c b/trunk/drivers/gpu/drm/nouveau/nouveau_mem.c
index 5158a12f7844..02755712ed3d 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -407,7 +407,6 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
return 0;
}
-#if __OS_HAS_AGP
static void nouveau_mem_reset_agp(struct drm_device *dev)
{
uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;
@@ -433,12 +432,10 @@ static void nouveau_mem_reset_agp(struct drm_device *dev)
nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
}
-#endif
int
nouveau_mem_init_agp(struct drm_device *dev)
{
-#if __OS_HAS_AGP
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_agp_info info;
struct drm_agp_mode mode;
@@ -474,7 +471,6 @@ nouveau_mem_init_agp(struct drm_device *dev)
dev_priv->gart_info.type = NOUVEAU_GART_AGP;
dev_priv->gart_info.aper_base = info.aperture_base;
dev_priv->gart_info.aper_size = info.aperture_size;
-#endif
return 0;
}
diff --git a/trunk/drivers/gpu/drm/nouveau/nv40_graph.c b/trunk/drivers/gpu/drm/nouveau/nv40_graph.c
index 7e8547cb5833..d3e0a2a6acf8 100644
--- a/trunk/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/trunk/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -252,9 +252,8 @@ nv40_grctx_init(struct drm_device *dev)
memcpy(pgraph->ctxprog, fw->data, fw->size);
cp = pgraph->ctxprog;
- if (le32_to_cpu(cp->signature) != 0x5043564e ||
- cp->version != 0 ||
- le16_to_cpu(cp->length) != ((fw->size - 7) / 4)) {
+ if (cp->signature != 0x5043564e || cp->version != 0 ||
+ cp->length != ((fw->size - 7) / 4)) {
NV_ERROR(dev, "ctxprog invalid\n");
release_firmware(fw);
nv40_grctx_fini(dev);
@@ -282,9 +281,8 @@ nv40_grctx_init(struct drm_device *dev)
memcpy(pgraph->ctxvals, fw->data, fw->size);
cv = (void *)pgraph->ctxvals;
- if (le32_to_cpu(cv->signature) != 0x5643564e ||
- cv->version != 0 ||
- le32_to_cpu(cv->length) != ((fw->size - 9) / 8)) {
+ if (cv->signature != 0x5643564e || cv->version != 0 ||
+ cv->length != ((fw->size - 9) / 8)) {
NV_ERROR(dev, "ctxvals invalid\n");
release_firmware(fw);
nv40_grctx_fini(dev);
@@ -296,9 +294,8 @@ nv40_grctx_init(struct drm_device *dev)
cp = pgraph->ctxprog;
nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
- for (i = 0; i < le16_to_cpu(cp->length); i++)
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA,
- le32_to_cpu(cp->data[i]));
+ for (i = 0; i < cp->length; i++)
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp->data[i]);
pgraph->accel_blocked = false;
return 0;
@@ -332,9 +329,8 @@ nv40_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
if (!cv)
return;
- for (i = 0; i < le32_to_cpu(cv->length); i++)
- nv_wo32(dev, ctx, le32_to_cpu(cv->data[i].offset),
- le32_to_cpu(cv->data[i].value));
+ for (i = 0; i < cv->length; i++)
+ nv_wo32(dev, ctx, cv->data[i].offset, cv->data[i].value);
}
/*
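The le32_to_cpu()/le16_to_cpu() conversions stripped above are what keep the ctxprog/ctxvals header checks correct on big-endian hosts. A kernel-style sketch of validating such a little-endian header (the struct layout is illustrative; only the signature constant and length formula are taken from the hunk above):

#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>

/* Illustrative little-endian firmware header, not nouveau's real layout. */
struct fw_header {
	__le32 signature;
	u8     version;
	__le16 length;	/* number of 32-bit payload words */
} __packed;

static bool fw_header_valid(const struct fw_header *hdr, size_t fw_size)
{
	/*
	 * Convert every multi-byte field before comparing so the check
	 * behaves the same on big-endian hosts.
	 */
	return le32_to_cpu(hdr->signature) == 0x5043564e &&
	       hdr->version == 0 &&
	       le16_to_cpu(hdr->length) == (fw_size - 7) / 4;
}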
diff --git a/trunk/drivers/gpu/drm/radeon/Makefile b/trunk/drivers/gpu/drm/radeon/Makefile
index b5f5fe75e6af..feb52eee4314 100644
--- a/trunk/drivers/gpu/drm/radeon/Makefile
+++ b/trunk/drivers/gpu/drm/radeon/Makefile
@@ -49,7 +49,7 @@ radeon-y += radeon_device.o radeon_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
- r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o
+ r600_blit_kms.o radeon_pm.o atombios_dp.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
diff --git a/trunk/drivers/gpu/drm/radeon/r100.c b/trunk/drivers/gpu/drm/radeon/r100.c
index 84e5df766d3f..824cc6480a06 100644
--- a/trunk/drivers/gpu/drm/radeon/r100.c
+++ b/trunk/drivers/gpu/drm/radeon/r100.c
@@ -1374,6 +1374,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_TXFORMAT_ARGB4444:
case RADEON_TXFORMAT_VYUY422:
case RADEON_TXFORMAT_YVYU422:
+ case RADEON_TXFORMAT_DXT1:
case RADEON_TXFORMAT_SHADOW16:
case RADEON_TXFORMAT_LDUDV655:
case RADEON_TXFORMAT_DUDV88:
@@ -1381,19 +1382,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
break;
case RADEON_TXFORMAT_ARGB8888:
case RADEON_TXFORMAT_RGBA8888:
+ case RADEON_TXFORMAT_DXT23:
+ case RADEON_TXFORMAT_DXT45:
case RADEON_TXFORMAT_SHADOW32:
case RADEON_TXFORMAT_LDUDUV8888:
track->textures[i].cpp = 4;
break;
- case RADEON_TXFORMAT_DXT1:
- track->textures[i].cpp = 1;
- track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
- break;
- case RADEON_TXFORMAT_DXT23:
- case RADEON_TXFORMAT_DXT45:
- track->textures[i].cpp = 1;
- track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
- break;
}
track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
@@ -2737,7 +2731,6 @@ static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
- DRM_ERROR("compress format %d\n", t->compress_format);
}
static int r100_cs_track_cube(struct radeon_device *rdev,
@@ -2767,36 +2760,6 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
return 0;
}
-static int r100_track_compress_size(int compress_format, int w, int h)
-{
- int block_width, block_height, block_bytes;
- int wblocks, hblocks;
- int min_wblocks;
- int sz;
-
- block_width = 4;
- block_height = 4;
-
- switch (compress_format) {
- case R100_TRACK_COMP_DXT1:
- block_bytes = 8;
- min_wblocks = 4;
- break;
- default:
- case R100_TRACK_COMP_DXT35:
- block_bytes = 16;
- min_wblocks = 2;
- break;
- }
-
- hblocks = (h + block_height - 1) / block_height;
- wblocks = (w + block_width - 1) / block_width;
- if (wblocks < min_wblocks)
- wblocks = min_wblocks;
- sz = wblocks * hblocks * block_bytes;
- return sz;
-}
-
static int r100_cs_track_texture_check(struct radeon_device *rdev,
struct r100_cs_track *track)
{
@@ -2834,15 +2797,9 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
h = h / (1 << i);
if (track->textures[u].roundup_h)
h = roundup_pow_of_two(h);
- if (track->textures[u].compress_format) {
-
- size += r100_track_compress_size(track->textures[u].compress_format, w, h);
- /* compressed textures are block based */
- } else
- size += w * h;
+ size += w * h;
}
size *= track->textures[u].cpp;
-
switch (track->textures[u].tex_coord_type) {
case 0:
break;
@@ -3010,7 +2967,6 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
track->arrays[i].esize = 0x7F;
}
for (i = 0; i < track->num_texture; i++) {
- track->textures[i].compress_format = R100_TRACK_COMP_NONE;
track->textures[i].pitch = 16536;
track->textures[i].width = 16536;
track->textures[i].height = 16536;
@@ -3443,8 +3399,6 @@ int r100_init(struct radeon_device *rdev)
r100_errata(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* Get vram informations */
r100_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
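The compressed-texture tracking removed above sizes DXT surfaces in 4x4 texel blocks: 8 bytes per block for DXT1, 16 for DXT3/5, with a minimum number of block columns. A standalone sketch mirroring the deleted r100_track_compress_size():

#include <stdio.h>

enum { COMP_DXT1 = 1, COMP_DXT35 = 2 };

/*
 * Bytes needed for a w x h DXT texture: the image is stored as 4x4
 * blocks, DXT1 blocks take 8 bytes, DXT3/5 blocks take 16 bytes, and
 * each block row has a minimum width.
 */
static int dxt_size(int format, int w, int h)
{
	int block_bytes = (format == COMP_DXT1) ? 8 : 16;
	int min_wblocks = (format == COMP_DXT1) ? 4 : 2;
	int wblocks = (w + 3) / 4;
	int hblocks = (h + 3) / 4;

	if (wblocks < min_wblocks)
		wblocks = min_wblocks;
	return wblocks * hblocks * block_bytes;
}

int main(void)
{
	printf("256x256 DXT1: %d bytes\n", dxt_size(COMP_DXT1, 256, 256));
	printf("256x256 DXT5: %d bytes\n", dxt_size(COMP_DXT35, 256, 256));
	return 0;
}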
diff --git a/trunk/drivers/gpu/drm/radeon/r100_track.h b/trunk/drivers/gpu/drm/radeon/r100_track.h
index 7188c3778ee2..ca50903dd2bb 100644
--- a/trunk/drivers/gpu/drm/radeon/r100_track.h
+++ b/trunk/drivers/gpu/drm/radeon/r100_track.h
@@ -28,10 +28,6 @@ struct r100_cs_cube_info {
unsigned height;
};
-#define R100_TRACK_COMP_NONE 0
-#define R100_TRACK_COMP_DXT1 1
-#define R100_TRACK_COMP_DXT35 2
-
struct r100_cs_track_texture {
struct radeon_bo *robj;
struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */
@@ -48,7 +44,6 @@ struct r100_cs_track_texture {
bool enabled;
bool roundup_w;
bool roundup_h;
- unsigned compress_format;
};
struct r100_cs_track_limits {
diff --git a/trunk/drivers/gpu/drm/radeon/r200.c b/trunk/drivers/gpu/drm/radeon/r200.c
index 20942127c46b..eb740fc3549f 100644
--- a/trunk/drivers/gpu/drm/radeon/r200.c
+++ b/trunk/drivers/gpu/drm/radeon/r200.c
@@ -401,6 +401,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_TXFORMAT_Y8:
track->textures[i].cpp = 1;
break;
+ case R200_TXFORMAT_DXT1:
case R200_TXFORMAT_AI88:
case R200_TXFORMAT_ARGB1555:
case R200_TXFORMAT_RGB565:
@@ -417,16 +418,9 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_TXFORMAT_ABGR8888:
case R200_TXFORMAT_BGR111110:
case R200_TXFORMAT_LDVDU8888:
- track->textures[i].cpp = 4;
- break;
- case R200_TXFORMAT_DXT1:
- track->textures[i].cpp = 1;
- track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
- break;
case R200_TXFORMAT_DXT23:
case R200_TXFORMAT_DXT45:
- track->textures[i].cpp = 1;
- track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+ track->textures[i].cpp = 4;
break;
}
track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
diff --git a/trunk/drivers/gpu/drm/radeon/r300.c b/trunk/drivers/gpu/drm/radeon/r300.c
index 83490c2b5061..83378c39d0e3 100644
--- a/trunk/drivers/gpu/drm/radeon/r300.c
+++ b/trunk/drivers/gpu/drm/radeon/r300.c
@@ -686,15 +686,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
-
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= R300_TXO_MACRO_TILE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= R300_TXO_MICRO_TILE;
-
- tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
- tmp |= tile_flags;
- ib[idx] = tmp;
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
break;
/* Tracked registers */
@@ -860,6 +852,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_TX_FORMAT_Z6Y5X5:
case R300_TX_FORMAT_W4Z4Y4X4:
case R300_TX_FORMAT_W1Z5Y5X5:
+ case R300_TX_FORMAT_DXT1:
case R300_TX_FORMAT_D3DMFT_CxV8U8:
case R300_TX_FORMAT_B8G8_B8G8:
case R300_TX_FORMAT_G8R8_G8B8:
@@ -873,6 +866,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x17:
case R300_TX_FORMAT_FL_I32:
case 0x1e:
+ case R300_TX_FORMAT_DXT3:
+ case R300_TX_FORMAT_DXT5:
track->textures[i].cpp = 4;
break;
case R300_TX_FORMAT_W16Z16Y16X16:
@@ -883,15 +878,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_TX_FORMAT_FL_R32G32B32A32:
track->textures[i].cpp = 16;
break;
- case R300_TX_FORMAT_DXT1:
- track->textures[i].cpp = 1;
- track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
- break;
- case R300_TX_FORMAT_DXT3:
- case R300_TX_FORMAT_DXT5:
- track->textures[i].cpp = 1;
- track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
- break;
default:
DRM_ERROR("Invalid texture format %u\n",
(idx_value & 0x1F));
@@ -1338,8 +1324,6 @@ int r300_init(struct radeon_device *rdev)
r300_errata(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* Get vram informations */
r300_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
diff --git a/trunk/drivers/gpu/drm/radeon/r600.c b/trunk/drivers/gpu/drm/radeon/r600.c
index a0ac3c134b1b..36656bd110bf 100644
--- a/trunk/drivers/gpu/drm/radeon/r600.c
+++ b/trunk/drivers/gpu/drm/radeon/r600.c
@@ -1863,14 +1863,6 @@ int r600_startup(struct radeon_device *rdev)
}
r600_gpu_init(rdev);
- if (!rdev->r600_blit.shader_obj) {
- r = r600_blit_init(rdev);
- if (r) {
- DRM_ERROR("radeon: failed blitter (%d).\n", r);
- return r;
- }
- }
-
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (unlikely(r != 0))
return r;
@@ -2046,6 +2038,12 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
+ r = r600_blit_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed blitter (%d).\n", r);
+ return r;
+ }
+
rdev->accel_working = true;
r = r600_startup(rdev);
if (r) {
@@ -2067,10 +2065,6 @@ int r600_init(struct radeon_device *rdev)
rdev->accel_working = false;
}
}
-
- r = r600_audio_init(rdev);
- if (r)
- return r; /* TODO error handling */
return 0;
}
@@ -2079,7 +2073,6 @@ void r600_fini(struct radeon_device *rdev)
/* Suspend operations */
r600_suspend(rdev);
- r600_audio_fini(rdev);
r600_blit_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
diff --git a/trunk/drivers/gpu/drm/radeon/r600_audio.c b/trunk/drivers/gpu/drm/radeon/r600_audio.c
deleted file mode 100644
index 99e2c3891a7d..000000000000
--- a/trunk/drivers/gpu/drm/radeon/r600_audio.c
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- * Copyright 2008 Advanced Micro Devices, Inc.
- * Copyright 2008 Red Hat Inc.
- * Copyright 2009 Christian König.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Christian König
- */
-#include "drmP.h"
-#include "radeon.h"
-#include "radeon_reg.h"
-#include "atom.h"
-
-#define AUDIO_TIMER_INTERVALL 100 /* 1/10 sekund should be enough */
-
-/*
- * check if the chipset is supported
- */
-static int r600_audio_chipset_supported(struct radeon_device *rdev)
-{
- return rdev->family >= CHIP_R600
- || rdev->family == CHIP_RS600
- || rdev->family == CHIP_RS690
- || rdev->family == CHIP_RS740;
-}
-
-/*
- * current number of channels
- */
-static int r600_audio_channels(struct radeon_device *rdev)
-{
- return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
-}
-
-/*
- * current bits per sample
- */
-static int r600_audio_bits_per_sample(struct radeon_device *rdev)
-{
- uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
- switch (value) {
- case 0x0: return 8;
- case 0x1: return 16;
- case 0x2: return 20;
- case 0x3: return 24;
- case 0x4: return 32;
- }
-
- DRM_ERROR("Unknown bits per sample 0x%x using 16 instead.\n", (int)value);
-
- return 16;
-}
-
-/*
- * current sampling rate in HZ
- */
-static int r600_audio_rate(struct radeon_device *rdev)
-{
- uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
- uint32_t result;
-
- if (value & 0x4000)
- result = 44100;
- else
- result = 48000;
-
- result *= ((value >> 11) & 0x7) + 1;
- result /= ((value >> 8) & 0x7) + 1;
-
- return result;
-}
-
-/*
- * iec 60958 status bits
- */
-static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
-{
- return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
-}
-
-/*
- * iec 60958 category code
- */
-static uint8_t r600_audio_category_code(struct radeon_device *rdev)
-{
- return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
-}
-
-/*
- * update all hdmi interfaces with current audio parameters
- */
-static void r600_audio_update_hdmi(unsigned long param)
-{
- struct radeon_device *rdev = (struct radeon_device *)param;
- struct drm_device *dev = rdev->ddev;
-
- int channels = r600_audio_channels(rdev);
- int rate = r600_audio_rate(rdev);
- int bps = r600_audio_bits_per_sample(rdev);
- uint8_t status_bits = r600_audio_status_bits(rdev);
- uint8_t category_code = r600_audio_category_code(rdev);
-
- struct drm_encoder *encoder;
- int changes = 0;
-
- changes |= channels != rdev->audio_channels;
- changes |= rate != rdev->audio_rate;
- changes |= bps != rdev->audio_bits_per_sample;
- changes |= status_bits != rdev->audio_status_bits;
- changes |= category_code != rdev->audio_category_code;
-
- if (changes) {
- rdev->audio_channels = channels;
- rdev->audio_rate = rate;
- rdev->audio_bits_per_sample = bps;
- rdev->audio_status_bits = status_bits;
- rdev->audio_category_code = category_code;
- }
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (changes || r600_hdmi_buffer_status_changed(encoder))
- r600_hdmi_update_audio_settings(
- encoder, channels,
- rate, bps, status_bits,
- category_code);
- }
-
- mod_timer(&rdev->audio_timer,
- jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
-}
-
-/*
- * initialize the audio vars and register the update timer
- */
-int r600_audio_init(struct radeon_device *rdev)
-{
- if (!r600_audio_chipset_supported(rdev))
- return 0;
-
- DRM_INFO("%s audio support", radeon_audio ? "Enabling" : "Disabling");
- WREG32_P(R600_AUDIO_ENABLE, radeon_audio ? 0x81000000 : 0x0, ~0x81000000);
-
- rdev->audio_channels = -1;
- rdev->audio_rate = -1;
- rdev->audio_bits_per_sample = -1;
- rdev->audio_status_bits = 0;
- rdev->audio_category_code = 0;
-
- setup_timer(
- &rdev->audio_timer,
- r600_audio_update_hdmi,
- (unsigned long)rdev);
-
- mod_timer(&rdev->audio_timer, jiffies + 1);
-
- return 0;
-}
-
-/*
- * determin how the encoders and audio interface is wired together
- */
-int r600_audio_tmds_index(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_encoder *other;
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- return 0;
-
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- /* special case check if an TMDS1 is present */
- list_for_each_entry(other, &dev->mode_config.encoder_list, head) {
- if (to_radeon_encoder(other)->encoder_id ==
- ENCODER_OBJECT_ID_INTERNAL_TMDS1)
- return 1;
- }
- return 0;
-
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- return 1;
-
- default:
- DRM_ERROR("Unsupported encoder type 0x%02X\n",
- radeon_encoder->encoder_id);
- return -1;
- }
-}
-
-/*
- * atach the audio codec to the clock source of the encoder
- */
-void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- int base_rate = 48000;
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
- break;
-
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
- break;
-
- default:
- DRM_ERROR("Unsupported encoder type 0x%02X\n",
- radeon_encoder->encoder_id);
- return;
- }
-
- switch (r600_audio_tmds_index(encoder)) {
- case 0:
- WREG32(R600_AUDIO_PLL1_MUL, base_rate*50);
- WREG32(R600_AUDIO_PLL1_DIV, clock*100);
- WREG32(R600_AUDIO_CLK_SRCSEL, 0);
- break;
-
- case 1:
- WREG32(R600_AUDIO_PLL2_MUL, base_rate*50);
- WREG32(R600_AUDIO_PLL2_DIV, clock*100);
- WREG32(R600_AUDIO_CLK_SRCSEL, 1);
- break;
- }
-}
-
-/*
- * release the audio timer
- * TODO: How to do this correctly on SMP systems?
- */
-void r600_audio_fini(struct radeon_device *rdev)
-{
- if (!r600_audio_chipset_supported(rdev))
- return;
-
- WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
-
- del_timer(&rdev->audio_timer);
-}
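The deleted r600_audio.c derives every audio parameter from the R600_AUDIO_RATE_BPS_CHANNEL register. A standalone sketch of the rate and bits-per-sample decode performed by the removed functions (the sample register value is made up for illustration):

#include <stdio.h>
#include <stdint.h>

/*
 * Sample rate: base 44.1 kHz or 48 kHz selected by bit 14, then a
 * multiplier in bits 11..13 and a divider in bits 8..10, as in the
 * removed r600_audio_rate().
 */
static int decode_rate(uint32_t reg)
{
	int rate = (reg & 0x4000) ? 44100 : 48000;

	rate *= ((reg >> 11) & 0x7) + 1;
	rate /= ((reg >> 8) & 0x7) + 1;
	return rate;
}

/* Bits per sample live in bits 4..7; unknown encodings fall back to 16. */
static int decode_bps(uint32_t reg)
{
	switch ((reg >> 4) & 0xF) {
	case 0x0: return 8;
	case 0x1: return 16;
	case 0x2: return 20;
	case 0x3: return 24;
	case 0x4: return 32;
	default:  return 16;
	}
}

int main(void)
{
	uint32_t reg = 0x4011;	/* illustrative value, not a real register dump */

	printf("rate %d Hz, %d bits, %d channels\n",
	       decode_rate(reg), decode_bps(reg), (int)(reg & 0x7) + 1);
	return 0;
}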
diff --git a/trunk/drivers/gpu/drm/radeon/r600_hdmi.c b/trunk/drivers/gpu/drm/radeon/r600_hdmi.c
deleted file mode 100644
index fcc949df0e5d..000000000000
--- a/trunk/drivers/gpu/drm/radeon/r600_hdmi.c
+++ /dev/null
@@ -1,506 +0,0 @@
-/*
- * Copyright 2008 Advanced Micro Devices, Inc.
- * Copyright 2008 Red Hat Inc.
- * Copyright 2009 Christian König.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Christian König
- */
-#include "drmP.h"
-#include "radeon_drm.h"
-#include "radeon.h"
-#include "atom.h"
-
-/*
- * HDMI color format
- */
-enum r600_hdmi_color_format {
- RGB = 0,
- YCC_422 = 1,
- YCC_444 = 2
-};
-
-/*
- * IEC60958 status bits
- */
-enum r600_hdmi_iec_status_bits {
- AUDIO_STATUS_DIG_ENABLE = 0x01,
- AUDIO_STATUS_V = 0x02,
- AUDIO_STATUS_VCFG = 0x04,
- AUDIO_STATUS_EMPHASIS = 0x08,
- AUDIO_STATUS_COPYRIGHT = 0x10,
- AUDIO_STATUS_NONAUDIO = 0x20,
- AUDIO_STATUS_PROFESSIONAL = 0x40,
- AUDIO_STATUS_LEVEL = 0x80
-};
-
-struct {
- uint32_t Clock;
-
- int N_32kHz;
- int CTS_32kHz;
-
- int N_44_1kHz;
- int CTS_44_1kHz;
-
- int N_48kHz;
- int CTS_48kHz;
-
-} r600_hdmi_ACR[] = {
- /* 32kHz 44.1kHz 48kHz */
- /* Clock N CTS N CTS N CTS */
- { 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */
- { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
- { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
- { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
- { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
- { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
- { 74175, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */
- { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
- { 148351, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */
- { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
- { 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */
-};
-
-/*
- * calculate CTS value if it's not found in the table
- */
-static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
-{
- if (*CTS == 0)
- *CTS = clock*N/(128*freq)*1000;
- DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
- N, *CTS, freq);
-}
-
-/*
- * update the N and CTS parameters for a given pixel clock rate
- */
-static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
- int CTS;
- int N;
- int i;
-
- for (i = 0; r600_hdmi_ACR[i].Clock != clock && r600_hdmi_ACR[i].Clock != 0; i++);
-
- CTS = r600_hdmi_ACR[i].CTS_32kHz;
- N = r600_hdmi_ACR[i].N_32kHz;
- r600_hdmi_calc_CTS(clock, &CTS, N, 32000);
- WREG32(offset+R600_HDMI_32kHz_CTS, CTS << 12);
- WREG32(offset+R600_HDMI_32kHz_N, N);
-
- CTS = r600_hdmi_ACR[i].CTS_44_1kHz;
- N = r600_hdmi_ACR[i].N_44_1kHz;
- r600_hdmi_calc_CTS(clock, &CTS, N, 44100);
- WREG32(offset+R600_HDMI_44_1kHz_CTS, CTS << 12);
- WREG32(offset+R600_HDMI_44_1kHz_N, N);
-
- CTS = r600_hdmi_ACR[i].CTS_48kHz;
- N = r600_hdmi_ACR[i].N_48kHz;
- r600_hdmi_calc_CTS(clock, &CTS, N, 48000);
- WREG32(offset+R600_HDMI_48kHz_CTS, CTS << 12);
- WREG32(offset+R600_HDMI_48kHz_N, N);
-}
-
-/*
- * calculate the crc for a given info frame
- */
-static void r600_hdmi_infoframe_checksum(uint8_t packetType,
- uint8_t versionNumber,
- uint8_t length,
- uint8_t *frame)
-{
- int i;
- frame[0] = packetType + versionNumber + length;
- for (i = 1; i <= length; i++)
- frame[0] += frame[i];
- frame[0] = 0x100 - frame[0];
-}
-
-/*
- * build an HDMI Video Info Frame
- */
-static void r600_hdmi_videoinfoframe(
- struct drm_encoder *encoder,
- enum r600_hdmi_color_format color_format,
- int active_information_present,
- uint8_t active_format_aspect_ratio,
- uint8_t scan_information,
- uint8_t colorimetry,
- uint8_t ex_colorimetry,
- uint8_t quantization,
- int ITC,
- uint8_t picture_aspect_ratio,
- uint8_t video_format_identification,
- uint8_t pixel_repetition,
- uint8_t non_uniform_picture_scaling,
- uint8_t bar_info_data_valid,
- uint16_t top_bar,
- uint16_t bottom_bar,
- uint16_t left_bar,
- uint16_t right_bar
-)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
-
- uint8_t frame[14];
-
- frame[0x0] = 0;
- frame[0x1] =
- (scan_information & 0x3) |
- ((bar_info_data_valid & 0x3) << 2) |
- ((active_information_present & 0x1) << 4) |
- ((color_format & 0x3) << 5);
- frame[0x2] =
- (active_format_aspect_ratio & 0xF) |
- ((picture_aspect_ratio & 0x3) << 4) |
- ((colorimetry & 0x3) << 6);
- frame[0x3] =
- (non_uniform_picture_scaling & 0x3) |
- ((quantization & 0x3) << 2) |
- ((ex_colorimetry & 0x7) << 4) |
- ((ITC & 0x1) << 7);
- frame[0x4] = (video_format_identification & 0x7F);
- frame[0x5] = (pixel_repetition & 0xF);
- frame[0x6] = (top_bar & 0xFF);
- frame[0x7] = (top_bar >> 8);
- frame[0x8] = (bottom_bar & 0xFF);
- frame[0x9] = (bottom_bar >> 8);
- frame[0xA] = (left_bar & 0xFF);
- frame[0xB] = (left_bar >> 8);
- frame[0xC] = (right_bar & 0xFF);
- frame[0xD] = (right_bar >> 8);
-
- r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
-
- WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
- frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
- WREG32(offset+R600_HDMI_VIDEOINFOFRAME_1,
- frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
- WREG32(offset+R600_HDMI_VIDEOINFOFRAME_2,
- frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
- WREG32(offset+R600_HDMI_VIDEOINFOFRAME_3,
- frame[0xC] | (frame[0xD] << 8));
-}
-
-/*
- * build an Audio Info Frame
- */
-static void r600_hdmi_audioinfoframe(
- struct drm_encoder *encoder,
- uint8_t channel_count,
- uint8_t coding_type,
- uint8_t sample_size,
- uint8_t sample_frequency,
- uint8_t format,
- uint8_t channel_allocation,
- uint8_t level_shift,
- int downmix_inhibit
-)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
-
- uint8_t frame[11];
-
- frame[0x0] = 0;
- frame[0x1] = (channel_count & 0x7) | ((coding_type & 0xF) << 4);
- frame[0x2] = (sample_size & 0x3) | ((sample_frequency & 0x7) << 2);
- frame[0x3] = format;
- frame[0x4] = channel_allocation;
- frame[0x5] = ((level_shift & 0xF) << 3) | ((downmix_inhibit & 0x1) << 7);
- frame[0x6] = 0;
- frame[0x7] = 0;
- frame[0x8] = 0;
- frame[0x9] = 0;
- frame[0xA] = 0;
-
- r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
-
- WREG32(offset+R600_HDMI_AUDIOINFOFRAME_0,
- frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
- WREG32(offset+R600_HDMI_AUDIOINFOFRAME_1,
- frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x8] << 24));
-}
-
-/*
- * test if audio buffer is filled enough to start playing
- */
-static int r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
-
- return (RREG32(offset+R600_HDMI_STATUS) & 0x10) != 0;
-}
-
-/*
- * has the buffer status changed since the last call?
- */
-int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- int status, result;
-
- if (!radeon_encoder->hdmi_offset)
- return 0;
-
- status = r600_hdmi_is_audio_buffer_filled(encoder);
- result = radeon_encoder->hdmi_buffer_status != status;
- radeon_encoder->hdmi_buffer_status = status;
-
- return result;
-}
-
-/*
- * write the audio workaround status to the hardware
- */
-void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- uint32_t offset = radeon_encoder->hdmi_offset;
-
- if (!offset)
- return;
-
- if (r600_hdmi_is_audio_buffer_filled(encoder)) {
-		/* disable audio workaround and start delivering audio frames */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
-
- } else if (radeon_encoder->hdmi_audio_workaround) {
-		/* enable audio workaround and start delivering audio frames */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
-
- } else {
-		/* disable audio workaround and stop delivering audio frames */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001);
- }
-}
-
-
-/*
- * update the info frames with the data from the current display mode
- */
-void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
-
- if (!offset)
- return;
-
- r600_audio_set_clock(encoder, mode->clock);
-
- WREG32(offset+R600_HDMI_UNKNOWN_0, 0x1000);
- WREG32(offset+R600_HDMI_UNKNOWN_1, 0x0);
- WREG32(offset+R600_HDMI_UNKNOWN_2, 0x1000);
-
- r600_hdmi_update_ACR(encoder, mode->clock);
-
- WREG32(offset+R600_HDMI_VIDEOCNTL, 0x13);
-
- WREG32(offset+R600_HDMI_VERSION, 0x202);
-
- r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-
-	/* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
- WREG32(offset+R600_HDMI_AUDIO_DEBUG_0, 0x00FFFFFF);
- WREG32(offset+R600_HDMI_AUDIO_DEBUG_1, 0x007FFFFF);
- WREG32(offset+R600_HDMI_AUDIO_DEBUG_2, 0x00000001);
- WREG32(offset+R600_HDMI_AUDIO_DEBUG_3, 0x00000001);
-
- r600_hdmi_audio_workaround(encoder);
-
-	/* audio packets per line, does anyone know how to calculate this? */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
-
-	/* update? reset? don't really know */
- WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000);
-}
-
-/*
- * update settings with current parameters from audio engine
- */
-void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
- int channels,
- int rate,
- int bps,
- uint8_t status_bits,
- uint8_t category_code)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
-
- uint32_t iec;
-
- if (!offset)
- return;
-
- DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample,\n",
- r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped",
- channels, rate, bps);
- DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
- (int)status_bits, (int)category_code);
-
- iec = 0;
- if (status_bits & AUDIO_STATUS_PROFESSIONAL)
- iec |= 1 << 0;
- if (status_bits & AUDIO_STATUS_NONAUDIO)
- iec |= 1 << 1;
- if (status_bits & AUDIO_STATUS_COPYRIGHT)
- iec |= 1 << 2;
- if (status_bits & AUDIO_STATUS_EMPHASIS)
- iec |= 1 << 3;
-
- iec |= category_code << 8;
-
- switch (rate) {
- case 32000: iec |= 0x3 << 24; break;
- case 44100: iec |= 0x0 << 24; break;
- case 88200: iec |= 0x8 << 24; break;
- case 176400: iec |= 0xc << 24; break;
- case 48000: iec |= 0x2 << 24; break;
- case 96000: iec |= 0xa << 24; break;
- case 192000: iec |= 0xe << 24; break;
- }
-
- WREG32(offset+R600_HDMI_IEC60958_1, iec);
-
- iec = 0;
- switch (bps) {
- case 16: iec |= 0x2; break;
- case 20: iec |= 0x3; break;
- case 24: iec |= 0xb; break;
- }
- if (status_bits & AUDIO_STATUS_V)
- iec |= 0x5 << 16;
-
- WREG32_P(offset+R600_HDMI_IEC60958_2, iec, ~0x5000f);
-
- /* 0x021 or 0x031 sets the audio frame length */
- WREG32(offset+R600_HDMI_AUDIOCNTL, 0x31);
- r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
-
- r600_hdmi_audio_workaround(encoder);
-
-	/* update? reset? don't really know */
- WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
-}
-
-/*
- * enable/disable the HDMI engine
- */
-void r600_hdmi_enable(struct drm_encoder *encoder, int enable)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
-
- if (!offset)
- return;
-
- DRM_DEBUG("%s HDMI interface @ 0x%04X\n", enable ? "Enabling" : "Disabling", offset);
-
-	/* some versions of atombios ignore the enable HDMI flag,
-	 * so enabling/disabling HDMI was moved here for TMDS1+2 */
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- WREG32_P(AVIVO_TMDSA_CNTL, enable ? 0x4 : 0x0, ~0x4);
- WREG32(offset+R600_HDMI_ENABLE, enable ? 0x101 : 0x0);
- break;
-
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- WREG32_P(AVIVO_LVTMA_CNTL, enable ? 0x4 : 0x0, ~0x4);
- WREG32(offset+R600_HDMI_ENABLE, enable ? 0x105 : 0x0);
- break;
-
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-		/* This part is doubtful in my opinion */
- WREG32(offset+R600_HDMI_ENABLE, enable ? 0x110 : 0x0);
- break;
-
- default:
- DRM_ERROR("unknown HDMI output type\n");
- break;
- }
-}
-
-/*
- * determine which register offset the HDMI encoder uses
- */
-void r600_hdmi_init(struct drm_encoder *encoder)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
- break;
-
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- switch (r600_audio_tmds_index(encoder)) {
- case 0:
- radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
- break;
- case 1:
- radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
- break;
- default:
- radeon_encoder->hdmi_offset = 0;
- break;
- }
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
- break;
-
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- radeon_encoder->hdmi_offset = R600_HDMI_DIG;
- break;
-
- default:
- radeon_encoder->hdmi_offset = 0;
- break;
- }
-
- DRM_DEBUG("using HDMI engine at offset 0x%04X for encoder 0x%x\n",
- radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
-
-	/* TODO: make this configurable */
- radeon_encoder->hdmi_audio_workaround = 0;
-}
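Note on the ACR table removed above: it encodes the usual HDMI audio clock regeneration relationship, CTS = f_TMDS * N / (128 * f_s), which r600_hdmi_calc_CTS() also falls back to (in kHz) when a clock is not in the table. A minimal standalone sketch, not part of this patch and with a made-up helper name hdmi_cts(), reproduces one table row; it works in Hz with 64-bit arithmetic to avoid 32-bit overflow:

#include <stdio.h>

/* Sketch only: CTS = f_TMDS * N / (128 * f_s), evaluated in Hz. */
static unsigned long long hdmi_cts(unsigned long long f_tmds_hz,
				   unsigned long long n,
				   unsigned long long f_s_hz)
{
	return f_tmds_hz * n / (128 * f_s_hz);
}

int main(void)
{
	/* 74.25 MHz pixel clock, 48 kHz audio, N = 6144 -> CTS = 74250,
	 * matching the { 74250, ..., 6144, 74250 } row of r600_hdmi_ACR. */
	printf("CTS = %llu\n", hdmi_cts(74250000ULL, 6144, 48000));
	return 0;
}

For clocks outside the table the driver computes the same quantity from the kHz pixel clock, which is presumably why the table pins exact values for the /1.001 rates.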
diff --git a/trunk/drivers/gpu/drm/radeon/r600_reg.h b/trunk/drivers/gpu/drm/radeon/r600_reg.h
index d0e28ffdeda9..e2d1f5f33f7e 100644
--- a/trunk/drivers/gpu/drm/radeon/r600_reg.h
+++ b/trunk/drivers/gpu/drm/radeon/r600_reg.h
@@ -110,79 +110,5 @@
#define R600_BIOS_6_SCRATCH 0x173c
#define R600_BIOS_7_SCRATCH 0x1740
-/* Audio, these regs were reverse engineered,
- * so the chance is high that the naming is wrong
- * R6xx+ ??? */
-
-/* Audio clocks */
-#define R600_AUDIO_PLL1_MUL 0x0514
-#define R600_AUDIO_PLL1_DIV 0x0518
-#define R600_AUDIO_PLL2_MUL 0x0524
-#define R600_AUDIO_PLL2_DIV 0x0528
-#define R600_AUDIO_CLK_SRCSEL 0x0534
-
-/* Audio general */
-#define R600_AUDIO_ENABLE 0x7300
-#define R600_AUDIO_TIMING 0x7344
-
-/* Audio params */
-#define R600_AUDIO_VENDOR_ID 0x7380
-#define R600_AUDIO_REVISION_ID 0x7384
-#define R600_AUDIO_ROOT_NODE_COUNT 0x7388
-#define R600_AUDIO_NID1_NODE_COUNT 0x738c
-#define R600_AUDIO_NID1_TYPE 0x7390
-#define R600_AUDIO_SUPPORTED_SIZE_RATE 0x7394
-#define R600_AUDIO_SUPPORTED_CODEC 0x7398
-#define R600_AUDIO_SUPPORTED_POWER_STATES 0x739c
-#define R600_AUDIO_NID2_CAPS 0x73a0
-#define R600_AUDIO_NID3_CAPS 0x73a4
-#define R600_AUDIO_NID3_PIN_CAPS 0x73a8
-
-/* Audio conn list */
-#define R600_AUDIO_CONN_LIST_LEN 0x73ac
-#define R600_AUDIO_CONN_LIST 0x73b0
-
-/* Audio verbs */
-#define R600_AUDIO_RATE_BPS_CHANNEL 0x73c0
-#define R600_AUDIO_PLAYING 0x73c4
-#define R600_AUDIO_IMPLEMENTATION_ID 0x73c8
-#define R600_AUDIO_CONFIG_DEFAULT 0x73cc
-#define R600_AUDIO_PIN_SENSE 0x73d0
-#define R600_AUDIO_PIN_WIDGET_CNTL 0x73d4
-#define R600_AUDIO_STATUS_BITS 0x73d8
-
-/* HDMI base register addresses */
-#define R600_HDMI_TMDS1 0x7400
-#define R600_HDMI_TMDS2 0x7700
-#define R600_HDMI_DIG 0x7800
-
-/* HDMI registers */
-#define R600_HDMI_ENABLE 0x00
-#define R600_HDMI_STATUS 0x04
-#define R600_HDMI_CNTL 0x08
-#define R600_HDMI_UNKNOWN_0 0x0C
-#define R600_HDMI_AUDIOCNTL 0x10
-#define R600_HDMI_VIDEOCNTL 0x14
-#define R600_HDMI_VERSION 0x18
-#define R600_HDMI_UNKNOWN_1 0x28
-#define R600_HDMI_VIDEOINFOFRAME_0 0x54
-#define R600_HDMI_VIDEOINFOFRAME_1 0x58
-#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
-#define R600_HDMI_VIDEOINFOFRAME_3 0x60
-#define R600_HDMI_32kHz_CTS 0xac
-#define R600_HDMI_32kHz_N 0xb0
-#define R600_HDMI_44_1kHz_CTS 0xb4
-#define R600_HDMI_44_1kHz_N 0xb8
-#define R600_HDMI_48kHz_CTS 0xbc
-#define R600_HDMI_48kHz_N 0xc0
-#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
-#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
-#define R600_HDMI_IEC60958_1 0xd4
-#define R600_HDMI_IEC60958_2 0xd8
-#define R600_HDMI_UNKNOWN_2 0xdc
-#define R600_HDMI_AUDIO_DEBUG_0 0xe0
-#define R600_HDMI_AUDIO_DEBUG_1 0xe4
-#define R600_HDMI_AUDIO_DEBUG_2 0xe8
-#define R600_HDMI_AUDIO_DEBUG_3 0xec
#endif
diff --git a/trunk/drivers/gpu/drm/radeon/radeon.h b/trunk/drivers/gpu/drm/radeon/radeon.h
index cd650fd3964e..c938bb54123c 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon.h
+++ b/trunk/drivers/gpu/drm/radeon/radeon.h
@@ -89,7 +89,6 @@ extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;
extern int radeon_new_pll;
-extern int radeon_audio;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -815,14 +814,6 @@ struct radeon_device {
struct r600_ih ih; /* r6/700 interrupt ring */
struct workqueue_struct *wq;
struct work_struct hotplug_work;
-
- /* audio stuff */
- struct timer_list audio_timer;
- int audio_channels;
- int audio_rate;
- int audio_bits_per_sample;
- uint8_t audio_status_bits;
- uint8_t audio_category_code;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -1025,7 +1016,6 @@ extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
-extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
struct r100_mc_save {
@@ -1156,21 +1146,6 @@ extern void r600_irq_fini(struct radeon_device *rdev);
extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_irq_set(struct radeon_device *rdev);
-extern int r600_audio_init(struct radeon_device *rdev);
-extern int r600_audio_tmds_index(struct drm_encoder *encoder);
-extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
-extern void r600_audio_fini(struct radeon_device *rdev);
-extern void r600_hdmi_init(struct drm_encoder *encoder);
-extern void r600_hdmi_enable(struct drm_encoder *encoder, int enable);
-extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
-extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
-extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
- int channels,
- int rate,
- int bps,
- uint8_t status_bits,
- uint8_t category_code);
-
#include "radeon_object.h"
#endif
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_drv.c b/trunk/drivers/gpu/drm/radeon/radeon_drv.c
index dbd56ef82f9c..c5c45e626d74 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_drv.c
@@ -87,7 +87,6 @@ int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
int radeon_new_pll = 1;
-int radeon_audio = 1;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -125,9 +124,6 @@ module_param_named(tv, radeon_tv, int, 0444);
MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips");
module_param_named(new_pll, radeon_new_pll, int, 0444);
-MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
-module_param_named(audio, radeon_audio, int, 0444);
-
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_encoders.c b/trunk/drivers/gpu/drm/radeon/radeon_encoders.c
index 0d1d908e5225..b4f23ec93201 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -438,7 +438,6 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
union lvds_encoder_control args;
int index = 0;
- int hdmi_detected = 0;
uint8_t frev, crev;
struct radeon_encoder_atom_dig *dig;
struct drm_connector *connector;
@@ -459,9 +458,6 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
if (!radeon_connector->con_priv)
return;
- if (drm_detect_hdmi_monitor(radeon_connector->edid))
- hdmi_detected = 1;
-
dig_connector = radeon_connector->con_priv;
memset(&args, 0, sizeof(args));
@@ -491,7 +487,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
case 1:
args.v1.ucMisc = 0;
args.v1.ucAction = action;
- if (hdmi_detected)
+ if (drm_detect_hdmi_monitor(radeon_connector->edid))
args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
@@ -516,7 +512,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
if (dig->coherent_mode)
args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
}
- if (hdmi_detected)
+ if (drm_detect_hdmi_monitor(radeon_connector->edid))
args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v2.ucTruncate = 0;
@@ -556,7 +552,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- r600_hdmi_enable(encoder, hdmi_detected);
+
}
int
@@ -897,6 +893,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
}
static void
@@ -1165,6 +1162,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
}
static void
@@ -1267,8 +1265,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
break;
}
atombios_apply_encoder_quirks(encoder, adjusted_mode);
-
- r600_hdmi_setmode(encoder, adjusted_mode);
}
static bool
@@ -1514,6 +1510,4 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
break;
}
-
- r600_hdmi_init(encoder);
}
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_gem.c b/trunk/drivers/gpu/drm/radeon/radeon_gem.c
index 60df2d7e7e4c..2944486871b0 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_gem.c
@@ -66,9 +66,8 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
}
r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
if (r) {
- if (r != -ERESTARTSYS)
- DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
- size, initial_domain, alignment, r);
+ DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
+ size, initial_domain, alignment);
mutex_lock(&rdev->ddev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&rdev->ddev->struct_mutex);
@@ -351,10 +350,9 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
rbo = gobj->driver_private;
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
- goto out;
+ return r;
radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
radeon_bo_unreserve(rbo);
-out:
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_mode.h b/trunk/drivers/gpu/drm/radeon/radeon_mode.h
index 3dcbe130c422..44d4b652ea12 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/trunk/drivers/gpu/drm/radeon/radeon_mode.h
@@ -334,9 +334,6 @@ struct radeon_encoder {
enum radeon_rmx_type rmx_type;
struct drm_display_mode native_mode;
void *enc_priv;
- int hdmi_offset;
- int hdmi_audio_workaround;
- int hdmi_buffer_status;
};
struct radeon_connector_atom_dig {
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_object.c b/trunk/drivers/gpu/drm/radeon/radeon_object.c
index d9ffe1f56e8f..544e18ffaf22 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_object.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_object.c
@@ -56,13 +56,6 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
kfree(bo);
}
-bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
-{
- if (bo->destroy == &radeon_ttm_bo_destroy)
- return true;
- return false;
-}
-
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
u32 c = 0;
@@ -78,8 +71,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
if (domain & RADEON_GEM_DOMAIN_CPU)
rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
- if (!c)
- rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
rbo->placement.num_placement = c;
rbo->placement.num_busy_placement = c;
}
@@ -490,20 +481,14 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
}
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem)
+ struct ttm_mem_reg *mem)
{
- struct radeon_bo *rbo;
- if (!radeon_ttm_bo_is_radeon_bo(bo))
- return;
- rbo = container_of(bo, struct radeon_bo, tbo);
+ struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 1);
}
void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
- struct radeon_bo *rbo;
- if (!radeon_ttm_bo_is_radeon_bo(bo))
- return;
- rbo = container_of(bo, struct radeon_bo, tbo);
+ struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 0);
}
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_object.h b/trunk/drivers/gpu/drm/radeon/radeon_object.h
index a02f18011ad1..f6b69c2c0d00 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_object.h
+++ b/trunk/drivers/gpu/drm/radeon/radeon_object.h
@@ -59,17 +59,19 @@ static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
*
* Returns:
* -EBUSY: buffer is busy and @no_wait is true
- * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * -ERESTART: A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space.
*/
static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
{
int r;
+retry:
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS)
- dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
+ if (r == -ERESTART)
+ goto retry;
+ dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
return r;
}
return 0;
@@ -123,10 +125,12 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
{
int r;
+retry:
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS)
- dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
+ if (r == -ERESTART)
+ goto retry;
+ dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
return r;
}
spin_lock(&bo->tbo.lock);
@@ -136,6 +140,8 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
spin_unlock(&bo->tbo.lock);
ttm_bo_unreserve(&bo->tbo);
+ if (unlikely(r == -ERESTART))
+ goto retry;
return r;
}
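Aside (not part of the patch): the radeon_object.h hunk above restores an open-coded retry whenever ttm_bo_reserve() returns -ERESTART, i.e. the wait for the reservation was interrupted by a signal. A self-contained sketch of that pattern, using a stand-in fake_reserve() instead of the real TTM call, might look like this:

#include <stdio.h>

#ifndef ERESTART
#define ERESTART 85	/* Linux value; guarded in case libc already defines it */
#endif

/* Stand-in for ttm_bo_reserve(): pretend the first two attempts are
 * interrupted by a signal, then succeed. */
static int fake_reserve(void)
{
	static int calls;
	return (++calls < 3) ? -ERESTART : 0;
}

static int reserve_with_retry(void)
{
	int r;
retry:
	r = fake_reserve();
	if (r == -ERESTART)
		goto retry;	/* simply try again, as the restored helpers do */
	return r;
}

int main(void)
{
	printf("reserve returned %d\n", reserve_with_retry());
	return 0;
}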
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_pm.c b/trunk/drivers/gpu/drm/radeon/radeon_pm.c
index 8bce64cdc320..34b08d307c81 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_pm.c
@@ -44,11 +44,8 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
- seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
- seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
- if (rdev->asic->get_memory_clock)
- seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
+ seq_printf(m, "engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+ seq_printf(m, "memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
return 0;
}
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_ttm.c b/trunk/drivers/gpu/drm/radeon/radeon_ttm.c
index d7fd160cc671..5a19d529d1c0 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -200,19 +200,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
static void radeon_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
- struct radeon_bo *rbo;
- static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
-
- if (!radeon_ttm_bo_is_radeon_bo(bo)) {
- placement->fpfn = 0;
- placement->lpfn = 0;
- placement->placement = &placements;
- placement->busy_placement = &placements;
- placement->num_placement = 1;
- placement->num_busy_placement = 1;
- return;
- }
- rbo = container_of(bo, struct radeon_bo, tbo);
+ struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
diff --git a/trunk/drivers/gpu/drm/radeon/rs400.c b/trunk/drivers/gpu/drm/radeon/rs400.c
index 368415df5f3a..c1fcdddb6be6 100644
--- a/trunk/drivers/gpu/drm/radeon/rs400.c
+++ b/trunk/drivers/gpu/drm/radeon/rs400.c
@@ -497,8 +497,6 @@ int rs400_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* Get vram informations */
rs400_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
diff --git a/trunk/drivers/gpu/drm/radeon/rv770.c b/trunk/drivers/gpu/drm/radeon/rv770.c
index 3bcb66e52786..fbb0357f1ec3 100644
--- a/trunk/drivers/gpu/drm/radeon/rv770.c
+++ b/trunk/drivers/gpu/drm/radeon/rv770.c
@@ -892,14 +892,6 @@ static int rv770_startup(struct radeon_device *rdev)
}
rv770_gpu_init(rdev);
- if (!rdev->r600_blit.shader_obj) {
- r = r600_blit_init(rdev);
- if (r) {
- DRM_ERROR("radeon: failed blitter (%d).\n", r);
- return r;
- }
- }
-
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (unlikely(r != 0))
return r;
@@ -1059,6 +1051,12 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
+ r = r600_blit_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed blitter (%d).\n", r);
+ return r;
+ }
+
rdev->accel_working = true;
r = rv770_startup(rdev);
if (r) {
diff --git a/trunk/drivers/gpu/drm/ttm/ttm_bo.c b/trunk/drivers/gpu/drm/ttm/ttm_bo.c
index 2920f9a279e1..1fbb2eea5e88 100644
--- a/trunk/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/trunk/drivers/gpu/drm/ttm/ttm_bo.c
@@ -71,34 +71,34 @@ static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
return -EINVAL;
}
-static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
+static void ttm_mem_type_manager_debug(struct ttm_bo_global *glob,
+ struct ttm_mem_type_manager *man)
{
- struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-
printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
- printk(KERN_ERR TTM_PFX " size: %llu\n", man->size);
+ printk(KERN_ERR TTM_PFX " size: %ld\n", (unsigned long)man->size);
printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
man->available_caching);
printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
man->default_caching);
- if (mem_type != TTM_PL_SYSTEM) {
- spin_lock(&bdev->glob->lru_lock);
- drm_mm_debug_table(&man->manager, TTM_PFX);
- spin_unlock(&bdev->glob->lru_lock);
- }
+ spin_lock(&glob->lru_lock);
+ drm_mm_debug_table(&man->manager, TTM_PFX);
+ spin_unlock(&glob->lru_lock);
}
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_global *glob = bo->glob;
+ struct ttm_mem_type_manager *man;
int i, ret, mem_type;
- printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
+ printk(KERN_ERR TTM_PFX "No space for %p (%ld pages, %ldK, %ldM)\n",
bo, bo->mem.num_pages, bo->mem.size >> 10,
bo->mem.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
@@ -106,9 +106,10 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
&mem_type);
if (ret)
return;
+ man = &bdev->man[mem_type];
printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
i, placement->placement[i], mem_type);
- ttm_mem_type_debug(bo->bdev, mem_type);
+ ttm_mem_type_manager_debug(glob, man);
}
}
@@ -464,8 +465,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
spin_unlock(&bo->lock);
spin_lock(&glob->lru_lock);
- put_count = ttm_bo_del_from_lru(bo);
-
ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
BUG_ON(ret);
if (bo->ttm)
@@ -473,19 +472,20 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
if (!list_empty(&bo->ddestroy)) {
list_del_init(&bo->ddestroy);
- ++put_count;
+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
}
if (bo->mem.mm_node) {
bo->mem.mm_node->private = NULL;
drm_mm_put_block(bo->mem.mm_node);
bo->mem.mm_node = NULL;
}
+ put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
atomic_set(&bo->reserved, 0);
while (put_count--)
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ kref_put(&bo->list_kref, ttm_bo_release_list);
return 0;
}
@@ -684,45 +684,19 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
struct ttm_buffer_object *bo;
int ret, put_count = 0;
-retry:
spin_lock(&glob->lru_lock);
- if (list_empty(&man->lru)) {
- spin_unlock(&glob->lru_lock);
- return -EBUSY;
- }
-
bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
kref_get(&bo->list_kref);
-
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
- if (unlikely(ret == -EBUSY)) {
- spin_unlock(&glob->lru_lock);
- if (likely(!no_wait))
- ret = ttm_bo_wait_unreserved(bo, interruptible);
-
- kref_put(&bo->list_kref, ttm_bo_release_list);
-
- /**
- * We *need* to retry after releasing the lru lock.
- */
-
- if (unlikely(ret != 0))
- return ret;
- goto retry;
- }
-
- put_count = ttm_bo_del_from_lru(bo);
+ ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
+ if (likely(ret == 0))
+ put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
-
- BUG_ON(ret != 0);
-
+ if (unlikely(ret != 0))
+ return ret;
while (put_count--)
kref_put(&bo->list_kref, ttm_bo_ref_bug);
-
ret = ttm_bo_evict(bo, interruptible, no_wait);
ttm_bo_unreserve(bo);
-
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
@@ -875,7 +849,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
int i, ret;
mem->mm_node = NULL;
- for (i = 0; i < placement->num_placement; ++i) {
+ for (i = 0; i <= placement->num_placement; ++i) {
ret = ttm_mem_type_from_flags(placement->placement[i],
&mem_type);
if (ret)
@@ -926,8 +900,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (!type_found)
return -EINVAL;
- for (i = 0; i < placement->num_busy_placement; ++i) {
- ret = ttm_mem_type_from_flags(placement->busy_placement[i],
+ for (i = 0; i <= placement->num_busy_placement; ++i) {
+ ret = ttm_mem_type_from_flags(placement->placement[i],
&mem_type);
if (ret)
return ret;
@@ -937,7 +911,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (!ttm_bo_mt_compatible(man,
bo->type == ttm_bo_type_user,
mem_type,
- placement->busy_placement[i],
+ placement->placement[i],
&cur_flags))
continue;
@@ -947,7 +921,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
* Use the access and other non-mapping-related flag bits from
* the memory placement flags to the current flags
*/
- ttm_flag_masked(&cur_flags, placement->busy_placement[i],
+ ttm_flag_masked(&cur_flags, placement->placement[i],
~TTM_PL_MASK_MEMTYPE);
ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
@@ -1141,7 +1115,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->glob = bdev->glob;
bo->type = type;
bo->num_pages = num_pages;
- bo->mem.size = num_pages << PAGE_SHIFT;
bo->mem.mem_type = TTM_PL_SYSTEM;
bo->mem.num_pages = bo->num_pages;
bo->mem.mm_node = NULL;
diff --git a/trunk/drivers/gpu/drm/ttm/ttm_bo_vm.c b/trunk/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 668dbe8b8dd3..609a85a4d855 100644
--- a/trunk/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/trunk/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -320,7 +320,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
return -EFAULT;
driver = bo->bdev->driver;
- if (unlikely(!driver->verify_access)) {
+ if (unlikely(driver->verify_access)) {
ret = -EPERM;
goto out_unref;
}
diff --git a/trunk/drivers/infiniband/core/addr.c b/trunk/drivers/infiniband/core/addr.c
index abbb06996f9e..bd07803e9183 100644
--- a/trunk/drivers/infiniband/core/addr.c
+++ b/trunk/drivers/infiniband/core/addr.c
@@ -36,6 +36,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -91,12 +92,22 @@ EXPORT_SYMBOL(rdma_addr_unregister_client);
int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
const unsigned char *dst_dev_addr)
{
- dev_addr->dev_type = dev->type;
+ switch (dev->type) {
+ case ARPHRD_INFINIBAND:
+ dev_addr->dev_type = RDMA_NODE_IB_CA;
+ break;
+ case ARPHRD_ETHER:
+ dev_addr->dev_type = RDMA_NODE_RNIC;
+ break;
+ default:
+ return -EADDRNOTAVAIL;
+ }
+
memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN);
if (dst_dev_addr)
memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN);
- dev_addr->bound_dev_if = dev->ifindex;
+ dev_addr->src_dev = dev;
return 0;
}
EXPORT_SYMBOL(rdma_copy_addr);
@@ -106,15 +117,6 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
struct net_device *dev;
int ret = -EADDRNOTAVAIL;
- if (dev_addr->bound_dev_if) {
- dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
- if (!dev)
- return -ENODEV;
- ret = rdma_copy_addr(dev_addr, dev, NULL);
- dev_put(dev);
- return ret;
- }
-
switch (addr->sa_family) {
case AF_INET:
dev = ip_dev_find(&init_net,
@@ -129,7 +131,6 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
- read_lock(&dev_base_lock);
for_each_netdev(&init_net, dev) {
if (ipv6_chk_addr(&init_net,
&((struct sockaddr_in6 *) addr)->sin6_addr,
@@ -138,7 +139,6 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
break;
}
}
- read_unlock(&dev_base_lock);
break;
#endif
}
@@ -176,9 +176,48 @@ static void queue_req(struct addr_req *req)
mutex_unlock(&lock);
}
-static int addr4_resolve(struct sockaddr_in *src_in,
- struct sockaddr_in *dst_in,
- struct rdma_dev_addr *addr)
+static void addr_send_arp(struct sockaddr *dst_in)
+{
+ struct rtable *rt;
+ struct flowi fl;
+
+ memset(&fl, 0, sizeof fl);
+
+ switch (dst_in->sa_family) {
+ case AF_INET:
+ fl.nl_u.ip4_u.daddr =
+ ((struct sockaddr_in *) dst_in)->sin_addr.s_addr;
+
+ if (ip_route_output_key(&init_net, &rt, &fl))
+ return;
+
+ neigh_event_send(rt->u.dst.neighbour, NULL);
+ ip_rt_put(rt);
+ break;
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case AF_INET6:
+ {
+ struct dst_entry *dst;
+
+ fl.nl_u.ip6_u.daddr =
+ ((struct sockaddr_in6 *) dst_in)->sin6_addr;
+
+ dst = ip6_route_output(&init_net, NULL, &fl);
+ if (!dst)
+ return;
+
+ neigh_event_send(dst->neighbour, NULL);
+ dst_release(dst);
+ break;
+ }
+#endif
+ }
+}
+
+static int addr4_resolve_remote(struct sockaddr_in *src_in,
+ struct sockaddr_in *dst_in,
+ struct rdma_dev_addr *addr)
{
__be32 src_ip = src_in->sin_addr.s_addr;
__be32 dst_ip = dst_in->sin_addr.s_addr;
@@ -190,22 +229,10 @@ static int addr4_resolve(struct sockaddr_in *src_in,
memset(&fl, 0, sizeof fl);
fl.nl_u.ip4_u.daddr = dst_ip;
fl.nl_u.ip4_u.saddr = src_ip;
- fl.oif = addr->bound_dev_if;
-
ret = ip_route_output_key(&init_net, &rt, &fl);
if (ret)
goto out;
- src_in->sin_family = AF_INET;
- src_in->sin_addr.s_addr = rt->rt_src;
-
- if (rt->idev->dev->flags & IFF_LOOPBACK) {
- ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
- if (!ret)
- memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
- goto put;
- }
-
/* If the device does ARP internally, return 'done' */
if (rt->idev->dev->flags & IFF_NOARP) {
rdma_copy_addr(addr, rt->idev->dev, NULL);
@@ -213,14 +240,21 @@ static int addr4_resolve(struct sockaddr_in *src_in,
}
neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->idev->dev);
- if (!neigh || !(neigh->nud_state & NUD_VALID)) {
- neigh_event_send(rt->u.dst.neighbour, NULL);
+ if (!neigh) {
ret = -ENODATA;
- if (neigh)
- goto release;
goto put;
}
+ if (!(neigh->nud_state & NUD_VALID)) {
+ ret = -ENODATA;
+ goto release;
+ }
+
+ if (!src_ip) {
+ src_in->sin_family = dst_in->sin_family;
+ src_in->sin_addr.s_addr = rt->rt_src;
+ }
+
ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
release:
neigh_release(neigh);
@@ -231,77 +265,52 @@ static int addr4_resolve(struct sockaddr_in *src_in,
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static int addr6_resolve(struct sockaddr_in6 *src_in,
- struct sockaddr_in6 *dst_in,
- struct rdma_dev_addr *addr)
+static int addr6_resolve_remote(struct sockaddr_in6 *src_in,
+ struct sockaddr_in6 *dst_in,
+ struct rdma_dev_addr *addr)
{
struct flowi fl;
struct neighbour *neigh;
struct dst_entry *dst;
- int ret;
+ int ret = -ENODATA;
memset(&fl, 0, sizeof fl);
- ipv6_addr_copy(&fl.fl6_dst, &dst_in->sin6_addr);
- ipv6_addr_copy(&fl.fl6_src, &src_in->sin6_addr);
- fl.oif = addr->bound_dev_if;
+ fl.nl_u.ip6_u.daddr = dst_in->sin6_addr;
+ fl.nl_u.ip6_u.saddr = src_in->sin6_addr;
dst = ip6_route_output(&init_net, NULL, &fl);
- if ((ret = dst->error))
- goto put;
-
- if (ipv6_addr_any(&fl.fl6_src)) {
- ret = ipv6_dev_get_saddr(&init_net, ip6_dst_idev(dst)->dev,
- &fl.fl6_dst, 0, &fl.fl6_src);
- if (ret)
- goto put;
-
- src_in->sin6_family = AF_INET6;
- ipv6_addr_copy(&src_in->sin6_addr, &fl.fl6_src);
- }
-
- if (dst->dev->flags & IFF_LOOPBACK) {
- ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
- if (!ret)
- memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
- goto put;
- }
+ if (!dst)
+ return ret;
- /* If the device does ARP internally, return 'done' */
if (dst->dev->flags & IFF_NOARP) {
ret = rdma_copy_addr(addr, dst->dev, NULL);
- goto put;
- }
-
- neigh = dst->neighbour;
- if (!neigh || !(neigh->nud_state & NUD_VALID)) {
- neigh_event_send(dst->neighbour, NULL);
- ret = -ENODATA;
- goto put;
+ } else {
+ neigh = dst->neighbour;
+ if (neigh && (neigh->nud_state & NUD_VALID))
+ ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
}
- ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
-put:
dst_release(dst);
return ret;
}
#else
-static int addr6_resolve(struct sockaddr_in6 *src_in,
- struct sockaddr_in6 *dst_in,
- struct rdma_dev_addr *addr)
+static int addr6_resolve_remote(struct sockaddr_in6 *src_in,
+ struct sockaddr_in6 *dst_in,
+ struct rdma_dev_addr *addr)
{
return -EADDRNOTAVAIL;
}
#endif
-static int addr_resolve(struct sockaddr *src_in,
- struct sockaddr *dst_in,
- struct rdma_dev_addr *addr)
+static int addr_resolve_remote(struct sockaddr *src_in,
+ struct sockaddr *dst_in,
+ struct rdma_dev_addr *addr)
{
if (src_in->sa_family == AF_INET) {
- return addr4_resolve((struct sockaddr_in *) src_in,
+ return addr4_resolve_remote((struct sockaddr_in *) src_in,
(struct sockaddr_in *) dst_in, addr);
} else
- return addr6_resolve((struct sockaddr_in6 *) src_in,
+ return addr6_resolve_remote((struct sockaddr_in6 *) src_in,
(struct sockaddr_in6 *) dst_in, addr);
}
@@ -318,7 +327,8 @@ static void process_req(struct work_struct *work)
if (req->status == -ENODATA) {
src_in = (struct sockaddr *) &req->src_addr;
dst_in = (struct sockaddr *) &req->dst_addr;
- req->status = addr_resolve(src_in, dst_in, req->addr);
+ req->status = addr_resolve_remote(src_in, dst_in,
+ req->addr);
if (req->status && time_after_eq(jiffies, req->timeout))
req->status = -ETIMEDOUT;
else if (req->status == -ENODATA)
@@ -342,6 +352,82 @@ static void process_req(struct work_struct *work)
}
}
+static int addr_resolve_local(struct sockaddr *src_in,
+ struct sockaddr *dst_in,
+ struct rdma_dev_addr *addr)
+{
+ struct net_device *dev;
+ int ret;
+
+ switch (dst_in->sa_family) {
+ case AF_INET:
+ {
+ __be32 src_ip = ((struct sockaddr_in *) src_in)->sin_addr.s_addr;
+ __be32 dst_ip = ((struct sockaddr_in *) dst_in)->sin_addr.s_addr;
+
+ dev = ip_dev_find(&init_net, dst_ip);
+ if (!dev)
+ return -EADDRNOTAVAIL;
+
+ if (ipv4_is_zeronet(src_ip)) {
+ src_in->sa_family = dst_in->sa_family;
+ ((struct sockaddr_in *) src_in)->sin_addr.s_addr = dst_ip;
+ ret = rdma_copy_addr(addr, dev, dev->dev_addr);
+ } else if (ipv4_is_loopback(src_ip)) {
+ ret = rdma_translate_ip(dst_in, addr);
+ if (!ret)
+ memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
+ } else {
+ ret = rdma_translate_ip(src_in, addr);
+ if (!ret)
+ memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
+ }
+ dev_put(dev);
+ break;
+ }
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case AF_INET6:
+ {
+ struct in6_addr *a;
+
+ for_each_netdev(&init_net, dev)
+ if (ipv6_chk_addr(&init_net,
+ &((struct sockaddr_in6 *) dst_in)->sin6_addr,
+ dev, 1))
+ break;
+
+ if (!dev)
+ return -EADDRNOTAVAIL;
+
+ a = &((struct sockaddr_in6 *) src_in)->sin6_addr;
+
+ if (ipv6_addr_any(a)) {
+ src_in->sa_family = dst_in->sa_family;
+ ((struct sockaddr_in6 *) src_in)->sin6_addr =
+ ((struct sockaddr_in6 *) dst_in)->sin6_addr;
+ ret = rdma_copy_addr(addr, dev, dev->dev_addr);
+ } else if (ipv6_addr_loopback(a)) {
+ ret = rdma_translate_ip(dst_in, addr);
+ if (!ret)
+ memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
+ } else {
+ ret = rdma_translate_ip(src_in, addr);
+ if (!ret)
+ memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
+ }
+ break;
+ }
+#endif
+
+ default:
+ ret = -EADDRNOTAVAIL;
+ break;
+ }
+
+ return ret;
+}
+
int rdma_resolve_ip(struct rdma_addr_client *client,
struct sockaddr *src_addr, struct sockaddr *dst_addr,
struct rdma_dev_addr *addr, int timeout_ms,
@@ -357,28 +443,22 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
if (!req)
return -ENOMEM;
- src_in = (struct sockaddr *) &req->src_addr;
- dst_in = (struct sockaddr *) &req->dst_addr;
-
- if (src_addr) {
- if (src_addr->sa_family != dst_addr->sa_family) {
- ret = -EINVAL;
- goto err;
- }
-
- memcpy(src_in, src_addr, ip_addr_size(src_addr));
- } else {
- src_in->sa_family = dst_addr->sa_family;
- }
-
- memcpy(dst_in, dst_addr, ip_addr_size(dst_addr));
+ if (src_addr)
+ memcpy(&req->src_addr, src_addr, ip_addr_size(src_addr));
+ memcpy(&req->dst_addr, dst_addr, ip_addr_size(dst_addr));
req->addr = addr;
req->callback = callback;
req->context = context;
req->client = client;
atomic_inc(&client->refcount);
- req->status = addr_resolve(src_in, dst_in, addr);
+ src_in = (struct sockaddr *) &req->src_addr;
+ dst_in = (struct sockaddr *) &req->dst_addr;
+
+ req->status = addr_resolve_local(src_in, dst_in, addr);
+ if (req->status == -EADDRNOTAVAIL)
+ req->status = addr_resolve_remote(src_in, dst_in, addr);
+
switch (req->status) {
case 0:
req->timeout = jiffies;
@@ -387,16 +467,15 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
case -ENODATA:
req->timeout = msecs_to_jiffies(timeout_ms) + jiffies;
queue_req(req);
+ addr_send_arp(dst_in);
break;
default:
ret = req->status;
atomic_dec(&client->refcount);
- goto err;
+ kfree(req);
+ break;
}
return ret;
-err:
- kfree(req);
- return ret;
}
EXPORT_SYMBOL(rdma_resolve_ip);
diff --git a/trunk/drivers/infiniband/core/cma.c b/trunk/drivers/infiniband/core/cma.c
index fbdd73106000..075317884b53 100644
--- a/trunk/drivers/infiniband/core/cma.c
+++ b/trunk/drivers/infiniband/core/cma.c
@@ -330,7 +330,17 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
union ib_gid gid;
int ret = -ENODEV;
- rdma_addr_get_sgid(dev_addr, &gid);
+ switch (rdma_node_get_transport(dev_addr->dev_type)) {
+ case RDMA_TRANSPORT_IB:
+ ib_addr_get_sgid(dev_addr, &gid);
+ break;
+ case RDMA_TRANSPORT_IWARP:
+ iw_addr_get_sgid(dev_addr, &gid);
+ break;
+ default:
+ return -ENODEV;
+ }
+
list_for_each_entry(cma_dev, &dev_list, list) {
ret = ib_find_cached_gid(cma_dev->device, &gid,
&id_priv->id.port_num, NULL);
@@ -1022,17 +1032,11 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
if (rt->num_paths == 2)
rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
- if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) {
- rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
- rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
- ib_addr_set_pkey(&rt->addr.dev_addr, rt->path_rec[0].pkey);
- } else {
- ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
- &rt->addr.dev_addr);
- if (ret)
- goto destroy_id;
- }
- rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
+ ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
+ ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
+ &id->route.addr.dev_addr);
+ if (ret)
+ goto destroy_id;
id_priv = container_of(id, struct rdma_id_private, id);
id_priv->state = CMA_CONNECT;
@@ -1067,12 +1071,10 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
cma_save_net_info(&id->route.addr, &listen_id->route.addr,
ip_ver, port, src, dst);
- if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
- ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
- &id->route.addr.dev_addr);
- if (ret)
- goto err;
- }
+ ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
+ &id->route.addr.dev_addr);
+ if (ret)
+ goto err;
id_priv = container_of(id, struct rdma_id_private, id);
id_priv->state = CMA_CONNECT;
@@ -1472,6 +1474,15 @@ static void cma_listen_on_all(struct rdma_id_private *id_priv)
mutex_unlock(&lock);
}
+static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
+{
+ struct sockaddr_storage addr_in;
+
+ memset(&addr_in, 0, sizeof addr_in);
+ addr_in.ss_family = af;
+ return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
+}
+
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
struct rdma_id_private *id_priv;
@@ -1479,8 +1490,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
id_priv = container_of(id, struct rdma_id_private, id);
if (id_priv->state == CMA_IDLE) {
- ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
- ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
+ ret = cma_bind_any(id, AF_INET);
if (ret)
return ret;
}
@@ -1555,8 +1565,8 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
struct sockaddr_in6 *sin6;
memset(&path_rec, 0, sizeof path_rec);
- rdma_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
- rdma_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
+ ib_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
+ ib_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
path_rec.numb_path = 1;
path_rec.reversible = 1;
@@ -1771,11 +1781,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
if (ret)
goto out;
- id_priv->id.route.addr.dev_addr.dev_type =
- (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB) ?
- ARPHRD_INFINIBAND : ARPHRD_ETHER;
-
- rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
+ ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
id_priv->id.port_num = p;
cma_attach_to_dev(id_priv, cma_dev);
@@ -1833,7 +1839,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
struct cma_work *work;
- struct sockaddr *src, *dst;
+ struct sockaddr_in *src_in, *dst_in;
union ib_gid gid;
int ret;
@@ -1847,19 +1853,14 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
goto err;
}
- rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
- rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
+ ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
+ ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
- src = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
- if (cma_zero_addr(src)) {
- dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
- if ((src->sa_family = dst->sa_family) == AF_INET) {
- ((struct sockaddr_in *) src)->sin_addr.s_addr =
- ((struct sockaddr_in *) dst)->sin_addr.s_addr;
- } else {
- ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr,
- &((struct sockaddr_in6 *) dst)->sin6_addr);
- }
+ if (cma_zero_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) {
+ src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
+ dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
+ src_in->sin_family = dst_in->sin_family;
+ src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
}
work->id = id_priv;
@@ -1877,14 +1878,10 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
struct sockaddr *dst_addr)
{
- if (!src_addr || !src_addr->sa_family) {
- src_addr = (struct sockaddr *) &id->route.addr.src_addr;
- if ((src_addr->sa_family = dst_addr->sa_family) == AF_INET6) {
- ((struct sockaddr_in6 *) src_addr)->sin6_scope_id =
- ((struct sockaddr_in6 *) dst_addr)->sin6_scope_id;
- }
- }
- return rdma_bind_addr(id, src_addr);
+ if (src_addr && src_addr->sa_family)
+ return rdma_bind_addr(id, src_addr);
+ else
+ return cma_bind_any(id, dst_addr->sa_family);
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
@@ -2080,25 +2077,6 @@ static int cma_get_port(struct rdma_id_private *id_priv)
return ret;
}
-static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
- struct sockaddr *addr)
-{
-#if defined(CONFIG_IPv6) || defined(CONFIG_IPV6_MODULE)
- struct sockaddr_in6 *sin6;
-
- if (addr->sa_family != AF_INET6)
- return 0;
-
- sin6 = (struct sockaddr_in6 *) addr;
- if ((ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
- !sin6->sin6_scope_id)
- return -EINVAL;
-
- dev_addr->bound_dev_if = sin6->sin6_scope_id;
-#endif
- return 0;
-}
-
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
struct rdma_id_private *id_priv;
@@ -2111,13 +2089,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
return -EINVAL;
- ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
- if (ret)
- goto err1;
-
- if (cma_loopback_addr(addr)) {
- ret = cma_bind_loopback(id_priv);
- } else if (!cma_zero_addr(addr)) {
+ if (!cma_any_addr(addr)) {
ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
if (ret)
goto err1;
@@ -2136,7 +2108,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
return 0;
err2:
- if (id_priv->cma_dev) {
+ if (!cma_any_addr(addr)) {
mutex_lock(&lock);
cma_detach_from_dev(id_priv);
mutex_unlock(&lock);
@@ -2715,15 +2687,10 @@ static void cma_set_mgid(struct rdma_id_private *id_priv,
if (cma_any_addr(addr)) {
memset(mgid, 0, sizeof *mgid);
} else if ((addr->sa_family == AF_INET6) &&
- ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
+ ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFF10A01B) ==
0xFF10A01B)) {
/* IPv6 address is an SA assigned MGID. */
memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
- } else if ((addr->sa_family == AF_INET6)) {
- ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
- if (id_priv->id.ps == RDMA_PS_UDP)
- mc_map[7] = 0x01; /* Use RDMA CM signature */
- *mgid = *(union ib_gid *) (mc_map + 4);
} else {
ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
if (id_priv->id.ps == RDMA_PS_UDP)
@@ -2749,7 +2716,7 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
if (id_priv->id.ps == RDMA_PS_UDP)
rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
- rdma_addr_get_sgid(dev_addr, &rec.port_gid);
+ ib_addr_get_sgid(dev_addr, &rec.port_gid);
rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
rec.join_state = 1;
@@ -2848,7 +2815,7 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
dev_addr = &id_priv->id.route.addr.dev_addr;
- if ((dev_addr->bound_dev_if == ndev->ifindex) &&
+ if ((dev_addr->src_dev == ndev) &&
memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
ndev->name, &id_priv->id);
diff --git a/trunk/drivers/infiniband/core/sa_query.c b/trunk/drivers/infiniband/core/sa_query.c
index 7e1ffd8ccd5c..82543716d59e 100644
--- a/trunk/drivers/infiniband/core/sa_query.c
+++ b/trunk/drivers/infiniband/core/sa_query.c
@@ -604,12 +604,6 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
return ret ? ret : id;
}
-void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
-{
- ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
-}
-EXPORT_SYMBOL(ib_sa_unpack_path);
-
static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
int status,
struct ib_sa_mad *mad)
diff --git a/trunk/drivers/infiniband/core/ucma.c b/trunk/drivers/infiniband/core/ucma.c
index b2e16c332d5b..bb96d3c4b0f4 100644
--- a/trunk/drivers/infiniband/core/ucma.c
+++ b/trunk/drivers/infiniband/core/ucma.c
@@ -43,7 +43,6 @@
#include
#include
#include
-#include
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
@@ -563,10 +562,10 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
switch (route->num_paths) {
case 0:
dev_addr = &route->addr.dev_addr;
- rdma_addr_get_dgid(dev_addr,
- (union ib_gid *) &resp->ib_route[0].dgid);
- rdma_addr_get_sgid(dev_addr,
- (union ib_gid *) &resp->ib_route[0].sgid);
+ ib_addr_get_dgid(dev_addr,
+ (union ib_gid *) &resp->ib_route[0].dgid);
+ ib_addr_get_sgid(dev_addr,
+ (union ib_gid *) &resp->ib_route[0].sgid);
resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
break;
case 2:
@@ -813,51 +812,6 @@ static int ucma_set_option_id(struct ucma_context *ctx, int optname,
return ret;
}
-static int ucma_set_ib_path(struct ucma_context *ctx,
- struct ib_path_rec_data *path_data, size_t optlen)
-{
- struct ib_sa_path_rec sa_path;
- struct rdma_cm_event event;
- int ret;
-
- if (optlen % sizeof(*path_data))
- return -EINVAL;
-
- for (; optlen; optlen -= sizeof(*path_data), path_data++) {
- if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
- IB_PATH_BIDIRECTIONAL))
- break;
- }
-
- if (!optlen)
- return -EINVAL;
-
- ib_sa_unpack_path(path_data->path_rec, &sa_path);
- ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
- if (ret)
- return ret;
-
- memset(&event, 0, sizeof event);
- event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
- return ucma_event_handler(ctx->cm_id, &event);
-}
-
-static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
- void *optval, size_t optlen)
-{
- int ret;
-
- switch (optname) {
- case RDMA_OPTION_IB_PATH:
- ret = ucma_set_ib_path(ctx, optval, optlen);
- break;
- default:
- ret = -ENOSYS;
- }
-
- return ret;
-}
-
static int ucma_set_option_level(struct ucma_context *ctx, int level,
int optname, void *optval, size_t optlen)
{
@@ -867,9 +821,6 @@ static int ucma_set_option_level(struct ucma_context *ctx, int level,
case RDMA_OPTION_ID:
ret = ucma_set_option_id(ctx, optname, optval, optlen);
break;
- case RDMA_OPTION_IB:
- ret = ucma_set_option_ib(ctx, optname, optval, optlen);
- break;
default:
ret = -ENOSYS;
}
diff --git a/trunk/drivers/infiniband/core/uverbs_cmd.c b/trunk/drivers/infiniband/core/uverbs_cmd.c
index 112d3970222a..56feab6c251e 100644
--- a/trunk/drivers/infiniband/core/uverbs_cmd.c
+++ b/trunk/drivers/infiniband/core/uverbs_cmd.c
@@ -285,7 +285,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
ucontext = ibdev->alloc_ucontext(ibdev, &udata);
if (IS_ERR(ucontext)) {
- ret = PTR_ERR(ucontext);
+ ret = PTR_ERR(file->ucontext);
goto err;
}
diff --git a/trunk/drivers/infiniband/hw/amso1100/c2_qp.c b/trunk/drivers/infiniband/hw/amso1100/c2_qp.c
index ad518868df77..a6d89440ad2c 100644
--- a/trunk/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/trunk/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -798,10 +798,8 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
u8 actual_sge_count;
u32 msg_size;
- if (qp->state > IB_QPS_RTS) {
- err = -EINVAL;
- goto out;
- }
+ if (qp->state > IB_QPS_RTS)
+ return -EINVAL;
while (ib_wr) {
@@ -932,7 +930,6 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
ib_wr = ib_wr->next;
}
-out:
if (err)
*bad_wr = ib_wr;
return err;
@@ -947,10 +944,8 @@ int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
unsigned long lock_flags;
int err = 0;
- if (qp->state > IB_QPS_RTS) {
- err = -EINVAL;
- goto out;
- }
+ if (qp->state > IB_QPS_RTS)
+ return -EINVAL;
/*
* Try and post each work request
@@ -1003,7 +998,6 @@ int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
ib_wr = ib_wr->next;
}
-out:
if (err)
*bad_wr = ib_wr;
return err;
diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch_qp.c b/trunk/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 3eb8cecf81d7..1cecf98829ac 100644
--- a/trunk/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/trunk/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -365,19 +365,18 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qhp->lock, flag);
if (qhp->attr.state > IWCH_QP_STATE_RTS) {
spin_unlock_irqrestore(&qhp->lock, flag);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
qhp->wq.sq_size_log2);
if (num_wrs <= 0) {
spin_unlock_irqrestore(&qhp->lock, flag);
- err = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
while (wr) {
if (num_wrs == 0) {
err = -ENOMEM;
+ *bad_wr = wr;
break;
}
idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
@@ -429,8 +428,10 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
wr->opcode);
err = -EINVAL;
}
- if (err)
+ if (err) {
+ *bad_wr = wr;
break;
+ }
wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
sqp->wr_id = wr->wr_id;
sqp->opcode = wr2opcode(t3_wr_opcode);
@@ -453,10 +454,6 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
spin_unlock_irqrestore(&qhp->lock, flag);
ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
-
-out:
- if (err)
- *bad_wr = wr;
return err;
}
@@ -474,19 +471,18 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_lock_irqsave(&qhp->lock, flag);
if (qhp->attr.state > IWCH_QP_STATE_RTS) {
spin_unlock_irqrestore(&qhp->lock, flag);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
qhp->wq.rq_size_log2) - 1;
if (!wr) {
spin_unlock_irqrestore(&qhp->lock, flag);
- err = -ENOMEM;
- goto out;
+ return -EINVAL;
}
while (wr) {
if (wr->num_sge > T3_MAX_SGE) {
err = -EINVAL;
+ *bad_wr = wr;
break;
}
idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
@@ -498,10 +494,10 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
err = build_zero_stag_recv(qhp, wqe, wr);
else
err = -ENOMEM;
-
- if (err)
+ if (err) {
+ *bad_wr = wr;
break;
-
+ }
build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
0, sizeof(struct t3_receive_wr) >> 3, T3_SOPEOP);
@@ -515,10 +511,6 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
}
spin_unlock_irqrestore(&qhp->lock, flag);
ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
-
-out:
- if (err)
- *bad_wr = wr;
return err;
}
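
Several post_send/post_receive hunks in this patch only move where *bad_wr is assigned (at each failure site versus once at a common exit label), but both layouts implement the same verbs contract: on error, *bad_wr points at the first work request that was not posted, so the caller knows where to resume. A compilable sketch of that contract with hypothetical types (not the cxgb3 structures):

/* Sketch of the ib_post_send error contract; post_one() and the 4-SGE limit
 * are illustrative stand-ins, not driver behavior. */
#include <stddef.h>
#include <errno.h>

struct send_wr {
	struct send_wr *next;
	int num_sge;
};

static int post_one(const struct send_wr *wr)
{
	return wr->num_sge > 4 ? -EINVAL : 0;   /* pretend 4 SGEs is the HW limit */
}

static int post_send(struct send_wr *wr, struct send_wr **bad_wr)
{
	int err = 0;

	while (wr) {
		err = post_one(wr);
		if (err)
			break;                  /* stop at the first failure */
		wr = wr->next;
	}
	if (err)
		*bad_wr = wr;               /* everything from *bad_wr on was not posted */
	return err;
}

int main(void)
{
	struct send_wr b = { NULL, 8 };         /* exceeds the pretend limit */
	struct send_wr a = { &b, 2 };
	struct send_wr *bad = NULL;
	int err = post_send(&a, &bad);

	return err && bad == &b ? 0 : 1;        /* bad marks the first un-posted WR */
}
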
diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_classes.h b/trunk/drivers/infiniband/hw/ehca/ehca_classes.h
index 0136abd50dd4..c825142a2fb7 100644
--- a/trunk/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/trunk/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -375,7 +375,6 @@ extern rwlock_t ehca_qp_idr_lock;
extern rwlock_t ehca_cq_idr_lock;
extern struct idr ehca_qp_idr;
extern struct idr ehca_cq_idr;
-extern spinlock_t shca_list_lock;
extern int ehca_static_rate;
extern int ehca_port_act_time;
diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_eq.c b/trunk/drivers/infiniband/hw/ehca/ehca_eq.c
index 3b87589b8ea0..523e733c630e 100644
--- a/trunk/drivers/infiniband/hw/ehca/ehca_eq.c
+++ b/trunk/drivers/infiniband/hw/ehca/ehca_eq.c
@@ -169,16 +169,13 @@ int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
unsigned long flags;
u64 h_ret;
+ spin_lock_irqsave(&eq->spinlock, flags);
ibmebus_free_irq(eq->ist, (void *)shca);
- spin_lock_irqsave(&shca_list_lock, flags);
- eq->is_initialized = 0;
- spin_unlock_irqrestore(&shca_list_lock, flags);
-
- tasklet_kill(&eq->interrupt_task);
-
h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
+ spin_unlock_irqrestore(&eq->spinlock, flags);
+
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't free EQ resources.");
return -EINVAL;
diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_main.c b/trunk/drivers/infiniband/hw/ehca/ehca_main.c
index 129a6bebd6e3..fb2d83c5bf01 100644
--- a/trunk/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/trunk/drivers/infiniband/hw/ehca/ehca_main.c
@@ -123,7 +123,7 @@ DEFINE_IDR(ehca_qp_idr);
DEFINE_IDR(ehca_cq_idr);
static LIST_HEAD(shca_list); /* list of all registered ehcas */
-DEFINE_SPINLOCK(shca_list_lock);
+static DEFINE_SPINLOCK(shca_list_lock);
static struct timer_list poll_eqs_timer;
diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_reqs.c b/trunk/drivers/infiniband/hw/ehca/ehca_reqs.c
index e3ec7fdd67bd..8fd88cd828fd 100644
--- a/trunk/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/trunk/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -400,6 +400,7 @@ static inline void map_ib_wc_status(u32 cqe_status,
static inline int post_one_send(struct ehca_qp *my_qp,
struct ib_send_wr *cur_send_wr,
+ struct ib_send_wr **bad_send_wr,
int hidden)
{
struct ehca_wqe *wqe_p;
@@ -411,6 +412,8 @@ static inline int post_one_send(struct ehca_qp *my_qp,
wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
if (unlikely(!wqe_p)) {
/* too many posted work requests: queue overflow */
+ if (bad_send_wr)
+ *bad_send_wr = cur_send_wr;
ehca_err(my_qp->ib_qp.device, "Too many posted WQEs "
"qp_num=%x", my_qp->ib_qp.qp_num);
return -ENOMEM;
@@ -430,6 +433,8 @@ static inline int post_one_send(struct ehca_qp *my_qp,
*/
if (unlikely(ret)) {
my_qp->ipz_squeue.current_q_offset = start_offset;
+ if (bad_send_wr)
+ *bad_send_wr = cur_send_wr;
ehca_err(my_qp->ib_qp.device, "Could not write WQE "
"qp_num=%x", my_qp->ib_qp.qp_num);
return -EINVAL;
@@ -443,6 +448,7 @@ int ehca_post_send(struct ib_qp *qp,
struct ib_send_wr **bad_send_wr)
{
struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
+ struct ib_send_wr *cur_send_wr;
int wqe_cnt = 0;
int ret = 0;
unsigned long flags;
@@ -451,8 +457,7 @@ int ehca_post_send(struct ib_qp *qp,
if (unlikely(my_qp->state < IB_QPS_RTS)) {
ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
my_qp->state, qp->qp_num);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
/* LOCK the QUEUE */
@@ -471,21 +476,24 @@ int ehca_post_send(struct ib_qp *qp,
struct ib_send_wr circ_wr;
memset(&circ_wr, 0, sizeof(circ_wr));
circ_wr.opcode = IB_WR_RDMA_READ;
- post_one_send(my_qp, &circ_wr, 1); /* ignore retcode */
+ post_one_send(my_qp, &circ_wr, NULL, 1); /* ignore retcode */
wqe_cnt++;
ehca_dbg(qp->device, "posted circ wr qp_num=%x", qp->qp_num);
my_qp->message_count = my_qp->packet_count = 0;
}
/* loop processes list of send reqs */
- while (send_wr) {
- ret = post_one_send(my_qp, send_wr, 0);
+ for (cur_send_wr = send_wr; cur_send_wr != NULL;
+ cur_send_wr = cur_send_wr->next) {
+ ret = post_one_send(my_qp, cur_send_wr, bad_send_wr, 0);
if (unlikely(ret)) {
+ /* if one or more WQEs were successful, don't fail */
+ if (wqe_cnt)
+ ret = 0;
goto post_send_exit0;
}
wqe_cnt++;
- send_wr = send_wr->next;
- }
+ } /* eof for cur_send_wr */
post_send_exit0:
iosync(); /* serialize GAL register access */
@@ -495,10 +503,6 @@ int ehca_post_send(struct ib_qp *qp,
my_qp, qp->qp_num, wqe_cnt, ret);
my_qp->message_count += wqe_cnt;
spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
-
-out:
- if (ret)
- *bad_send_wr = send_wr;
return ret;
}
@@ -507,6 +511,7 @@ static int internal_post_recv(struct ehca_qp *my_qp,
struct ib_recv_wr *recv_wr,
struct ib_recv_wr **bad_recv_wr)
{
+ struct ib_recv_wr *cur_recv_wr;
struct ehca_wqe *wqe_p;
int wqe_cnt = 0;
int ret = 0;
@@ -517,23 +522,27 @@ static int internal_post_recv(struct ehca_qp *my_qp,
if (unlikely(!HAS_RQ(my_qp))) {
ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
my_qp, my_qp->real_qp_num, my_qp->ext_type);
- ret = -ENODEV;
- goto out;
+ return -ENODEV;
}
/* LOCK the QUEUE */
spin_lock_irqsave(&my_qp->spinlock_r, flags);
- /* loop processes list of recv reqs */
- while (recv_wr) {
+ /* loop processes list of send reqs */
+ for (cur_recv_wr = recv_wr; cur_recv_wr != NULL;
+ cur_recv_wr = cur_recv_wr->next) {
u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
/* get pointer next to free WQE */
wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
if (unlikely(!wqe_p)) {
/* too many posted work requests: queue overflow */
- ret = -ENOMEM;
- ehca_err(dev, "Too many posted WQEs "
- "qp_num=%x", my_qp->real_qp_num);
+ if (bad_recv_wr)
+ *bad_recv_wr = cur_recv_wr;
+ if (wqe_cnt == 0) {
+ ret = -ENOMEM;
+ ehca_err(dev, "Too many posted WQEs "
+ "qp_num=%x", my_qp->real_qp_num);
+ }
goto post_recv_exit0;
}
/*
@@ -543,7 +552,7 @@ static int internal_post_recv(struct ehca_qp *my_qp,
rq_map_idx = start_offset / my_qp->ipz_rqueue.qe_size;
/* write a RECV WQE into the QUEUE */
- ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, recv_wr,
+ ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr,
rq_map_idx);
/*
* if something failed,
@@ -551,20 +560,22 @@ static int internal_post_recv(struct ehca_qp *my_qp,
*/
if (unlikely(ret)) {
my_qp->ipz_rqueue.current_q_offset = start_offset;
- ret = -EINVAL;
- ehca_err(dev, "Could not write WQE "
- "qp_num=%x", my_qp->real_qp_num);
+ *bad_recv_wr = cur_recv_wr;
+ if (wqe_cnt == 0) {
+ ret = -EINVAL;
+ ehca_err(dev, "Could not write WQE "
+ "qp_num=%x", my_qp->real_qp_num);
+ }
goto post_recv_exit0;
}
qmap_entry = &my_qp->rq_map.map[rq_map_idx];
- qmap_entry->app_wr_id = get_app_wr_id(recv_wr->wr_id);
+ qmap_entry->app_wr_id = get_app_wr_id(cur_recv_wr->wr_id);
qmap_entry->reported = 0;
qmap_entry->cqe_req = 1;
wqe_cnt++;
- recv_wr = recv_wr->next;
- } /* eof for recv_wr */
+ } /* eof for cur_recv_wr */
post_recv_exit0:
iosync(); /* serialize GAL register access */
@@ -573,11 +584,6 @@ static int internal_post_recv(struct ehca_qp *my_qp,
ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
my_qp, my_qp->real_qp_num, wqe_cnt, ret);
spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
-
-out:
- if (ret)
- *bad_recv_wr = recv_wr;
-
return ret;
}
@@ -591,7 +597,6 @@ int ehca_post_recv(struct ib_qp *qp,
if (unlikely(my_qp->state == IB_QPS_RESET)) {
ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
my_qp->state, qp->qp_num);
- *bad_recv_wr = recv_wr;
return -EINVAL;
}
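
The ehca hunk also spells out a partial-success convention in its comment: once at least one WQE has reached the queue, the call reports success and leaves *bad_wr to mark where posting stopped. A self-contained sketch of that rule (illustrative names and limits, not the ehca code):

#include <stddef.h>
#include <errno.h>

struct recv_wr {
	struct recv_wr *next;
	int sge_count;
};

static int post_one_recv(const struct recv_wr *wr)
{
	return wr->sge_count > 2 ? -ENOMEM : 0;   /* pretend the queue overflows here */
}

static int post_recv_list(struct recv_wr *wr, struct recv_wr **bad_wr)
{
	int wqe_cnt = 0, ret = 0;

	for (; wr; wr = wr->next) {
		ret = post_one_recv(wr);
		if (ret) {
			*bad_wr = wr;           /* posting stopped here */
			if (wqe_cnt)            /* earlier WQEs already reached the queue */
				ret = 0;            /* so the call as a whole still succeeds */
			break;
		}
		wqe_cnt++;
	}
	return ret;
}

int main(void)
{
	struct recv_wr c = { NULL, 5 };         /* this one will be refused */
	struct recv_wr b = { &c, 1 };
	struct recv_wr a = { &b, 1 };
	struct recv_wr *bad = NULL;

	return post_recv_list(&a, &bad) == 0 && bad == &c ? 0 : 1;
}
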
diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_driver.c b/trunk/drivers/infiniband/hw/ipath/ipath_driver.c
index d2787fe80304..013d1380e77c 100644
--- a/trunk/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/trunk/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -39,7 +39,6 @@
#include
#include
#include
-#include
#include "ipath_kernel.h"
#include "ipath_verbs.h"
@@ -1698,7 +1697,7 @@ void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
unsigned len, int avail)
{
unsigned long flags;
- unsigned end, cnt = 0;
+ unsigned end, cnt = 0, next;
/* There are two bits per send buffer (busy and generation) */
start *= 2;
@@ -1749,7 +1748,12 @@ void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
if (dd->ipath_pioupd_thresh) {
end = 2 * (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
- cnt = bitmap_weight(dd->ipath_pioavailkernel, end);
+ next = find_first_bit(dd->ipath_pioavailkernel, end);
+ while (next < end) {
+ cnt++;
+ next = find_next_bit(dd->ipath_pioavailkernel, end,
+ next + 1);
+ }
}
spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
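
Both sides of the ipath hunk compute the same quantity, the number of set bits in the first `end` bits of the PIO-availability bitmap: one side calls bitmap_weight(), the other walks the map with find_first_bit()/find_next_bit(). A userspace analogue of that count (plain C, not the kernel helpers):

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static int test_bit(const unsigned long *map, unsigned int bit)
{
	return (map[bit / BITS_PER_LONG] >> (bit % BITS_PER_LONG)) & 1UL;
}

static unsigned int scan_weight(const unsigned long *map, unsigned int nbits)
{
	unsigned int bit, cnt = 0;

	for (bit = 0; bit < nbits; bit++)   /* the role find_first_bit/find_next_bit play */
		if (test_bit(map, bit))
			cnt++;
	return cnt;
}

int main(void)
{
	unsigned long map[2] = { 0xf0f0UL, 0x3UL };

	printf("%u\n", scan_weight(map, 2 * BITS_PER_LONG));  /* prints 10 */
	return 0;
}
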
diff --git a/trunk/drivers/infiniband/hw/mlx4/main.c b/trunk/drivers/infiniband/hw/mlx4/main.c
index e596537ff353..3cb3f47a10b8 100644
--- a/trunk/drivers/infiniband/hw/mlx4/main.c
+++ b/trunk/drivers/infiniband/hw/mlx4/main.c
@@ -103,7 +103,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
- if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
+ if (dev->dev->caps.max_gso_sz)
props->device_cap_flags |= IB_DEVICE_UD_TSO;
if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
diff --git a/trunk/drivers/infiniband/hw/mlx4/qp.c b/trunk/drivers/infiniband/hw/mlx4/qp.c
index 989555cee883..256a00c6aeea 100644
--- a/trunk/drivers/infiniband/hw/mlx4/qp.c
+++ b/trunk/drivers/infiniband/hw/mlx4/qp.c
@@ -54,8 +54,7 @@ enum {
/*
* Largest possible UD header: send with GRH and immediate data.
*/
- MLX4_IB_UD_HEADER_SIZE = 72,
- MLX4_IB_LSO_HEADER_SPARE = 128,
+ MLX4_IB_UD_HEADER_SIZE = 72
};
struct mlx4_ib_sqp {
@@ -68,8 +67,7 @@ struct mlx4_ib_sqp {
};
enum {
- MLX4_IB_MIN_SQ_STRIDE = 6,
- MLX4_IB_CACHE_LINE_SIZE = 64,
+ MLX4_IB_MIN_SQ_STRIDE = 6
};
static const __be32 mlx4_ib_opcode[] = {
@@ -263,7 +261,7 @@ static int send_wqe_overhead(enum ib_qp_type type, u32 flags)
case IB_QPT_UD:
return sizeof (struct mlx4_wqe_ctrl_seg) +
sizeof (struct mlx4_wqe_datagram_seg) +
- ((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
+ ((flags & MLX4_IB_QP_LSO) ? 64 : 0);
case IB_QPT_UC:
return sizeof (struct mlx4_wqe_ctrl_seg) +
sizeof (struct mlx4_wqe_raddr_seg);
@@ -899,6 +897,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
(to_mlx4_st(ibqp->qp_type) << 16));
+ context->flags |= cpu_to_be32(1 << 8); /* DE? */
if (!(attr_mask & IB_QP_PATH_MIG_STATE))
context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
@@ -1468,12 +1467,16 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
- __be32 *lso_hdr_sz, __be32 *blh)
+ __be32 *lso_hdr_sz)
{
unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);
- if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
- *blh = cpu_to_be32(1 << 6);
+ /*
+ * This is a temporary limitation and will be removed in
+ * a forthcoming FW release:
+ */
+ if (unlikely(halign > 64))
+ return -EINVAL;
if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
wr->num_sge > qp->sq.max_gs - (halign >> 4)))
@@ -1519,7 +1522,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
__be32 dummy;
__be32 *lso_wqe;
__be32 uninitialized_var(lso_hdr_sz);
- __be32 blh;
int i;
spin_lock_irqsave(&qp->sq.lock, flags);
@@ -1528,7 +1530,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
for (nreq = 0; wr; ++nreq, wr = wr->next) {
lso_wqe = &dummy;
- blh = 0;
if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
err = -ENOMEM;
@@ -1615,7 +1616,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
if (wr->opcode == IB_WR_LSO) {
- err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh);
+ err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz);
if (unlikely(err)) {
*bad_wr = wr;
goto out;
@@ -1686,7 +1687,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
- (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;
+ (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);
stamp = ind + qp->sq_spare_wqes;
ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);
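
Both versions of build_lso_seg() in the mlx4 hunk pad the inline LSO header segment up to a 16-byte multiple and compare the result against a 64-byte boundary; they differ only in whether crossing that boundary sets the "bigger LSO header" bit or fails with -EINVAL. A small sketch of the arithmetic (the 20-byte fixed segment size and 54-byte header are illustrative assumptions, not mlx4 values):

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int lso_seg_hdr = 20;          /* assumed fixed part of the LSO segment */
	unsigned int hlen = 54;                 /* e.g. Ethernet + IP + TCP headers */
	unsigned int halign = ALIGN(lso_seg_hdr + hlen, 16);

	printf("halign = %u\n", halign);        /* 74 rounds up to 80 */
	if (halign > 64)
		printf("header spills past one 64-byte chunk: set BLH or reject\n");
	return 0;
}
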
diff --git a/trunk/drivers/infiniband/hw/nes/Kconfig b/trunk/drivers/infiniband/hw/nes/Kconfig
index 846dc97cf260..d449eb6ec78e 100644
--- a/trunk/drivers/infiniband/hw/nes/Kconfig
+++ b/trunk/drivers/infiniband/hw/nes/Kconfig
@@ -4,13 +4,14 @@ config INFINIBAND_NES
select LIBCRC32C
select INET_LRO
---help---
- This is the RDMA Network Interface Card (RNIC) driver for
- NetEffect Ethernet Cluster Server Adapters.
+ This is a low-level driver for NetEffect RDMA enabled
+ Network Interface Cards (RNIC).
config INFINIBAND_NES_DEBUG
bool "Verbose debugging output"
depends on INFINIBAND_NES
default n
---help---
- This option enables debug messages from the NetEffect RNIC
- driver. Select this if you are diagnosing a problem.
+ This option causes the NetEffect RNIC driver to produce debug
+ messages. Select this if you are developing the driver
+ or trying to diagnose a problem.
diff --git a/trunk/drivers/infiniband/hw/nes/nes.c b/trunk/drivers/infiniband/hw/nes/nes.c
index b9d09bafd6c1..cbde0cfe27e0 100644
--- a/trunk/drivers/infiniband/hw/nes/nes.c
+++ b/trunk/drivers/infiniband/hw/nes/nes.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -521,8 +521,7 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
spin_lock_init(&nesdev->indexed_regs_lock);
/* Remap the PCI registers in adapter BAR0 to kernel VA space */
- mmio_regs = ioremap_nocache(pci_resource_start(pcidev, BAR_0),
- pci_resource_len(pcidev, BAR_0));
+ mmio_regs = ioremap_nocache(pci_resource_start(pcidev, BAR_0), sizeof(mmio_regs));
if (mmio_regs == NULL) {
printk(KERN_ERR PFX "Unable to remap BAR0\n");
ret = -EIO;
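
One side of the nes_probe() hunk maps BAR0 with sizeof(mmio_regs) as the length, which is the size of the pointer variable (typically 8 bytes), not the size of the BAR; the other side asks the PCI core for the real length with pci_resource_len(). A tiny userspace illustration of why the two differ (the BAR size is hypothetical, and this is not driver code):

#include <stdio.h>

int main(void)
{
	void *mmio_regs = NULL;
	unsigned long bar_len = 0x100000;       /* hypothetical 1 MiB BAR */

	printf("sizeof(mmio_regs) = %zu bytes\n", sizeof(mmio_regs));  /* 8 on 64-bit */
	printf("resource length   = 0x%lx bytes\n", bar_len);
	return 0;
}
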
diff --git a/trunk/drivers/infiniband/hw/nes/nes.h b/trunk/drivers/infiniband/hw/nes/nes.h
index 98840564bb2f..bcc6abc4faff 100644
--- a/trunk/drivers/infiniband/hw/nes/nes.h
+++ b/trunk/drivers/infiniband/hw/nes/nes.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/trunk/drivers/infiniband/hw/nes/nes_cm.c b/trunk/drivers/infiniband/hw/nes/nes_cm.c
index 39468c277036..73473db19863 100644
--- a/trunk/drivers/infiniband/hw/nes/nes_cm.c
+++ b/trunk/drivers/infiniband/hw/nes/nes_cm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -52,7 +52,6 @@
#include
#include
#include
-#include
#include
#include
#include
@@ -252,33 +251,6 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
mpa_frame = (struct ietf_mpa_frame *)buffer;
cm_node->mpa_frame_size = ntohs(mpa_frame->priv_data_len);
- /* make sure mpa private data len is less than 512 bytes */
- if (cm_node->mpa_frame_size > IETF_MAX_PRIV_DATA_LEN) {
- nes_debug(NES_DBG_CM, "The received Length of Private"
- " Data field exceeds 512 octets\n");
- return -EINVAL;
- }
- /*
- * make sure MPA receiver interoperate with the
- * received MPA version and MPA key information
- *
- */
- if (mpa_frame->rev != mpa_version) {
- nes_debug(NES_DBG_CM, "The received mpa version"
- " can not be interoperated\n");
- return -EINVAL;
- }
- if (cm_node->state != NES_CM_STATE_MPAREQ_SENT) {
- if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) {
- nes_debug(NES_DBG_CM, "Unexpected MPA Key received \n");
- return -EINVAL;
- }
- } else {
- if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE)) {
- nes_debug(NES_DBG_CM, "Unexpected MPA Key received \n");
- return -EINVAL;
- }
- }
if (cm_node->mpa_frame_size + sizeof(struct ietf_mpa_frame) != len) {
nes_debug(NES_DBG_CM, "The received ietf buffer was not right"
@@ -514,8 +486,6 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node)
send_reset(cm_node, NULL);
break;
default:
- add_ref_cm_node(cm_node);
- send_reset(cm_node, NULL);
create_event(cm_node, NES_CM_EVENT_ABORTED);
}
}
@@ -979,7 +949,6 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
reset_entry);
{
struct nes_cm_node *loopback = cm_node->loopbackpartner;
- enum nes_cm_node_state old_state;
if (NES_CM_STATE_FIN_WAIT1 <= cm_node->state) {
rem_ref_cm_node(cm_node->cm_core, cm_node);
} else {
@@ -991,12 +960,11 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
NES_CM_STATE_CLOSED;
WARN_ON(1);
} else {
- old_state = cm_node->state;
- cm_node->state = NES_CM_STATE_LISTENER_DESTROYED;
- if (old_state != NES_CM_STATE_MPAREQ_RCVD)
- rem_ref_cm_node(
- cm_node->cm_core,
- cm_node);
+ cm_node->state =
+ NES_CM_STATE_CLOSED;
+ rem_ref_cm_node(
+ cm_node->cm_core,
+ cm_node);
}
} else {
struct nes_cm_event event;
@@ -1012,9 +980,20 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
loopback->loc_port;
event.cm_info.cm_id = loopback->cm_id;
cm_event_connect_error(&event);
- cm_node->state = NES_CM_STATE_LISTENER_DESTROYED;
loopback->state = NES_CM_STATE_CLOSED;
+ event.cm_node = cm_node;
+ event.cm_info.rem_addr =
+ cm_node->rem_addr;
+ event.cm_info.loc_addr =
+ cm_node->loc_addr;
+ event.cm_info.rem_port =
+ cm_node->rem_port;
+ event.cm_info.loc_port =
+ cm_node->loc_port;
+ event.cm_info.cm_id = cm_node->cm_id;
+ cm_event_reset(&event);
+
rem_ref_cm_node(cm_node->cm_core,
cm_node);
@@ -1098,13 +1077,12 @@ static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
/**
* nes_addr_resolve_neigh
*/
-static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpindex)
+static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip)
{
struct rtable *rt;
struct flowi fl;
struct neighbour *neigh;
- int rc = arpindex;
- struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
+ int rc = -1;
memset(&fl, 0, sizeof fl);
fl.nl_u.ip4_u.daddr = htonl(dst_ip);
@@ -1120,21 +1098,6 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
" is %pM, Gateway is 0x%08X \n", dst_ip,
neigh->ha, ntohl(rt->rt_gateway));
-
- if (arpindex >= 0) {
- if (!memcmp(nesadapter->arp_table[arpindex].mac_addr,
- neigh->ha, ETH_ALEN)){
- /* Mac address same as in nes_arp_table */
- neigh_release(neigh);
- ip_rt_put(rt);
- return rc;
- }
-
- nes_manage_arp_cache(nesvnic->netdev,
- nesadapter->arp_table[arpindex].mac_addr,
- dst_ip, NES_ARP_DELETE);
- }
-
nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
dst_ip, NES_ARP_ADD);
rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
@@ -1150,6 +1113,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
return rc;
}
+
/**
* make_cm_node - create a new instance of a cm node
*/
@@ -1159,7 +1123,6 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
{
struct nes_cm_node *cm_node;
struct timespec ts;
- int oldarpindex = 0;
int arpindex = 0;
struct nes_device *nesdev;
struct nes_adapter *nesadapter;
@@ -1213,18 +1176,17 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
nesadapter = nesdev->nesadapter;
cm_node->loopbackpartner = NULL;
-
/* get the mac addr for the remote node */
if (ipv4_is_loopback(htonl(cm_node->rem_addr)))
arpindex = nes_arp_table(nesdev, ntohl(nesvnic->local_ipaddr), NULL, NES_ARP_RESOLVE);
- else {
- oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
- arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr, oldarpindex);
-
- }
+ else
+ arpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
if (arpindex < 0) {
- kfree(cm_node);
- return NULL;
+ arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr);
+ if (arpindex < 0) {
+ kfree(cm_node);
+ return NULL;
+ }
}
/* copy the mac addr to node context */
@@ -1371,20 +1333,13 @@ static void handle_fin_pkt(struct nes_cm_node *cm_node)
case NES_CM_STATE_SYN_RCVD:
case NES_CM_STATE_SYN_SENT:
case NES_CM_STATE_ESTABLISHED:
+ case NES_CM_STATE_MPAREQ_SENT:
case NES_CM_STATE_MPAREJ_RCVD:
cm_node->tcp_cntxt.rcv_nxt++;
cleanup_retrans_entry(cm_node);
cm_node->state = NES_CM_STATE_LAST_ACK;
send_fin(cm_node, NULL);
break;
- case NES_CM_STATE_MPAREQ_SENT:
- create_event(cm_node, NES_CM_EVENT_ABORTED);
- cm_node->tcp_cntxt.rcv_nxt++;
- cleanup_retrans_entry(cm_node);
- cm_node->state = NES_CM_STATE_CLOSED;
- add_ref_cm_node(cm_node);
- send_reset(cm_node, NULL);
- break;
case NES_CM_STATE_FIN_WAIT1:
cm_node->tcp_cntxt.rcv_nxt++;
cleanup_retrans_entry(cm_node);
@@ -1635,7 +1590,6 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
break;
case NES_CM_STATE_CLOSED:
cleanup_retrans_entry(cm_node);
- add_ref_cm_node(cm_node);
send_reset(cm_node, skb);
break;
case NES_CM_STATE_TSA:
@@ -1687,15 +1641,9 @@ static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
passive_open_err(cm_node, skb, 1);
break;
case NES_CM_STATE_LISTENING:
- cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
- cleanup_retrans_entry(cm_node);
- cm_node->state = NES_CM_STATE_CLOSED;
- send_reset(cm_node, skb);
- break;
case NES_CM_STATE_CLOSED:
cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
cleanup_retrans_entry(cm_node);
- add_ref_cm_node(cm_node);
send_reset(cm_node, skb);
break;
case NES_CM_STATE_ESTABLISHED:
@@ -1764,13 +1712,8 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
dev_kfree_skb_any(skb);
break;
case NES_CM_STATE_LISTENING:
- cleanup_retrans_entry(cm_node);
- cm_node->state = NES_CM_STATE_CLOSED;
- send_reset(cm_node, skb);
- break;
case NES_CM_STATE_CLOSED:
cleanup_retrans_entry(cm_node);
- add_ref_cm_node(cm_node);
send_reset(cm_node, skb);
break;
case NES_CM_STATE_LAST_ACK:
@@ -2031,7 +1974,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
if (!cm_node)
return NULL;
mpa_frame = &cm_node->mpa_frame;
- memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
+ strcpy(mpa_frame->key, IEFT_MPA_KEY_REQ);
mpa_frame->flags = IETF_MPA_FLAGS_CRC;
mpa_frame->rev = IETF_MPA_VERSION;
mpa_frame->priv_data_len = htons(private_data_len);
@@ -2159,39 +2102,30 @@ static int mini_cm_reject(struct nes_cm_core *cm_core,
cm_node->state = NES_CM_STATE_CLOSED;
rem_ref_cm_node(cm_core, cm_node);
} else {
- if (cm_node->state == NES_CM_STATE_LISTENER_DESTROYED) {
- rem_ref_cm_node(cm_core, cm_node);
- } else {
- ret = send_mpa_reject(cm_node);
- if (ret) {
- cm_node->state = NES_CM_STATE_CLOSED;
- err = send_reset(cm_node, NULL);
- if (err)
- WARN_ON(1);
- } else
- cm_id->add_ref(cm_id);
- }
+ ret = send_mpa_reject(cm_node);
+ if (ret) {
+ cm_node->state = NES_CM_STATE_CLOSED;
+ err = send_reset(cm_node, NULL);
+ if (err)
+ WARN_ON(1);
+ } else
+ cm_id->add_ref(cm_id);
}
} else {
cm_node->cm_id = NULL;
- if (cm_node->state == NES_CM_STATE_LISTENER_DESTROYED) {
- rem_ref_cm_node(cm_core, cm_node);
- rem_ref_cm_node(cm_core, loopback);
- } else {
- event.cm_node = loopback;
- event.cm_info.rem_addr = loopback->rem_addr;
- event.cm_info.loc_addr = loopback->loc_addr;
- event.cm_info.rem_port = loopback->rem_port;
- event.cm_info.loc_port = loopback->loc_port;
- event.cm_info.cm_id = loopback->cm_id;
- cm_event_mpa_reject(&event);
- rem_ref_cm_node(cm_core, cm_node);
- loopback->state = NES_CM_STATE_CLOSING;
+ event.cm_node = loopback;
+ event.cm_info.rem_addr = loopback->rem_addr;
+ event.cm_info.loc_addr = loopback->loc_addr;
+ event.cm_info.rem_port = loopback->rem_port;
+ event.cm_info.loc_port = loopback->loc_port;
+ event.cm_info.cm_id = loopback->cm_id;
+ cm_event_mpa_reject(&event);
+ rem_ref_cm_node(cm_core, cm_node);
+ loopback->state = NES_CM_STATE_CLOSING;
- cm_id = loopback->cm_id;
- rem_ref_cm_node(cm_core, loopback);
- cm_id->rem_ref(cm_id);
- }
+ cm_id = loopback->cm_id;
+ rem_ref_cm_node(cm_core, loopback);
+ cm_id->rem_ref(cm_id);
}
return ret;
@@ -2230,15 +2164,11 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod
case NES_CM_STATE_CLOSING:
ret = -1;
break;
- case NES_CM_STATE_LISTENING:
- cleanup_retrans_entry(cm_node);
- send_reset(cm_node, NULL);
- break;
case NES_CM_STATE_MPAREJ_RCVD:
+ case NES_CM_STATE_LISTENING:
case NES_CM_STATE_UNKNOWN:
case NES_CM_STATE_INITED:
case NES_CM_STATE_CLOSED:
- case NES_CM_STATE_LISTENER_DESTROYED:
ret = rem_ref_cm_node(cm_core, cm_node);
break;
case NES_CM_STATE_TSA:
@@ -2757,6 +2687,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct nes_pd *nespd;
u64 tagged_offset;
+
+
ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
if (!ibqp)
return -EINVAL;
@@ -2772,13 +2704,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
"%s\n", cm_node, nesvnic, nesvnic->netdev,
nesvnic->netdev->name);
- if (NES_CM_STATE_LISTENER_DESTROYED == cm_node->state) {
- if (cm_node->loopbackpartner)
- rem_ref_cm_node(cm_node->cm_core, cm_node->loopbackpartner);
- rem_ref_cm_node(cm_node->cm_core, cm_node);
- return -EINVAL;
- }
-
/* associate the node with the QP */
nesqp->cm_node = (void *)cm_node;
cm_node->nesqp = nesqp;
@@ -2861,10 +2786,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cpu_to_le32(conn_param->private_data_len +
sizeof(struct ietf_mpa_frame));
wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = ibmr->lkey;
- if (nesqp->sq_kmapped) {
- nesqp->sq_kmapped = 0;
- kunmap(nesqp->page);
- }
nesqp->nesqp_context->ird_ord_sizes |=
cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
@@ -3008,7 +2929,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
if (cm_node->mpa_frame_size > MAX_CM_BUFFER)
return -EINVAL;
- memcpy(&cm_node->mpa_frame.key[0], IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
+ strcpy(&cm_node->mpa_frame.key[0], IEFT_MPA_KEY_REP);
if (loopback) {
memcpy(&loopback->mpa_frame.priv_data, pdata, pdata_len);
loopback->mpa_frame.priv_data_len = pdata_len;
@@ -3053,9 +2974,6 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (!nesdev)
return -EINVAL;
- if (!(cm_id->local_addr.sin_port) || !(cm_id->remote_addr.sin_port))
- return -EINVAL;
-
nes_debug(NES_DBG_CM, "QP%u, current IP = 0x%08X, Destination IP = "
"0x%08X:0x%04X, local = 0x%08X:0x%04X.\n", nesqp->hwqp.qp_id,
ntohl(nesvnic->local_ipaddr),
@@ -3333,11 +3251,6 @@ static void cm_event_connected(struct nes_cm_event *event)
wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0;
wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
- if (nesqp->sq_kmapped) {
- nesqp->sq_kmapped = 0;
- kunmap(nesqp->page);
- }
-
/* use the reserved spot on the WQ for the extra first WQE */
nesqp->nesqp_context->ird_ord_sizes &=
cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
@@ -3433,7 +3346,7 @@ static void cm_event_connect_error(struct nes_cm_event *event)
nesqp->cm_id = NULL;
cm_id->provider_data = NULL;
cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
- cm_event.status = -ECONNRESET;
+ cm_event.status = IW_CM_EVENT_STATUS_REJECTED;
cm_event.provider_data = cm_id->provider_data;
cm_event.local_addr = cm_id->local_addr;
cm_event.remote_addr = cm_id->remote_addr;
@@ -3477,8 +3390,6 @@ static void cm_event_reset(struct nes_cm_event *event)
nes_debug(NES_DBG_CM, "%p - cm_id = %p\n", event->cm_node, cm_id);
nesqp = cm_id->provider_data;
- if (!nesqp)
- return;
nesqp->cm_id = NULL;
/* cm_id->provider_data = NULL; */
@@ -3490,8 +3401,8 @@ static void cm_event_reset(struct nes_cm_event *event)
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
- cm_id->add_ref(cm_id);
ret = cm_id->event_handler(cm_id, &cm_event);
+ cm_id->add_ref(cm_id);
atomic_inc(&cm_closes);
cm_event.event = IW_CM_EVENT_CLOSE;
cm_event.status = IW_CM_EVENT_STATUS_OK;
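
The parse_mpa() and nes_cm.h hunks revolve around validating the fixed MPA key and the private-data length: the keys "MPA ID Req Frame" and "MPA ID Rep Frame" are exactly IETF_MPA_KEY_SIZE (16) bytes with no NUL stored in the frame, which is why one side of the hunk copies and compares the full 16 bytes with memcpy()/memcmp() rather than strcpy(), and rejects private data longer than IETF_MAX_PRIV_DATA_LEN (512). A compilable sketch of that check, with the frame reduced to the fields involved (not the driver's struct ietf_mpa_frame):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define IETF_MPA_KEY_SIZE       16
#define IETF_MAX_PRIV_DATA_LEN  512
#define IEFT_MPA_KEY_REQ        "MPA ID Req Frame"   /* exactly 16 bytes on the wire */
#define IEFT_MPA_KEY_REP        "MPA ID Rep Frame"

struct mpa_frame {
	char     key[IETF_MPA_KEY_SIZE];
	uint8_t  flags;
	uint8_t  rev;
	uint16_t priv_data_len;     /* big-endian on the wire */
};

static int parse_mpa(const struct mpa_frame *f, int expect_reply)
{
	const char *want = expect_reply ? IEFT_MPA_KEY_REP : IEFT_MPA_KEY_REQ;

	if (ntohs(f->priv_data_len) > IETF_MAX_PRIV_DATA_LEN)
		return -1;                              /* private data too long */
	if (memcmp(f->key, want, IETF_MPA_KEY_SIZE))
		return -1;                              /* unexpected MPA key */
	return 0;
}

int main(void)
{
	struct mpa_frame f;

	memcpy(f.key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);  /* memcpy: no room for a NUL */
	f.priv_data_len = htons(64);
	printf("%s\n", parse_mpa(&f, 0) ? "rejected" : "accepted");
	return 0;
}
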
diff --git a/trunk/drivers/infiniband/hw/nes/nes_cm.h b/trunk/drivers/infiniband/hw/nes/nes_cm.h
index d9825fda70a1..90e8e4d8a5ce 100644
--- a/trunk/drivers/infiniband/hw/nes/nes_cm.h
+++ b/trunk/drivers/infiniband/hw/nes/nes_cm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -47,8 +47,6 @@
#define IEFT_MPA_KEY_REP "MPA ID Rep Frame"
#define IETF_MPA_KEY_SIZE 16
#define IETF_MPA_VERSION 1
-#define IETF_MAX_PRIV_DATA_LEN 512
-#define IETF_MPA_FRAME_SIZE 20
enum ietf_mpa_flags {
IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */
@@ -171,7 +169,7 @@ struct nes_timer_entry {
#define NES_CM_DEF_SEQ2 0x18ed5740
#define NES_CM_DEF_LOCAL_ID2 0xb807
-#define MAX_CM_BUFFER (IETF_MPA_FRAME_SIZE + IETF_MAX_PRIV_DATA_LEN)
+#define MAX_CM_BUFFER 512
typedef u32 nes_addr_t;
@@ -200,7 +198,6 @@ enum nes_cm_node_state {
NES_CM_STATE_TIME_WAIT,
NES_CM_STATE_LAST_ACK,
NES_CM_STATE_CLOSING,
- NES_CM_STATE_LISTENER_DESTROYED,
NES_CM_STATE_CLOSED
};
diff --git a/trunk/drivers/infiniband/hw/nes/nes_context.h b/trunk/drivers/infiniband/hw/nes/nes_context.h
index b4393a16099d..0fb8d81d9a62 100644
--- a/trunk/drivers/infiniband/hw/nes/nes_context.h
+++ b/trunk/drivers/infiniband/hw/nes/nes_context.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/trunk/drivers/infiniband/hw/nes/nes_hw.c b/trunk/drivers/infiniband/hw/nes/nes_hw.c
index b1c2cbb88f09..3512d6de3019 100644
--- a/trunk/drivers/infiniband/hw/nes/nes_hw.c
+++ b/trunk/drivers/infiniband/hw/nes/nes_hw.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -424,9 +424,8 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
nesadapter->base_pd = 1;
- nesadapter->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
- IB_DEVICE_MEM_WINDOW |
- IB_DEVICE_MEM_MGT_EXTENSIONS;
+ nesadapter->device_cap_flags =
+ IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
nesadapter->allocated_qps = (unsigned long *)&(((unsigned char *)nesadapter)
[(sizeof(struct nes_adapter)+(sizeof(unsigned long)-1))&(~(sizeof(unsigned long)-1))]);
@@ -437,12 +436,11 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
nesadapter->qp_table = (struct nes_qp **)(&nesadapter->allocated_arps[BITS_TO_LONGS(arp_table_size)]);
- /* mark the usual suspect QPs, MR and CQs as in use */
+ /* mark the usual suspect QPs and CQs as in use */
for (u32temp = 0; u32temp < NES_FIRST_QPN; u32temp++) {
set_bit(u32temp, nesadapter->allocated_qps);
set_bit(u32temp, nesadapter->allocated_cqs);
}
- set_bit(0, nesadapter->allocated_mrs);
for (u32temp = 0; u32temp < 20; u32temp++)
set_bit(u32temp, nesadapter->allocated_pds);
@@ -483,7 +481,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
nesadapter->max_irrq_wr = (u32temp >> 16) & 3;
nesadapter->max_sge = 4;
- nesadapter->max_cqe = 32766;
+ nesadapter->max_cqe = 32767;
if (nes_read_eeprom_values(nesdev, nesadapter)) {
printk(KERN_ERR PFX "Unable to read EEPROM data.\n");
@@ -1357,8 +1355,6 @@ int nes_init_phy(struct nes_device *nesdev)
}
if ((phy_type == NES_PHY_TYPE_ARGUS) ||
(phy_type == NES_PHY_TYPE_SFP_D)) {
- u32 first_time = 1;
-
/* Check firmware heartbeat */
nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
@@ -1366,13 +1362,8 @@ int nes_init_phy(struct nes_device *nesdev)
nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- if (temp_phy_data != temp_phy_data2) {
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
- temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- if ((temp_phy_data & 0xff) > 0x20)
- return 0;
- printk(PFX "Reinitializing PHY\n");
- }
+ if (temp_phy_data != temp_phy_data2)
+ return 0;
/* no heartbeat, configure the PHY */
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000);
@@ -1408,7 +1399,7 @@ int nes_init_phy(struct nes_device *nesdev)
temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
do {
if (counter++ > 150) {
- printk(PFX "No PHY heartbeat\n");
+ nes_debug(NES_DBG_PHY, "No PHY heartbeat\n");
break;
}
mdelay(1);
@@ -1422,20 +1413,11 @@ int nes_init_phy(struct nes_device *nesdev)
nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
if (counter++ > 300) {
- if (((temp_phy_data & 0xff) == 0x0) && first_time) {
- first_time = 0;
- counter = 0;
- /* reset AMCC PHY and try again */
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x00c0);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x0040);
- continue;
- } else {
- printk(PFX "PHY did not track\n");
- break;
- }
+ nes_debug(NES_DBG_PHY, "PHY did not track\n");
+ break;
}
mdelay(10);
- } while ((temp_phy_data & 0xff) < 0x30);
+ } while (((temp_phy_data & 0xff) != 0x50) && ((temp_phy_data & 0xff) != 0x70));
/* setup signal integrity */
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000);
diff --git a/trunk/drivers/infiniband/hw/nes/nes_hw.h b/trunk/drivers/infiniband/hw/nes/nes_hw.h
index 084be0ee689b..f28a41ba9fa1 100644
--- a/trunk/drivers/infiniband/hw/nes/nes_hw.h
+++ b/trunk/drivers/infiniband/hw/nes/nes_hw.h
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+* Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -546,23 +546,11 @@ enum nes_iwarp_sq_fmr_wqe_word_idx {
NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX = 14,
};
-enum nes_iwarp_sq_fmr_opcodes {
- NES_IWARP_SQ_FMR_WQE_ZERO_BASED = (1<<6),
- NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_4K = (0<<7),
- NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_2M = (1<<7),
- NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_READ = (1<<16),
- NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_WRITE = (1<<17),
- NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_READ = (1<<18),
- NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_WRITE = (1<<19),
- NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_WINDOW_BIND = (1<<20),
-};
-
-#define NES_IWARP_SQ_FMR_WQE_MR_LENGTH_HIGH_MASK 0xFF;
-
enum nes_iwarp_sq_locinv_wqe_word_idx {
NES_IWARP_SQ_LOCINV_WQE_INV_STAG_IDX = 6,
};
+
enum nes_iwarp_rq_wqe_word_idx {
NES_IWARP_RQ_WQE_TOTAL_PAYLOAD_IDX = 1,
NES_IWARP_RQ_WQE_COMP_CTX_LOW_IDX = 2,
@@ -1165,19 +1153,6 @@ struct nes_pbl {
/* TODO: need to add list for two level tables */
};
-#define NES_4K_PBL_CHUNK_SIZE 4096
-
-struct nes_fast_mr_wqe_pbl {
- u64 *kva;
- dma_addr_t paddr;
-};
-
-struct nes_ib_fast_reg_page_list {
- struct ib_fast_reg_page_list ibfrpl;
- struct nes_fast_mr_wqe_pbl nes_wqe_pbl;
- u64 pbl;
-};
-
struct nes_listener {
struct work_struct work;
struct workqueue_struct *wq;
diff --git a/trunk/drivers/infiniband/hw/nes/nes_nic.c b/trunk/drivers/infiniband/hw/nes/nes_nic.c
index ab1102780186..de18fdfdadf2 100644
--- a/trunk/drivers/infiniband/hw/nes/nes_nic.c
+++ b/trunk/drivers/infiniband/hw/nes/nes_nic.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/trunk/drivers/infiniband/hw/nes/nes_user.h b/trunk/drivers/infiniband/hw/nes/nes_user.h
index 71e133ab209b..cc90c14b49eb 100644
--- a/trunk/drivers/infiniband/hw/nes/nes_user.h
+++ b/trunk/drivers/infiniband/hw/nes/nes_user.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
@@ -86,7 +86,6 @@ enum iwnes_memreg_type {
IWNES_MEMREG_TYPE_CQ = 0x0002,
IWNES_MEMREG_TYPE_MW = 0x0003,
IWNES_MEMREG_TYPE_FMR = 0x0004,
- IWNES_MEMREG_TYPE_FMEM = 0x0005,
};
struct nes_mem_reg_req {
diff --git a/trunk/drivers/infiniband/hw/nes/nes_utils.c b/trunk/drivers/infiniband/hw/nes/nes_utils.c
index 729d525c5b70..9687c397ce1a 100644
--- a/trunk/drivers/infiniband/hw/nes/nes_utils.c
+++ b/trunk/drivers/infiniband/hw/nes/nes_utils.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/trunk/drivers/infiniband/hw/nes/nes_verbs.c b/trunk/drivers/infiniband/hw/nes/nes_verbs.c
index 64d3136e3747..a680c42d6e8c 100644
--- a/trunk/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/trunk/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -275,236 +275,342 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
}
-/*
- * nes_alloc_fast_mr
+/**
+ * nes_alloc_fmr
*/
-static int alloc_fast_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
- u32 stag, u32 page_count)
+static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
+ int ibmr_access_flags,
+ struct ib_fmr_attr *ibfmr_attr)
{
- struct nes_hw_cqp_wqe *cqp_wqe;
- struct nes_cqp_request *cqp_request;
unsigned long flags;
- int ret;
+ struct nes_pd *nespd = to_nespd(ibpd);
+ struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_fmr *nesfmr;
+ struct nes_cqp_request *cqp_request;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ int ret;
+ u32 stag;
+ u32 stag_index = 0;
+ u32 next_stag_index = 0;
+ u32 driver_key = 0;
u32 opcode = 0;
- u16 major_code;
- u64 region_length = page_count * PAGE_SIZE;
+ u8 stag_key = 0;
+ int i=0;
+ struct nes_vpbl vpbl;
+
+ get_random_bytes(&next_stag_index, sizeof(next_stag_index));
+ stag_key = (u8)next_stag_index;
+
+ driver_key = 0;
+
+ next_stag_index >>= 8;
+ next_stag_index %= nesadapter->max_mr;
+
+ ret = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
+ nesadapter->max_mr, &stag_index, &next_stag_index);
+ if (ret) {
+ goto failed_resource_alloc;
+ }
+
+ nesfmr = kzalloc(sizeof(*nesfmr), GFP_KERNEL);
+ if (!nesfmr) {
+ ret = -ENOMEM;
+ goto failed_fmr_alloc;
+ }
+
+ nesfmr->nesmr.mode = IWNES_MEMREG_TYPE_FMR;
+ if (ibfmr_attr->max_pages == 1) {
+ /* use zero length PBL */
+ nesfmr->nesmr.pbl_4k = 0;
+ nesfmr->nesmr.pbls_used = 0;
+ } else if (ibfmr_attr->max_pages <= 32) {
+ /* use PBL 256 */
+ nesfmr->nesmr.pbl_4k = 0;
+ nesfmr->nesmr.pbls_used = 1;
+ } else if (ibfmr_attr->max_pages <= 512) {
+ /* use 4K PBLs */
+ nesfmr->nesmr.pbl_4k = 1;
+ nesfmr->nesmr.pbls_used = 1;
+ } else {
+ /* use two level 4K PBLs */
+ /* add support for two level 256B PBLs */
+ nesfmr->nesmr.pbl_4k = 1;
+ nesfmr->nesmr.pbls_used = 1 + (ibfmr_attr->max_pages >> 9) +
+ ((ibfmr_attr->max_pages & 511) ? 1 : 0);
+ }
+ /* Register the region with the adapter */
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ /* track PBL resources */
+ if (nesfmr->nesmr.pbls_used != 0) {
+ if (nesfmr->nesmr.pbl_4k) {
+ if (nesfmr->nesmr.pbls_used > nesadapter->free_4kpbl) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ ret = -ENOMEM;
+ goto failed_vpbl_avail;
+ } else {
+ nesadapter->free_4kpbl -= nesfmr->nesmr.pbls_used;
+ }
+ } else {
+ if (nesfmr->nesmr.pbls_used > nesadapter->free_256pbl) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ ret = -ENOMEM;
+ goto failed_vpbl_avail;
+ } else {
+ nesadapter->free_256pbl -= nesfmr->nesmr.pbls_used;
+ }
+ }
+ }
+
+ /* one level pbl */
+ if (nesfmr->nesmr.pbls_used == 0) {
+ nesfmr->root_vpbl.pbl_vbase = NULL;
+ nes_debug(NES_DBG_MR, "zero level pbl \n");
+ } else if (nesfmr->nesmr.pbls_used == 1) {
+ /* can change it to kmalloc & dma_map_single */
+ nesfmr->root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
+ &nesfmr->root_vpbl.pbl_pbase);
+ if (!nesfmr->root_vpbl.pbl_vbase) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ ret = -ENOMEM;
+ goto failed_vpbl_alloc;
+ }
+ nesfmr->leaf_pbl_cnt = 0;
+ nes_debug(NES_DBG_MR, "one level pbl, root_vpbl.pbl_vbase=%p \n",
+ nesfmr->root_vpbl.pbl_vbase);
+ }
+ /* two level pbl */
+ else {
+ nesfmr->root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 8192,
+ &nesfmr->root_vpbl.pbl_pbase);
+ if (!nesfmr->root_vpbl.pbl_vbase) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ ret = -ENOMEM;
+ goto failed_vpbl_alloc;
+ }
+
+ nesfmr->leaf_pbl_cnt = nesfmr->nesmr.pbls_used-1;
+ nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_ATOMIC);
+ if (!nesfmr->root_vpbl.leaf_vpbl) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ ret = -ENOMEM;
+ goto failed_leaf_vpbl_alloc;
+ }
+
+ nes_debug(NES_DBG_MR, "two level pbl, root_vpbl.pbl_vbase=%p"
+ " leaf_pbl_cnt=%d root_vpbl.leaf_vpbl=%p\n",
+ nesfmr->root_vpbl.pbl_vbase, nesfmr->leaf_pbl_cnt, nesfmr->root_vpbl.leaf_vpbl);
+
+ for (i=0; i