From 2c84dc0a003ea0ca37982c172e06681de0ec1bc5 Mon Sep 17 00:00:00 2001 From: Auke Kok Date: Mon, 28 Aug 2006 14:56:16 -0700 Subject: [PATCH] --- yaml --- r: 35197 b: refs/heads/master c: 8fc897b00a7d81ffaa24e18881c2d6b10698ab0b h: refs/heads/master i: 35195: 87e3aab83a6f43cdaac624ae31ada90f3305ce09 v: v3 --- [refs] | 2 +- trunk/CREDITS | 2 +- trunk/Documentation/connector/ucon.c | 206 ---- trunk/Documentation/cpusets.txt | 6 - trunk/Documentation/filesystems/00-INDEX | 4 +- trunk/Documentation/filesystems/relay.txt | 479 -------- trunk/Documentation/filesystems/relayfs.txt | 442 +++++++ trunk/Documentation/input/joystick.txt | 1 + trunk/Documentation/scsi/ChangeLog.megaraid | 123 -- trunk/Documentation/sysctl/fs.txt | 20 - trunk/Documentation/sysctl/kernel.txt | 20 + trunk/MAINTAINERS | 6 - trunk/Makefile | 2 +- trunk/arch/arm/common/dmabounce.c | 8 +- trunk/arch/arm/kernel/entry-armv.S | 21 +- trunk/arch/arm/kernel/head.S | 2 +- trunk/arch/arm/mach-s3c2410/Makefile | 36 +- trunk/arch/arm/mach-s3c2410/dma.c | 163 +-- trunk/arch/arm/mach-versatile/core.c | 2 +- trunk/arch/i386/Kconfig | 4 +- trunk/arch/i386/kernel/acpi/boot.c | 2 +- trunk/arch/i386/kernel/acpi/wakeup.S | 5 +- .../i386/kernel/cpu/cpufreq/acpi-cpufreq.c | 9 +- trunk/arch/i386/pci/init.c | 8 +- trunk/arch/i386/pci/mmconfig.c | 2 +- trunk/arch/ia64/hp/sim/simscsi.c | 3 +- trunk/arch/ia64/kernel/acpi.c | 2 +- trunk/arch/powerpc/boot/dts/mpc8540ads.dts | 257 ----- trunk/arch/powerpc/boot/dts/mpc8541cds.dts | 244 ---- trunk/arch/powerpc/boot/dts/mpc8548cds.dts | 287 ----- trunk/arch/powerpc/boot/dts/mpc8555cds.dts | 244 ---- trunk/arch/powerpc/kernel/legacy_serial.c | 8 +- trunk/arch/powerpc/kernel/prom_parse.c | 13 +- trunk/arch/powerpc/kernel/time.c | 25 +- trunk/arch/powerpc/kernel/traps.c | 8 +- trunk/arch/powerpc/mm/hugetlbpage.c | 2 +- trunk/arch/powerpc/platforms/85xx/Kconfig | 1 + .../arch/powerpc/platforms/85xx/mpc85xx_ads.c | 162 ++- .../arch/powerpc/platforms/85xx/mpc85xx_cds.c | 210 ++-- .../powerpc/platforms/86xx/mpc8641_hpcn.h | 32 + .../powerpc/platforms/86xx/mpc86xx_hpcn.c | 324 +++--- .../platforms/embedded6xx/mpc7448_hpc2.c | 73 +- .../powerpc/platforms/powermac/bootx_init.c | 15 +- trunk/arch/powerpc/sysdev/fsl_soc.c | 30 +- trunk/arch/powerpc/sysdev/tsi108_dev.c | 10 +- trunk/arch/powerpc/sysdev/tsi108_pci.c | 21 +- trunk/arch/sparc/kernel/setup.c | 4 +- trunk/arch/sparc/kernel/smp.c | 1 + trunk/arch/sparc/kernel/sun4d_smp.c | 2 +- trunk/arch/sparc/kernel/sun4m_smp.c | 2 + trunk/block/cfq-iosched.c | 2 +- trunk/block/elevator.c | 3 +- trunk/block/ll_rw_blk.c | 2 - trunk/drivers/acpi/ac.c | 2 - trunk/drivers/acpi/acpi_memhotplug.c | 8 +- trunk/drivers/acpi/battery.c | 3 - trunk/drivers/acpi/bus.c | 11 +- trunk/drivers/acpi/hotkey.c | 281 +++-- trunk/drivers/acpi/i2c_ec.c | 2 +- trunk/drivers/acpi/osl.c | 10 - trunk/drivers/acpi/sbs.c | 3 - trunk/drivers/acpi/scan.c | 12 +- trunk/drivers/acpi/utils.c | 2 +- trunk/drivers/base/node.c | 2 +- trunk/drivers/cdrom/gscd.c | 2 +- trunk/drivers/char/moxa.c | 8 +- trunk/drivers/char/tty_io.c | 808 ++----------- trunk/drivers/char/tty_ioctl.c | 59 +- trunk/drivers/char/vt_ioctl.c | 2 - trunk/drivers/hwmon/abituguru.c | 99 +- trunk/drivers/i2c/chips/tps65010.c | 12 +- trunk/drivers/ieee1394/ohci1394.c | 4 +- trunk/drivers/infiniband/core/cache.c | 3 +- trunk/drivers/infiniband/core/sa_query.c | 3 +- .../drivers/infiniband/hw/mthca/mthca_main.c | 6 +- .../infiniband/hw/mthca/mthca_provider.c | 11 +- .../infiniband/hw/mthca/mthca_provider.h | 4 +- 
trunk/drivers/infiniband/hw/mthca/mthca_qp.c | 54 +- .../drivers/infiniband/ulp/iser/iscsi_iser.c | 22 +- trunk/drivers/input/keyboard/atkbd.c | 2 +- trunk/drivers/input/misc/wistron_btns.c | 16 +- trunk/drivers/input/mouse/psmouse-base.c | 7 + trunk/drivers/md/dm-raid1.c | 4 +- trunk/drivers/md/md.c | 13 - trunk/drivers/md/raid1.c | 7 +- trunk/drivers/message/fusion/mptbase.h | 1 + trunk/drivers/message/fusion/mptfc.c | 92 +- trunk/drivers/mtd/nand/ams-delta.c | 10 +- trunk/drivers/mtd/nand/nand_base.c | 6 +- trunk/drivers/net/e1000/e1000.h | 2 +- trunk/drivers/net/e1000/e1000_ethtool.c | 52 +- trunk/drivers/net/e1000/e1000_hw.c | 1024 +++++++++-------- trunk/drivers/net/e1000/e1000_hw.h | 25 +- trunk/drivers/net/e1000/e1000_main.c | 5 +- trunk/drivers/net/skge.c | 88 +- trunk/drivers/net/sky2.c | 152 ++- trunk/drivers/net/sky2.h | 3 +- trunk/drivers/pci/hotplug/Kconfig | 2 +- trunk/drivers/pci/hotplug/cpci_hotplug_pci.c | 54 +- trunk/drivers/pci/pci-driver.c | 3 +- trunk/drivers/pci/quirks.c | 59 +- trunk/drivers/rtc/rtc-s3c.c | 124 +- trunk/drivers/s390/block/dasd_devmap.c | 8 +- trunk/drivers/s390/block/dasd_eckd.c | 14 +- trunk/drivers/s390/scsi/zfcp_aux.c | 120 +- trunk/drivers/s390/scsi/zfcp_ccw.c | 5 - trunk/drivers/s390/scsi/zfcp_def.h | 15 +- trunk/drivers/s390/scsi/zfcp_erp.c | 212 ++-- trunk/drivers/s390/scsi/zfcp_ext.h | 9 +- trunk/drivers/s390/scsi/zfcp_fsf.c | 122 +- trunk/drivers/s390/scsi/zfcp_qdio.c | 79 +- trunk/drivers/s390/scsi/zfcp_scsi.c | 73 +- trunk/drivers/scsi/ata_piix.c | 84 +- trunk/drivers/scsi/esp.c | 3 +- trunk/drivers/scsi/hptiop.c | 568 ++++++++- trunk/drivers/scsi/ide-scsi.c | 2 +- trunk/drivers/scsi/iscsi_tcp.c | 209 ++-- trunk/drivers/scsi/iscsi_tcp.h | 2 + trunk/drivers/scsi/libata-core.c | 2 +- trunk/drivers/scsi/libiscsi.c | 214 ++-- trunk/drivers/scsi/lpfc/lpfc_attr.c | 101 +- trunk/drivers/scsi/lpfc/lpfc_crtn.h | 1 - trunk/drivers/scsi/lpfc/lpfc_ct.c | 13 +- trunk/drivers/scsi/lpfc/lpfc_els.c | 21 +- trunk/drivers/scsi/lpfc/lpfc_hbadisc.c | 15 +- trunk/drivers/scsi/lpfc/lpfc_init.c | 13 +- trunk/drivers/scsi/lpfc/lpfc_mbox.c | 16 - trunk/drivers/scsi/lpfc/lpfc_nportdisc.c | 24 +- trunk/drivers/scsi/lpfc/lpfc_scsi.c | 21 +- trunk/drivers/scsi/lpfc/lpfc_sli.c | 57 +- trunk/drivers/scsi/lpfc/lpfc_sli.h | 20 - trunk/drivers/scsi/lpfc/lpfc_version.h | 2 +- trunk/drivers/scsi/megaraid/mega_common.h | 6 - trunk/drivers/scsi/megaraid/megaraid_ioctl.h | 4 - trunk/drivers/scsi/megaraid/megaraid_mbox.c | 42 +- trunk/drivers/scsi/megaraid/megaraid_mbox.h | 4 +- trunk/drivers/scsi/megaraid/megaraid_mm.c | 2 +- trunk/drivers/scsi/megaraid/megaraid_mm.h | 4 +- trunk/drivers/scsi/pdc_adma.c | 3 +- trunk/drivers/scsi/qla2xxx/qla_def.h | 1 - trunk/drivers/scsi/qla2xxx/qla_init.c | 11 - trunk/drivers/scsi/qla2xxx/qla_iocb.c | 1 - trunk/drivers/scsi/qla2xxx/qla_isr.c | 5 - trunk/drivers/scsi/qla2xxx/qla_os.c | 15 +- trunk/drivers/scsi/qla2xxx/qla_version.h | 4 +- trunk/drivers/scsi/sata_via.c | 117 +- trunk/drivers/scsi/scsi_error.c | 18 +- trunk/drivers/scsi/scsi_transport_iscsi.c | 15 +- trunk/drivers/scsi/sg.c | 8 +- trunk/drivers/scsi/sym53c8xx_2/sym_glue.c | 2 +- trunk/drivers/serial/sunsab.c | 9 - trunk/drivers/serial/sunzilog.c | 3 - trunk/drivers/usb/misc/cypress_cy7c63.c | 2 +- trunk/drivers/usb/serial/pl2303.c | 1 + trunk/drivers/usb/serial/pl2303.h | 4 + trunk/drivers/usb/storage/unusual_devs.h | 2 +- trunk/drivers/video/imacfb.c | 4 +- trunk/drivers/video/matrox/g450_pll.c | 8 - trunk/fs/block_dev.c | 114 +- trunk/fs/eventpoll.c | 4 +- trunk/fs/exec.c | 10 
+- trunk/fs/ext2/super.c | 2 +- trunk/fs/ext3/balloc.c | 6 +- trunk/fs/ioprio.c | 30 +- trunk/fs/jbd/commit.c | 6 +- trunk/fs/jbd/journal.c | 92 +- trunk/fs/jbd/transaction.c | 9 +- trunk/fs/lockd/svcsubs.c | 15 +- trunk/fs/minix/inode.c | 13 +- trunk/fs/namei.c | 11 +- trunk/fs/nfs/file.c | 8 +- trunk/fs/nfs/idmap.c | 4 +- trunk/fs/nfs/nfs4proc.c | 29 +- trunk/fs/nfs/nfs4xdr.c | 21 +- trunk/fs/nfs/read.c | 23 +- trunk/fs/partitions/sun.c | 2 +- trunk/fs/proc/proc_misc.c | 2 +- trunk/fs/reiserfs/xattr.c | 2 +- trunk/fs/udf/super.c | 2 +- trunk/fs/udf/truncate.c | 64 +- trunk/fs/ufs/inode.c | 35 +- trunk/fs/ufs/truncate.c | 77 +- trunk/include/asm-arm/arch-s3c2410/dma.h | 1 - trunk/include/asm-arm/arch-s3c2410/regs-rtc.h | 2 +- trunk/include/asm-arm/procinfo.h | 1 - trunk/include/asm-i386/mmzone.h | 2 +- trunk/include/asm-powerpc/pgalloc.h | 2 +- trunk/include/asm-powerpc/system.h | 9 - trunk/include/asm-powerpc/tsi108.h | 14 +- trunk/include/asm-powerpc/tsi108_irq.h | 124 -- trunk/include/asm-sparc64/pgtable.h | 2 +- trunk/include/linux/compat_ioctl.h | 1 - trunk/include/linux/fs.h | 3 +- trunk/include/linux/ioprio.h | 23 +- trunk/include/linux/jbd.h | 3 - trunk/include/linux/netfilter_bridge.h | 14 +- trunk/include/linux/nfs_xdr.h | 2 +- trunk/include/linux/node.h | 10 +- trunk/include/linux/sunrpc/rpc_pipe_fs.h | 4 +- trunk/include/linux/sunrpc/xprt.h | 2 +- trunk/include/linux/tty.h | 1 - trunk/include/linux/vt.h | 1 - trunk/include/net/sctp/sctp.h | 13 + trunk/include/net/sctp/sm.h | 3 +- trunk/include/scsi/libiscsi.h | 19 +- trunk/include/scsi/scsi_transport_iscsi.h | 4 + trunk/kernel/cpuset.c | 35 +- trunk/kernel/futex.c | 2 +- trunk/kernel/sched.c | 4 +- trunk/kernel/stop_machine.c | 1 + trunk/lib/ts_bm.c | 11 +- trunk/mm/swapfile.c | 3 +- trunk/net/bridge/br_forward.c | 10 +- trunk/net/dccp/ccids/ccid3.c | 153 +-- trunk/net/dccp/ccids/ccid3.h | 9 +- trunk/net/dccp/ccids/lib/loss_interval.c | 36 +- trunk/net/dccp/ccids/lib/loss_interval.h | 9 +- trunk/net/dccp/ccids/lib/packet_history.c | 168 ++- trunk/net/dccp/ccids/lib/packet_history.h | 17 +- trunk/net/dccp/ccids/lib/tfrc.h | 2 +- trunk/net/dccp/ccids/lib/tfrc_equation.c | 2 +- trunk/net/dccp/dccp.h | 10 +- trunk/net/dccp/options.c | 2 +- trunk/net/ipv4/netfilter/arp_tables.c | 3 +- trunk/net/ipv4/tcp_output.c | 1 - trunk/net/ipv6/tcp_ipv6.c | 2 +- trunk/net/sctp/sm_make_chunk.c | 30 +- trunk/net/sctp/sm_statefuns.c | 20 +- trunk/net/sctp/socket.c | 10 +- trunk/net/sunrpc/auth_gss/auth_gss.c | 3 +- trunk/net/sunrpc/clnt.c | 30 +- trunk/net/sunrpc/rpc_pipe.c | 55 +- 232 files changed, 4041 insertions(+), 6514 deletions(-) delete mode 100644 trunk/Documentation/connector/ucon.c delete mode 100644 trunk/Documentation/filesystems/relay.txt create mode 100644 trunk/Documentation/filesystems/relayfs.txt delete mode 100644 trunk/arch/powerpc/boot/dts/mpc8540ads.dts delete mode 100644 trunk/arch/powerpc/boot/dts/mpc8541cds.dts delete mode 100644 trunk/arch/powerpc/boot/dts/mpc8548cds.dts delete mode 100644 trunk/arch/powerpc/boot/dts/mpc8555cds.dts delete mode 100644 trunk/include/asm-powerpc/tsi108_irq.h diff --git a/[refs] b/[refs] index e5f976e9844a..ec80f371f45c 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: a422142cfdf90d889d8d3e2affb8311a381530b7 +refs/heads/master: 8fc897b00a7d81ffaa24e18881c2d6b10698ab0b diff --git a/trunk/CREDITS b/trunk/CREDITS index 0fe904ebb7c7..29be6d1fdf49 100644 --- a/trunk/CREDITS +++ b/trunk/CREDITS @@ -2209,7 +2209,7 @@ S: (address available on request) S: USA N: Ian McDonald 
-E: ian.mcdonald@jandi.co.nz +E: iam4@cs.waikato.ac.nz E: imcdnzl@gmail.com W: http://wand.net.nz/~iam4 W: http://imcdnzl.blogspot.com diff --git a/trunk/Documentation/connector/ucon.c b/trunk/Documentation/connector/ucon.c deleted file mode 100644 index d738cde2a8d5..000000000000 --- a/trunk/Documentation/connector/ucon.c +++ /dev/null @@ -1,206 +0,0 @@ -/* - * ucon.c - * - * Copyright (c) 2004+ Evgeniy Polyakov - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include - -#include -#include -#include - -#include -#include - -#include - -#include -#include -#include -#include -#include -#include - -#include - -#define DEBUG -#define NETLINK_CONNECTOR 11 - -#ifdef DEBUG -#define ulog(f, a...) fprintf(stdout, f, ##a) -#else -#define ulog(f, a...) do {} while (0) -#endif - -static int need_exit; -static __u32 seq; - -static int netlink_send(int s, struct cn_msg *msg) -{ - struct nlmsghdr *nlh; - unsigned int size; - int err; - char buf[128]; - struct cn_msg *m; - - size = NLMSG_SPACE(sizeof(struct cn_msg) + msg->len); - - nlh = (struct nlmsghdr *)buf; - nlh->nlmsg_seq = seq++; - nlh->nlmsg_pid = getpid(); - nlh->nlmsg_type = NLMSG_DONE; - nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh)); - nlh->nlmsg_flags = 0; - - m = NLMSG_DATA(nlh); -#if 0 - ulog("%s: [%08x.%08x] len=%u, seq=%u, ack=%u.\n", - __func__, msg->id.idx, msg->id.val, msg->len, msg->seq, msg->ack); -#endif - memcpy(m, msg, sizeof(*m) + msg->len); - - err = send(s, nlh, size, 0); - if (err == -1) - ulog("Failed to send: %s [%d].\n", - strerror(errno), errno); - - return err; -} - -int main(int argc, char *argv[]) -{ - int s; - char buf[1024]; - int len; - struct nlmsghdr *reply; - struct sockaddr_nl l_local; - struct cn_msg *data; - FILE *out; - time_t tm; - struct pollfd pfd; - - if (argc < 2) - out = stdout; - else { - out = fopen(argv[1], "a+"); - if (!out) { - ulog("Unable to open %s for writing: %s\n", - argv[1], strerror(errno)); - out = stdout; - } - } - - memset(buf, 0, sizeof(buf)); - - s = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR); - if (s == -1) { - perror("socket"); - return -1; - } - - l_local.nl_family = AF_NETLINK; - l_local.nl_groups = 0x123; /* bitmask of requested groups */ - l_local.nl_pid = 0; - - if (bind(s, (struct sockaddr *)&l_local, sizeof(struct sockaddr_nl)) == -1) { - perror("bind"); - close(s); - return -1; - } - -#if 0 - { - int on = 0x57; /* Additional group number */ - setsockopt(s, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &on, sizeof(on)); - } -#endif - if (0) { - int i, j; - - memset(buf, 0, sizeof(buf)); - - data = (struct cn_msg *)buf; - - data->id.idx = 0x123; - data->id.val = 0x456; - data->seq = seq++; - data->ack = 0; - data->len = 0; - - for (j=0; j<10; ++j) { - for (i=0; i<1000; ++i) { - len = netlink_send(s, data); - } - - ulog("%d messages have been sent to %08x.%08x.\n", i, data->id.idx, data->id.val); - 
} - - return 0; - } - - - pfd.fd = s; - - while (!need_exit) { - pfd.events = POLLIN; - pfd.revents = 0; - switch (poll(&pfd, 1, -1)) { - case 0: - need_exit = 1; - break; - case -1: - if (errno != EINTR) { - need_exit = 1; - break; - } - continue; - } - if (need_exit) - break; - - memset(buf, 0, sizeof(buf)); - len = recv(s, buf, sizeof(buf), 0); - if (len == -1) { - perror("recv buf"); - close(s); - return -1; - } - reply = (struct nlmsghdr *)buf; - - switch (reply->nlmsg_type) { - case NLMSG_ERROR: - fprintf(out, "Error message received.\n"); - fflush(out); - break; - case NLMSG_DONE: - data = (struct cn_msg *)NLMSG_DATA(reply); - - time(&tm); - fprintf(out, "%.24s : [%x.%x] [%08u.%08u].\n", - ctime(&tm), data->id.idx, data->id.val, data->seq, data->ack); - fflush(out); - break; - default: - break; - } - } - - close(s); - return 0; -} diff --git a/trunk/Documentation/cpusets.txt b/trunk/Documentation/cpusets.txt index 76b44290c154..159e2a0c3e80 100644 --- a/trunk/Documentation/cpusets.txt +++ b/trunk/Documentation/cpusets.txt @@ -217,12 +217,6 @@ exclusive cpuset. Also, the use of a Linux virtual file system (vfs) to represent the cpuset hierarchy provides for a familiar permission and name space for cpusets, with a minimum of additional kernel code. -The cpus file in the root (top_cpuset) cpuset is read-only. -It automatically tracks the value of cpu_online_map, using a CPU -hotplug notifier. If and when memory nodes can be hotplugged, -we expect to make the mems file in the root cpuset read-only -as well, and have it track the value of node_online_map. - 1.4 What are exclusive cpusets ? -------------------------------- diff --git a/trunk/Documentation/filesystems/00-INDEX b/trunk/Documentation/filesystems/00-INDEX index 16dec61d7671..66fdc0744fe0 100644 --- a/trunk/Documentation/filesystems/00-INDEX +++ b/trunk/Documentation/filesystems/00-INDEX @@ -62,8 +62,8 @@ ramfs-rootfs-initramfs.txt - info on the 'in memory' filesystems ramfs, rootfs and initramfs. reiser4.txt - info on the Reiser4 filesystem based on dancing tree algorithms. -relay.txt - - info on relay, for efficient streaming from kernel to user space. +relayfs.txt + - info on relayfs, for efficient streaming from kernel to user space. romfs.txt - description of the ROMFS filesystem. smbfs.txt diff --git a/trunk/Documentation/filesystems/relay.txt b/trunk/Documentation/filesystems/relay.txt deleted file mode 100644 index d6788dae0349..000000000000 --- a/trunk/Documentation/filesystems/relay.txt +++ /dev/null @@ -1,479 +0,0 @@ -relay interface (formerly relayfs) -================================== - -The relay interface provides a means for kernel applications to -efficiently log and transfer large quantities of data from the kernel -to userspace via user-defined 'relay channels'. - -A 'relay channel' is a kernel->user data relay mechanism implemented -as a set of per-cpu kernel buffers ('channel buffers'), each -represented as a regular file ('relay file') in user space. Kernel -clients write into the channel buffers using efficient write -functions; these automatically log into the current cpu's channel -buffer. User space applications mmap() or read() from the relay files -and retrieve the data as it becomes available. The relay files -themselves are files created in a host filesystem, e.g. debugfs, and -are associated with the channel buffers using the API described below. 
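As a concrete illustration of the user space side just described, a minimal reader that drains one channel buffer with read(2) might look something like the sketch below; the "cpu" base name and the /debug debugfs mount point are only assumptions borrowed from the examples later in this document, not fixed names.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* per-cpu relay file for cpu 0, assuming debugfs is mounted at /debug */
	fd = open("/debug/cpu0", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* bytes returned by read() are consumed and won't be returned again */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return 0;
}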
- -The format of the data logged into the channel buffers is completely -up to the kernel client; the relay interface does however provide -hooks which allow kernel clients to impose some structure on the -buffer data. The relay interface doesn't implement any form of data -filtering - this also is left to the kernel client. The purpose is to -keep things as simple as possible. - -This document provides an overview of the relay interface API. The -details of the function parameters are documented along with the -functions in the relay interface code - please see that for details. - -Semantics -========= - -Each relay channel has one buffer per CPU, each buffer has one or more -sub-buffers. Messages are written to the first sub-buffer until it is -too full to contain a new message, in which case it it is written to -the next (if available). Messages are never split across sub-buffers. -At this point, userspace can be notified so it empties the first -sub-buffer, while the kernel continues writing to the next. - -When notified that a sub-buffer is full, the kernel knows how many -bytes of it are padding i.e. unused space occurring because a complete -message couldn't fit into a sub-buffer. Userspace can use this -knowledge to copy only valid data. - -After copying it, userspace can notify the kernel that a sub-buffer -has been consumed. - -A relay channel can operate in a mode where it will overwrite data not -yet collected by userspace, and not wait for it to be consumed. - -The relay channel itself does not provide for communication of such -data between userspace and kernel, allowing the kernel side to remain -simple and not impose a single interface on userspace. It does -provide a set of examples and a separate helper though, described -below. - -The read() interface both removes padding and internally consumes the -read sub-buffers; thus in cases where read(2) is being used to drain -the channel buffers, special-purpose communication between kernel and -user isn't necessary for basic operation. - -One of the major goals of the relay interface is to provide a low -overhead mechanism for conveying kernel data to userspace. While the -read() interface is easy to use, it's not as efficient as the mmap() -approach; the example code attempts to make the tradeoff between the -two approaches as small as possible. - -klog and relay-apps example code -================================ - -The relay interface itself is ready to use, but to make things easier, -a couple simple utility functions and a set of examples are provided. - -The relay-apps example tarball, available on the relay sourceforge -site, contains a set of self-contained examples, each consisting of a -pair of .c files containing boilerplate code for each of the user and -kernel sides of a relay application. When combined these two sets of -boilerplate code provide glue to easily stream data to disk, without -having to bother with mundane housekeeping chores. - -The 'klog debugging functions' patch (klog.patch in the relay-apps -tarball) provides a couple of high-level logging functions to the -kernel which allow writing formatted text or raw data to a channel, -regardless of whether a channel to write into exists or not, or even -whether the relay interface is compiled into the kernel or not. These -functions allow you to put unconditional 'trace' statements anywhere -in the kernel or kernel modules; only when there is a 'klog handler' -registered will data actually be logged (see the klog and kleak -examples for details). 
- -It is of course possible to use the relay interface from scratch, -i.e. without using any of the relay-apps example code or klog, but -you'll have to implement communication between userspace and kernel, -allowing both to convey the state of buffers (full, empty, amount of -padding). The read() interface both removes padding and internally -consumes the read sub-buffers; thus in cases where read(2) is being -used to drain the channel buffers, special-purpose communication -between kernel and user isn't necessary for basic operation. Things -such as buffer-full conditions would still need to be communicated via -some channel though. - -klog and the relay-apps examples can be found in the relay-apps -tarball on http://relayfs.sourceforge.net - -The relay interface user space API -================================== - -The relay interface implements basic file operations for user space -access to relay channel buffer data. Here are the file operations -that are available and some comments regarding their behavior: - -open() enables user to open an _existing_ channel buffer. - -mmap() results in channel buffer being mapped into the caller's - memory space. Note that you can't do a partial mmap - you - must map the entire file, which is NRBUF * SUBBUFSIZE. - -read() read the contents of a channel buffer. The bytes read are - 'consumed' by the reader, i.e. they won't be available - again to subsequent reads. If the channel is being used - in no-overwrite mode (the default), it can be read at any - time even if there's an active kernel writer. If the - channel is being used in overwrite mode and there are - active channel writers, results may be unpredictable - - users should make sure that all logging to the channel has - ended before using read() with overwrite mode. Sub-buffer - padding is automatically removed and will not be seen by - the reader. - -sendfile() transfer data from a channel buffer to an output file - descriptor. Sub-buffer padding is automatically removed - and will not be seen by the reader. - -poll() POLLIN/POLLRDNORM/POLLERR supported. User applications are - notified when sub-buffer boundaries are crossed. - -close() decrements the channel buffer's refcount. When the refcount - reaches 0, i.e. when no process or kernel client has the - buffer open, the channel buffer is freed. - -In order for a user application to make use of relay files, the -host filesystem must be mounted. For example, - - mount -t debugfs debugfs /debug - -NOTE: the host filesystem doesn't need to be mounted for kernel - clients to create or use channels - it only needs to be - mounted when user space applications need access to the buffer - data. - - -The relay interface kernel API -============================== - -Here's a summary of the API the relay interface provides to in-kernel clients: - -TBD(curr. 
line MT:/API/) - channel management functions: - - relay_open(base_filename, parent, subbuf_size, n_subbufs, - callbacks) - relay_close(chan) - relay_flush(chan) - relay_reset(chan) - - channel management typically called on instigation of userspace: - - relay_subbufs_consumed(chan, cpu, subbufs_consumed) - - write functions: - - relay_write(chan, data, length) - __relay_write(chan, data, length) - relay_reserve(chan, length) - - callbacks: - - subbuf_start(buf, subbuf, prev_subbuf, prev_padding) - buf_mapped(buf, filp) - buf_unmapped(buf, filp) - create_buf_file(filename, parent, mode, buf, is_global) - remove_buf_file(dentry) - - helper functions: - - relay_buf_full(buf) - subbuf_start_reserve(buf, length) - - -Creating a channel ------------------- - -relay_open() is used to create a channel, along with its per-cpu -channel buffers. Each channel buffer will have an associated file -created for it in the host filesystem, which can be and mmapped or -read from in user space. The files are named basename0...basenameN-1 -where N is the number of online cpus, and by default will be created -in the root of the filesystem (if the parent param is NULL). If you -want a directory structure to contain your relay files, you should -create it using the host filesystem's directory creation function, -e.g. debugfs_create_dir(), and pass the parent directory to -relay_open(). Users are responsible for cleaning up any directory -structure they create, when the channel is closed - again the host -filesystem's directory removal functions should be used for that, -e.g. debugfs_remove(). - -In order for a channel to be created and the host filesystem's files -associated with its channel buffers, the user must provide definitions -for two callback functions, create_buf_file() and remove_buf_file(). -create_buf_file() is called once for each per-cpu buffer from -relay_open() and allows the user to create the file which will be used -to represent the corresponding channel buffer. The callback should -return the dentry of the file created to represent the channel buffer. -remove_buf_file() must also be defined; it's responsible for deleting -the file(s) created in create_buf_file() and is called during -relay_close(). - -Here are some typical definitions for these callbacks, in this case -using debugfs: - -/* - * create_buf_file() callback. Creates relay file in debugfs. - */ -static struct dentry *create_buf_file_handler(const char *filename, - struct dentry *parent, - int mode, - struct rchan_buf *buf, - int *is_global) -{ - return debugfs_create_file(filename, mode, parent, buf, - &relay_file_operations); -} - -/* - * remove_buf_file() callback. Removes relay file from debugfs. - */ -static int remove_buf_file_handler(struct dentry *dentry) -{ - debugfs_remove(dentry); - - return 0; -} - -/* - * relay interface callbacks - */ -static struct rchan_callbacks relay_callbacks = -{ - .create_buf_file = create_buf_file_handler, - .remove_buf_file = remove_buf_file_handler, -}; - -And an example relay_open() invocation using them: - - chan = relay_open("cpu", NULL, SUBBUF_SIZE, N_SUBBUFS, &relay_callbacks); - -If the create_buf_file() callback fails, or isn't defined, channel -creation and thus relay_open() will fail. - -The total size of each per-cpu buffer is calculated by multiplying the -number of sub-buffers by the sub-buffer size passed into relay_open(). 
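To make the creation step more concrete, here is a rough sketch of wiring relay_open() and the relay_callbacks defined above into a module's init and exit paths; the "example-relay" directory name and the SUBBUF_SIZE/N_SUBBUFS values are illustrative assumptions, not requirements.

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/relay.h>

#define SUBBUF_SIZE	(256 * 1024)	/* assumed sub-buffer size */
#define N_SUBBUFS	4		/* assumed sub-buffer count */

static struct rchan *chan;
static struct dentry *dir;

static int __init example_relay_init(void)
{
	/* parent directory for the per-cpu relay files */
	dir = debugfs_create_dir("example-relay", NULL);

	chan = relay_open("cpu", dir, SUBBUF_SIZE, N_SUBBUFS, &relay_callbacks);
	if (!chan) {
		debugfs_remove(dir);
		return -ENOMEM;
	}
	return 0;
}

static void __exit example_relay_exit(void)
{
	relay_close(chan);
	debugfs_remove(dir);
}

module_init(example_relay_init);
module_exit(example_relay_exit);
MODULE_LICENSE("GPL");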
-The idea behind sub-buffers is that they're basically an extension of -double-buffering to N buffers, and they also allow applications to -easily implement random-access-on-buffer-boundary schemes, which can -be important for some high-volume applications. The number and size -of sub-buffers is completely dependent on the application and even for -the same application, different conditions will warrant different -values for these parameters at different times. Typically, the right -values to use are best decided after some experimentation; in general, -though, it's safe to assume that having only 1 sub-buffer is a bad -idea - you're guaranteed to either overwrite data or lose events -depending on the channel mode being used. - -The create_buf_file() implementation can also be defined in such a way -as to allow the creation of a single 'global' buffer instead of the -default per-cpu set. This can be useful for applications interested -mainly in seeing the relative ordering of system-wide events without -the need to bother with saving explicit timestamps for the purpose of -merging/sorting per-cpu files in a postprocessing step. - -To have relay_open() create a global buffer, the create_buf_file() -implementation should set the value of the is_global outparam to a -non-zero value in addition to creating the file that will be used to -represent the single buffer. In the case of a global buffer, -create_buf_file() and remove_buf_file() will be called only once. The -normal channel-writing functions, e.g. relay_write(), can still be -used - writes from any cpu will transparently end up in the global -buffer - but since it is a global buffer, callers should make sure -they use the proper locking for such a buffer, either by wrapping -writes in a spinlock, or by copying a write function from relay.h and -creating a local version that internally does the proper locking. - -Channel 'modes' ---------------- - -relay channels can be used in either of two modes - 'overwrite' or -'no-overwrite'. The mode is entirely determined by the implementation -of the subbuf_start() callback, as described below. The default if no -subbuf_start() callback is defined is 'no-overwrite' mode. If the -default mode suits your needs, and you plan to use the read() -interface to retrieve channel data, you can ignore the details of this -section, as it pertains mainly to mmap() implementations. - -In 'overwrite' mode, also known as 'flight recorder' mode, writes -continuously cycle around the buffer and will never fail, but will -unconditionally overwrite old data regardless of whether it's actually -been consumed. In no-overwrite mode, writes will fail, i.e. data will -be lost, if the number of unconsumed sub-buffers equals the total -number of sub-buffers in the channel. It should be clear that if -there is no consumer or if the consumer can't consume sub-buffers fast -enough, data will be lost in either case; the only difference is -whether data is lost from the beginning or the end of a buffer. - -As explained above, a relay channel is made of up one or more -per-cpu channel buffers, each implemented as a circular buffer -subdivided into one or more sub-buffers. Messages are written into -the current sub-buffer of the channel's current per-cpu buffer via the -write functions described below. Whenever a message can't fit into -the current sub-buffer, because there's no room left for it, the -client is notified via the subbuf_start() callback that a switch to a -new sub-buffer is about to occur. 
The client uses this callback to 1) -initialize the next sub-buffer if appropriate 2) finalize the previous -sub-buffer if appropriate and 3) return a boolean value indicating -whether or not to actually move on to the next sub-buffer. - -To implement 'no-overwrite' mode, the userspace client would provide -an implementation of the subbuf_start() callback something like the -following: - -static int subbuf_start(struct rchan_buf *buf, - void *subbuf, - void *prev_subbuf, - unsigned int prev_padding) -{ - if (prev_subbuf) - *((unsigned *)prev_subbuf) = prev_padding; - - if (relay_buf_full(buf)) - return 0; - - subbuf_start_reserve(buf, sizeof(unsigned int)); - - return 1; -} - -If the current buffer is full, i.e. all sub-buffers remain unconsumed, -the callback returns 0 to indicate that the buffer switch should not -occur yet, i.e. until the consumer has had a chance to read the -current set of ready sub-buffers. For the relay_buf_full() function -to make sense, the consumer is reponsible for notifying the relay -interface when sub-buffers have been consumed via -relay_subbufs_consumed(). Any subsequent attempts to write into the -buffer will again invoke the subbuf_start() callback with the same -parameters; only when the consumer has consumed one or more of the -ready sub-buffers will relay_buf_full() return 0, in which case the -buffer switch can continue. - -The implementation of the subbuf_start() callback for 'overwrite' mode -would be very similar: - -static int subbuf_start(struct rchan_buf *buf, - void *subbuf, - void *prev_subbuf, - unsigned int prev_padding) -{ - if (prev_subbuf) - *((unsigned *)prev_subbuf) = prev_padding; - - subbuf_start_reserve(buf, sizeof(unsigned int)); - - return 1; -} - -In this case, the relay_buf_full() check is meaningless and the -callback always returns 1, causing the buffer switch to occur -unconditionally. It's also meaningless for the client to use the -relay_subbufs_consumed() function in this mode, as it's never -consulted. - -The default subbuf_start() implementation, used if the client doesn't -define any callbacks, or doesn't define the subbuf_start() callback, -implements the simplest possible 'no-overwrite' mode, i.e. it does -nothing but return 0. - -Header information can be reserved at the beginning of each sub-buffer -by calling the subbuf_start_reserve() helper function from within the -subbuf_start() callback. This reserved area can be used to store -whatever information the client wants. In the example above, room is -reserved in each sub-buffer to store the padding count for that -sub-buffer. This is filled in for the previous sub-buffer in the -subbuf_start() implementation; the padding value for the previous -sub-buffer is passed into the subbuf_start() callback along with a -pointer to the previous sub-buffer, since the padding value isn't -known until a sub-buffer is filled. The subbuf_start() callback is -also called for the first sub-buffer when the channel is opened, to -give the client a chance to reserve space in it. In this case the -previous sub-buffer pointer passed into the callback will be NULL, so -the client should check the value of the prev_subbuf pointer before -writing into the previous sub-buffer. - -Writing to a channel --------------------- - -Kernel clients write data into the current cpu's channel buffer using -relay_write() or __relay_write(). relay_write() is the main logging -function - it uses local_irqsave() to protect the buffer and should be -used if you might be logging from interrupt context. 
If you know -you'll never be logging from interrupt context, you can use -__relay_write(), which only disables preemption. These functions -don't return a value, so you can't determine whether or not they -failed - the assumption is that you wouldn't want to check a return -value in the fast logging path anyway, and that they'll always succeed -unless the buffer is full and no-overwrite mode is being used, in -which case you can detect a failed write in the subbuf_start() -callback by calling the relay_buf_full() helper function. - -relay_reserve() is used to reserve a slot in a channel buffer which -can be written to later. This would typically be used in applications -that need to write directly into a channel buffer without having to -stage data in a temporary buffer beforehand. Because the actual write -may not happen immediately after the slot is reserved, applications -using relay_reserve() can keep a count of the number of bytes actually -written, either in space reserved in the sub-buffers themselves or as -a separate array. See the 'reserve' example in the relay-apps tarball -at http://relayfs.sourceforge.net for an example of how this can be -done. Because the write is under control of the client and is -separated from the reserve, relay_reserve() doesn't protect the buffer -at all - it's up to the client to provide the appropriate -synchronization when using relay_reserve(). - -Closing a channel ------------------ - -The client calls relay_close() when it's finished using the channel. -The channel and its associated buffers are destroyed when there are no -longer any references to any of the channel buffers. relay_flush() -forces a sub-buffer switch on all the channel buffers, and can be used -to finalize and process the last sub-buffers before the channel is -closed. - -Misc ----- - -Some applications may want to keep a channel around and re-use it -rather than open and close a new channel for each use. relay_reset() -can be used for this purpose - it resets a channel to its initial -state without reallocating channel buffer memory or destroying -existing mappings. It should however only be called when it's safe to -do so, i.e. when the channel isn't currently being written to. - -Finally, there are a couple of utility callbacks that can be used for -different purposes. buf_mapped() is called whenever a channel buffer -is mmapped from user space and buf_unmapped() is called when it's -unmapped. The client can use this notification to trigger actions -within the kernel application, such as enabling/disabling logging to -the channel. - - -Resources -========= - -For news, example code, mailing list, etc. see the relay interface homepage: - - http://relayfs.sourceforge.net - - -Credits -======= - -The ideas and specs for the relay interface came about as a result of -discussions on tracing involving the following: - -Michel Dagenais -Richard Moore -Bob Wisniewski -Karim Yaghmour -Tom Zanussi - -Also thanks to Hubertus Franke for a lot of useful suggestions and bug -reports. 
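To round out the kernel-side picture, here is a hedged sketch of the logging path itself, reusing the chan handle from the creation sketch earlier; struct example_event and log_example_event() are hypothetical names, not part of the relay API.

#include <linux/jiffies.h>
#include <linux/relay.h>

/* purely illustrative record format - the layout is up to the client */
struct example_event {
	unsigned long	stamp;		/* jiffies at log time */
	unsigned int	value;		/* whatever the client wants to record */
};

static void log_example_event(unsigned int value)
{
	struct example_event ev = {
		.stamp	= jiffies,
		.value	= value,
	};

	/* safe from interrupt context; logs into the current cpu's buffer */
	relay_write(chan, &ev, sizeof(ev));
}

Because relay_write() copies the record with interrupts disabled, small fixed-size binary records like this keep the fast path cheap, and the matching user space reader can simply read() whole multiples of the record size.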
diff --git a/trunk/Documentation/filesystems/relayfs.txt b/trunk/Documentation/filesystems/relayfs.txt new file mode 100644 index 000000000000..5832377b7340 --- /dev/null +++ b/trunk/Documentation/filesystems/relayfs.txt @@ -0,0 +1,442 @@ + +relayfs - a high-speed data relay filesystem +============================================ + +relayfs is a filesystem designed to provide an efficient mechanism for +tools and facilities to relay large and potentially sustained streams +of data from kernel space to user space. + +The main abstraction of relayfs is the 'channel'. A channel consists +of a set of per-cpu kernel buffers each represented by a file in the +relayfs filesystem. Kernel clients write into a channel using +efficient write functions which automatically log to the current cpu's +channel buffer. User space applications mmap() the per-cpu files and +retrieve the data as it becomes available. + +The format of the data logged into the channel buffers is completely +up to the relayfs client; relayfs does however provide hooks which +allow clients to impose some structure on the buffer data. Nor does +relayfs implement any form of data filtering - this also is left to +the client. The purpose is to keep relayfs as simple as possible. + +This document provides an overview of the relayfs API. The details of +the function parameters are documented along with the functions in the +filesystem code - please see that for details. + +Semantics +========= + +Each relayfs channel has one buffer per CPU, each buffer has one or +more sub-buffers. Messages are written to the first sub-buffer until +it is too full to contain a new message, in which case it is +written to the next (if available). Messages are never split across +sub-buffers. At this point, userspace can be notified so it empties +the first sub-buffer, while the kernel continues writing to the next. + +When notified that a sub-buffer is full, the kernel knows how many +bytes of it are padding i.e. unused. Userspace can use this knowledge +to copy only valid data. + +After copying it, userspace can notify the kernel that a sub-buffer +has been consumed. + +relayfs can operate in a mode where it will overwrite data not yet +collected by userspace, and not wait for it to consume it. + +relayfs itself does not provide for communication of such data between +userspace and kernel, allowing the kernel side to remain simple and +not impose a single interface on userspace. It does provide a set of +examples and a separate helper though, described below. + +klog and relay-apps example code +================================ + +relayfs itself is ready to use, but to make things easier, a couple +simple utility functions and a set of examples are provided. + +The relay-apps example tarball, available on the relayfs sourceforge +site, contains a set of self-contained examples, each consisting of a +pair of .c files containing boilerplate code for each of the user and +kernel sides of a relayfs application; combined these two sets of +boilerplate code provide glue to easily stream data to disk, without +having to bother with mundane housekeeping chores. + +The 'klog debugging functions' patch (klog.patch in the relay-apps +tarball) provides a couple of high-level logging functions to the +kernel which allow writing formatted text or raw data to a channel, +regardless of whether a channel to write into exists or not, or +whether relayfs is compiled into the kernel or is configured as a +module.
These functions allow you to put unconditional 'trace' +statements anywhere in the kernel or kernel modules; only when there +is a 'klog handler' registered will data actually be logged (see the +klog and kleak examples for details). + +It is of course possible to use relayfs from scratch i.e. without +using any of the relay-apps example code or klog, but you'll have to +implement communication between userspace and kernel, allowing both to +convey the state of buffers (full, empty, amount of padding). + +klog and the relay-apps examples can be found in the relay-apps +tarball on http://relayfs.sourceforge.net + + +The relayfs user space API +========================== + +relayfs implements basic file operations for user space access to +relayfs channel buffer data. Here are the file operations that are +available and some comments regarding their behavior: + +open() enables user to open an _existing_ buffer. + +mmap() results in channel buffer being mapped into the caller's + memory space. Note that you can't do a partial mmap - you must + map the entire file, which is NRBUF * SUBBUFSIZE. + +read() read the contents of a channel buffer. The bytes read are + 'consumed' by the reader i.e. they won't be available again + to subsequent reads. If the channel is being used in + no-overwrite mode (the default), it can be read at any time + even if there's an active kernel writer. If the channel is + being used in overwrite mode and there are active channel + writers, results may be unpredictable - users should make + sure that all logging to the channel has ended before using + read() with overwrite mode. + +poll() POLLIN/POLLRDNORM/POLLERR supported. User applications are + notified when sub-buffer boundaries are crossed. + +close() decrements the channel buffer's refcount. When the refcount + reaches 0 i.e. when no process or kernel client has the buffer + open, the channel buffer is freed. + + +In order for a user application to make use of relayfs files, the +relayfs filesystem must be mounted. For example, + + mount -t relayfs relayfs /mnt/relay + +NOTE: relayfs doesn't need to be mounted for kernel clients to create + or use channels - it only needs to be mounted when user space + applications need access to the buffer data. + + +The relayfs kernel API +====================== + +Here's a summary of the API relayfs provides to in-kernel clients: + + + channel management functions: + + relay_open(base_filename, parent, subbuf_size, n_subbufs, + callbacks) + relay_close(chan) + relay_flush(chan) + relay_reset(chan) + relayfs_create_dir(name, parent) + relayfs_remove_dir(dentry) + relayfs_create_file(name, parent, mode, fops, data) + relayfs_remove_file(dentry) + + channel management typically called on instigation of userspace: + + relay_subbufs_consumed(chan, cpu, subbufs_consumed) + + write functions: + + relay_write(chan, data, length) + __relay_write(chan, data, length) + relay_reserve(chan, length) + + callbacks: + + subbuf_start(buf, subbuf, prev_subbuf, prev_padding) + buf_mapped(buf, filp) + buf_unmapped(buf, filp) + create_buf_file(filename, parent, mode, buf, is_global) + remove_buf_file(dentry) + + helper functions: + + relay_buf_full(buf) + subbuf_start_reserve(buf, length) + + +Creating a channel +------------------ + +relay_open() is used to create a channel, along with its per-cpu +channel buffers. Each channel buffer will have an associated file +created for it in the relayfs filesystem, which can be opened and +mmapped from user space if desired. 
The files are named +basename0...basenameN-1 where N is the number of online cpus, and by +default will be created in the root of the filesystem. If you want a +directory structure to contain your relayfs files, you can create it +with relayfs_create_dir() and pass the parent directory to +relay_open(). Clients are responsible for cleaning up any directory +structure they create when the channel is closed - use +relayfs_remove_dir() for that. + +The total size of each per-cpu buffer is calculated by multiplying the +number of sub-buffers by the sub-buffer size passed into relay_open(). +The idea behind sub-buffers is that they're basically an extension of +double-buffering to N buffers, and they also allow applications to +easily implement random-access-on-buffer-boundary schemes, which can +be important for some high-volume applications. The number and size +of sub-buffers is completely dependent on the application and even for +the same application, different conditions will warrant different +values for these parameters at different times. Typically, the right +values to use are best decided after some experimentation; in general, +though, it's safe to assume that having only 1 sub-buffer is a bad +idea - you're guaranteed to either overwrite data or lose events +depending on the channel mode being used. + +Channel 'modes' +--------------- + +relayfs channels can be used in either of two modes - 'overwrite' or +'no-overwrite'. The mode is entirely determined by the implementation +of the subbuf_start() callback, as described below. In 'overwrite' +mode, also known as 'flight recorder' mode, writes continuously cycle +around the buffer and will never fail, but will unconditionally +overwrite old data regardless of whether it's actually been consumed. +In no-overwrite mode, writes will fail i.e. data will be lost, if the +number of unconsumed sub-buffers equals the total number of +sub-buffers in the channel. It should be clear that if there is no +consumer or if the consumer can't consume sub-buffers fast enough, +data will be lost in either case; the only difference is whether data +is lost from the beginning or the end of a buffer. + +As explained above, a relayfs channel is made up of one or more +per-cpu channel buffers, each implemented as a circular buffer +subdivided into one or more sub-buffers. Messages are written into +the current sub-buffer of the channel's current per-cpu buffer via the +write functions described below. Whenever a message can't fit into +the current sub-buffer, because there's no room left for it, the +client is notified via the subbuf_start() callback that a switch to a +new sub-buffer is about to occur. The client uses this callback to 1) +initialize the next sub-buffer if appropriate 2) finalize the previous +sub-buffer if appropriate and 3) return a boolean value indicating +whether or not to actually go ahead with the sub-buffer switch. + +To implement 'no-overwrite' mode, the userspace client would provide +an implementation of the subbuf_start() callback something like the +following: + +static int subbuf_start(struct rchan_buf *buf, + void *subbuf, + void *prev_subbuf, + unsigned int prev_padding) +{ + if (prev_subbuf) + *((unsigned *)prev_subbuf) = prev_padding; + + if (relay_buf_full(buf)) + return 0; + + subbuf_start_reserve(buf, sizeof(unsigned int)); + + return 1; +} + +If the current buffer is full i.e. all sub-buffers remain unconsumed, +the callback returns 0 to indicate that the buffer switch should not +occur yet i.e.
until the consumer has had a chance to read the current +set of ready sub-buffers. For the relay_buf_full() function to make +sense, the consumer is responsible for notifying relayfs when +sub-buffers have been consumed via relay_subbufs_consumed(). Any +subsequent attempts to write into the buffer will again invoke the +subbuf_start() callback with the same parameters; only when the +consumer has consumed one or more of the ready sub-buffers will +relay_buf_full() return 0, in which case the buffer switch can +continue. + +The implementation of the subbuf_start() callback for 'overwrite' mode +would be very similar: + +static int subbuf_start(struct rchan_buf *buf, + void *subbuf, + void *prev_subbuf, + unsigned int prev_padding) +{ + if (prev_subbuf) + *((unsigned *)prev_subbuf) = prev_padding; + + subbuf_start_reserve(buf, sizeof(unsigned int)); + + return 1; +} + +In this case, the relay_buf_full() check is meaningless and the +callback always returns 1, causing the buffer switch to occur +unconditionally. It's also meaningless for the client to use the +relay_subbufs_consumed() function in this mode, as it's never +consulted. + +The default subbuf_start() implementation, used if the client doesn't +define any callbacks, or doesn't define the subbuf_start() callback, +implements the simplest possible 'no-overwrite' mode i.e. it does +nothing but return 0. + +Header information can be reserved at the beginning of each sub-buffer +by calling the subbuf_start_reserve() helper function from within the +subbuf_start() callback. This reserved area can be used to store +whatever information the client wants. In the example above, room is +reserved in each sub-buffer to store the padding count for that +sub-buffer. This is filled in for the previous sub-buffer in the +subbuf_start() implementation; the padding value for the previous +sub-buffer is passed into the subbuf_start() callback along with a +pointer to the previous sub-buffer, since the padding value isn't +known until a sub-buffer is filled. The subbuf_start() callback is +also called for the first sub-buffer when the channel is opened, to +give the client a chance to reserve space in it. In this case the +previous sub-buffer pointer passed into the callback will be NULL, so +the client should check the value of the prev_subbuf pointer before +writing into the previous sub-buffer. + +Writing to a channel +-------------------- + +Kernel clients write data into the current cpu's channel buffer using +relay_write() or __relay_write(). relay_write() is the main logging +function - it uses local_irq_save() to protect the buffer and should be +used if you might be logging from interrupt context. If you know +you'll never be logging from interrupt context, you can use +__relay_write(), which only disables preemption. These functions +don't return a value, so you can't determine whether or not they +failed - the assumption is that you wouldn't want to check a return +value in the fast logging path anyway, and that they'll always succeed +unless the buffer is full and no-overwrite mode is being used, in +which case you can detect a failed write in the subbuf_start() +callback by calling the relay_buf_full() helper function. + +relay_reserve() is used to reserve a slot in a channel buffer which +can be written to later. This would typically be used in applications +that need to write directly into a channel buffer without having to +stage data in a temporary buffer beforehand.
Because the actual write +may not happen immediately after the slot is reserved, applications +using relay_reserve() can keep a count of the number of bytes actually +written, either in space reserved in the sub-buffers themselves or as +a separate array. See the 'reserve' example in the relay-apps tarball +at http://relayfs.sourceforge.net for an example of how this can be +done. Because the write is under control of the client and is +separated from the reserve, relay_reserve() doesn't protect the buffer +at all - it's up to the client to provide the appropriate +synchronization when using relay_reserve(). + +Closing a channel +----------------- + +The client calls relay_close() when it's finished using the channel. +The channel and its associated buffers are destroyed when there are no +longer any references to any of the channel buffers. relay_flush() +forces a sub-buffer switch on all the channel buffers, and can be used +to finalize and process the last sub-buffers before the channel is +closed. + +Creating non-relay files +------------------------ + +relay_open() automatically creates files in the relayfs filesystem to +represent the per-cpu kernel buffers; it's often useful for +applications to be able to create their own files alongside the relay +files in the relayfs filesystem as well e.g. 'control' files much like +those created in /proc or debugfs for similar purposes, used to +communicate control information between the kernel and user sides of a +relayfs application. For this purpose the relayfs_create_file() and +relayfs_remove_file() API functions exist. For relayfs_create_file(), +the caller passes in a set of user-defined file operations to be used +for the file and an optional void * to a user-specified data item, +which will be accessible via inode->u.generic_ip (see the relay-apps +tarball for examples). The file_operations are a required parameter +to relayfs_create_file() and thus the semantics of these files are +completely defined by the caller. + +See the relay-apps tarball at http://relayfs.sourceforge.net for +examples of how these non-relay files are meant to be used. + +Creating relay files in other filesystems +----------------------------------------- + +By default of course, relay_open() creates relay files in the relayfs +filesystem. Because relay_file_operations is exported, however, it's +also possible to create and use relay files in other pseudo-filesytems +such as debugfs. + +For this purpose, two callback functions are provided, +create_buf_file() and remove_buf_file(). create_buf_file() is called +once for each per-cpu buffer from relay_open() to allow the client to +create a file to be used to represent the corresponding buffer; if +this callback is not defined, the default implementation will create +and return a file in the relayfs filesystem to represent the buffer. +The callback should return the dentry of the file created to represent +the relay buffer. Note that the parent directory passed to +relay_open() (and passed along to the callback), if specified, must +exist in the same filesystem the new relay file is created in. If +create_buf_file() is defined, remove_buf_file() must also be defined; +it's responsible for deleting the file(s) created in create_buf_file() +and is called during relay_close(). + +The create_buf_file() implementation can also be defined in such a way +as to allow the creation of a single 'global' buffer instead of the +default per-cpu set. 
This can be useful for applications interested +mainly in seeing the relative ordering of system-wide events without +the need to bother with saving explicit timestamps for the purpose of +merging/sorting per-cpu files in a postprocessing step. + +To have relay_open() create a global buffer, the create_buf_file() +implementation should set the value of the is_global outparam to a +non-zero value in addition to creating the file that will be used to +represent the single buffer. In the case of a global buffer, +create_buf_file() and remove_buf_file() will be called only once. The +normal channel-writing functions e.g. relay_write() can still be used +- writes from any cpu will transparently end up in the global buffer - +but since it is a global buffer, callers should make sure they use the +proper locking for such a buffer, either by wrapping writes in a +spinlock, or by copying a write function from relayfs_fs.h and +creating a local version that internally does the proper locking. + +See the 'exported-relayfile' examples in the relay-apps tarball for +examples of creating and using relay files in debugfs. + +Misc +---- + +Some applications may want to keep a channel around and re-use it +rather than open and close a new channel for each use. relay_reset() +can be used for this purpose - it resets a channel to its initial +state without reallocating channel buffer memory or destroying +existing mappings. It should however only be called when it's safe to +do so i.e. when the channel isn't currently being written to. + +Finally, there are a couple of utility callbacks that can be used for +different purposes. buf_mapped() is called whenever a channel buffer +is mmapped from user space and buf_unmapped() is called when it's +unmapped. The client can use this notification to trigger actions +within the kernel application, such as enabling/disabling logging to +the channel. + + +Resources +========= + +For news, example code, mailing list, etc. see the relayfs homepage: + + http://relayfs.sourceforge.net + + +Credits +======= + +The ideas and specs for relayfs came about as a result of discussions +on tracing involving the following: + +Michel Dagenais +Richard Moore +Bob Wisniewski +Karim Yaghmour +Tom Zanussi + +Also thanks to Hubertus Franke for a lot of useful suggestions and bug +reports. diff --git a/trunk/Documentation/input/joystick.txt b/trunk/Documentation/input/joystick.txt index 841c353297e6..d53b857a3710 100644 --- a/trunk/Documentation/input/joystick.txt +++ b/trunk/Documentation/input/joystick.txt @@ -39,6 +39,7 @@ them. Bug reports and success stories are also welcome. The input project website is at: + http://www.suse.cz/development/input/ http://atrey.karlin.mff.cuni.cz/~vojtech/input/ There is also a mailing list for the driver at: diff --git a/trunk/Documentation/scsi/ChangeLog.megaraid b/trunk/Documentation/scsi/ChangeLog.megaraid index a056bbe67c7e..c173806c91fa 100644 --- a/trunk/Documentation/scsi/ChangeLog.megaraid +++ b/trunk/Documentation/scsi/ChangeLog.megaraid @@ -1,126 +1,3 @@ -Release Date : Fri May 19 09:31:45 EST 2006 - Seokmann Ju -Current Version : 2.20.4.9 (scsi module), 2.20.2.6 (cmm module) -Older Version : 2.20.4.8 (scsi module), 2.20.2.6 (cmm module) - -1. Fixed a bug in megaraid_init_mbox(). - Customer reported "garbage in file on x86_64 platform". - Root Cause: the driver registered controllers as 64-bit DMA capable - for those which are not support it. 
- Fix: Made change in the function inserting identification machanism - identifying 64-bit DMA capable controllers. - - > -----Original Message----- - > From: Vasily Averin [mailto:vvs@sw.ru] - > Sent: Thursday, May 04, 2006 2:49 PM - > To: linux-scsi@vger.kernel.org; Kolli, Neela; Mukker, Atul; - > Ju, Seokmann; Bagalkote, Sreenivas; - > James.Bottomley@SteelEye.com; devel@openvz.org - > Subject: megaraid_mbox: garbage in file - > - > Hello all, - > - > I've investigated customers claim on the unstable work of - > their node and found a - > strange effect: reading from some files leads to the - > "attempt to access beyond end of device" messages. - > - > I've checked filesystem, memory on the node, motherboard BIOS - > version, but it - > does not help and issue still has been reproduced by simple - > file reading. - > - > Reproducer is simple: - > - > echo 0xffffffff >/proc/sys/dev/scsi/logging_level ; - > cat /vz/private/101/root/etc/ld.so.cache >/tmp/ttt ; - > echo 0 >/proc/sys/dev/scsi/logging - > - > It leads to the following messages in dmesg - > - > sd_init_command: disk=sda, block=871769260, count=26 - > sda : block=871769260 - > sda : reading 26/26 512 byte blocks. - > scsi_add_timer: scmd: f79ed980, time: 7500, (c02b1420) - > sd 0:1:0:0: send 0xf79ed980 sd 0:1:0:0: - > command: Read (10): 28 00 33 f6 24 ac 00 00 1a 00 - > buffer = 0xf7cfb540, bufflen = 13312, done = 0xc0366b40, - > queuecommand 0xc0344010 - > leaving scsi_dispatch_cmnd() - > scsi_delete_timer: scmd: f79ed980, rtn: 1 - > sd 0:1:0:0: done 0xf79ed980 SUCCESS 0 sd 0:1:0:0: - > command: Read (10): 28 00 33 f6 24 ac 00 00 1a 00 - > scsi host busy 1 failed 0 - > sd 0:1:0:0: Notifying upper driver of completion (result 0) - > sd_rw_intr: sda: res=0x0 - > 26 sectors total, 13312 bytes done. - > use_sg is 4 - > attempt to access beyond end of device - > sda6: rw=0, want=1044134458, limit=951401367 - > Buffer I/O error on device sda6, logical block 522067228 - > attempt to access beyond end of device - -2. When INQUIRY with EVPD bit set issued to the MegaRAID controller, - system memory gets corrupted. - Root Cause: MegaRAID F/W handle the INQUIRY with EVPD bit set - incorrectly. - Fix: MegaRAID F/W has fixed the problem and being process of release, - soon. Meanwhile, driver will filter out the request. - -3. One of member in the data structure of the driver leads unaligne - issue on 64-bit platform. - Customer reporeted "kernel unaligned access addrss" issue when - application communicates with MegaRAID HBA driver. - Root Cause: in uioc_t structure, one of member had misaligned and it - led system to display the error message. - Fix: A patch submitted to community from following folk. - - > -----Original Message----- - > From: linux-scsi-owner@vger.kernel.org - > [mailto:linux-scsi-owner@vger.kernel.org] On Behalf Of Sakurai Hiroomi - > Sent: Wednesday, July 12, 2006 4:20 AM - > To: linux-scsi@vger.kernel.org; linux-kernel@vger.kernel.org - > Subject: Re: Help: strange messages from kernel on IA64 platform - > - > Hi, - > - > I saw same message. - > - > When GAM(Global Array Manager) is started, The following - > message output. - > kernel: kernel unaligned access to 0xe0000001fe1080d4, - > ip=0xa000000200053371 - > - > The uioc structure used by ioctl is defined by packed, - > the allignment of each member are disturbed. - > In a 64 bit structure, the allignment of member doesn't fit 64 bit - > boundary. this causes this messages. 
- > In a 32 bit structure, we don't see the message because the allinment - > of member fit 32 bit boundary even if packed is specified. - > - > patch - > I Add 32 bit dummy member to fit 64 bit boundary. I tested. - > We confirmed this patch fix the problem by IA64 server. - > - > ************************************************************** - > **************** - > --- linux-2.6.9/drivers/scsi/megaraid/megaraid_ioctl.h.orig - > 2006-04-03 17:13:03.000000000 +0900 - > +++ linux-2.6.9/drivers/scsi/megaraid/megaraid_ioctl.h - > 2006-04-03 17:14:09.000000000 +0900 - > @@ -132,6 +132,10 @@ - > /* Driver Data: */ - > void __user * user_data; - > uint32_t user_data_len; - > + - > + /* 64bit alignment */ - > + uint32_t pad_0xBC; - > + - > mraid_passthru_t __user *user_pthru; - > - > mraid_passthru_t *pthru32; - > ************************************************************** - > **************** - Release Date : Mon Apr 11 12:27:22 EST 2006 - Seokmann Ju Current Version : 2.20.4.8 (scsi module), 2.20.2.6 (cmm module) Older Version : 2.20.4.7 (scsi module), 2.20.2.6 (cmm module) diff --git a/trunk/Documentation/sysctl/fs.txt b/trunk/Documentation/sysctl/fs.txt index 5c3a51905969..0b62c62142cf 100644 --- a/trunk/Documentation/sysctl/fs.txt +++ b/trunk/Documentation/sysctl/fs.txt @@ -25,7 +25,6 @@ Currently, these files are in /proc/sys/fs: - inode-state - overflowuid - overflowgid -- suid_dumpable - super-max - super-nr @@ -132,25 +131,6 @@ The default is 65534. ============================================================== -suid_dumpable: - -This value can be used to query and set the core dump mode for setuid -or otherwise protected/tainted binaries. The modes are - -0 - (default) - traditional behaviour. Any process which has changed - privilege levels or is execute only will not be dumped -1 - (debug) - all processes dump core when possible. The core dump is - owned by the current user and no security is applied. This is - intended for system debugging situations only. Ptrace is unchecked. -2 - (suidsafe) - any binary which normally would not be dumped is dumped - readable by root only. This allows the end user to remove - such a dump but not access it directly. For security reasons - core dumps in this mode will not overwrite one another or - other files. This mode is appropriate when adminstrators are - attempting to debug problems in a normal environment. - -============================================================== - super-max & super-nr: These numbers control the maximum number of superblocks, and diff --git a/trunk/Documentation/sysctl/kernel.txt b/trunk/Documentation/sysctl/kernel.txt index 89bf8c20a586..7345c338080a 100644 --- a/trunk/Documentation/sysctl/kernel.txt +++ b/trunk/Documentation/sysctl/kernel.txt @@ -50,6 +50,7 @@ show up in /proc/sys/kernel: - shmmax [ sysv ipc ] - shmmni - stop-a [ SPARC only ] +- suid_dumpable - sysrq ==> Documentation/sysrq.txt - tainted - threads-max @@ -309,6 +310,25 @@ kernel. This value defaults to SHMMAX. ============================================================== +suid_dumpable: + +This value can be used to query and set the core dump mode for setuid +or otherwise protected/tainted binaries. The modes are + +0 - (default) - traditional behaviour. Any process which has changed + privilege levels or is execute only will not be dumped +1 - (debug) - all processes dump core when possible. The core dump is + owned by the current user and no security is applied. This is + intended for system debugging situations only. Ptrace is unchecked. 
+2 - (suidsafe) - any binary which normally would not be dumped is dumped + readable by root only. This allows the end user to remove + such a dump but not access it directly. For security reasons + core dumps in this mode will not overwrite one another or + other files. This mode is appropriate when adminstrators are + attempting to debug problems in a normal environment. + +============================================================== + tainted: Non-zero if the kernel has been tainted. Numeric values, which diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index 3bbd7064f947..d3315a552be1 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -889,12 +889,6 @@ M: rdunlap@xenotime.net T: git http://tali.admingilde.org/git/linux-docbook.git S: Maintained -DOCKING STATION DRIVER -P: Kristen Carlson Accardi -M: kristen.c.accardi@intel.com -L: linux-acpi@vger.kernel.org -S: Maintained - DOUBLETALK DRIVER P: James R. Van Zandt M: jrv@vanzandt.mv.com diff --git a/trunk/Makefile b/trunk/Makefile index 33559b566449..8406d02c6385 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 18 -EXTRAVERSION = -rc5 +EXTRAVERSION = -rc4 NAME=Crazed Snow-Weasel # *DOCUMENTATION* diff --git a/trunk/arch/arm/common/dmabounce.c b/trunk/arch/arm/common/dmabounce.c index 028bdc9228fb..5b7c26395b44 100644 --- a/trunk/arch/arm/common/dmabounce.c +++ b/trunk/arch/arm/common/dmabounce.c @@ -179,19 +179,17 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr, static inline struct safe_buffer * find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr) { - struct safe_buffer *b, *rb = NULL; + struct safe_buffer *b = NULL; unsigned long flags; read_lock_irqsave(&device_info->lock, flags); list_for_each_entry(b, &device_info->safe_buffers, node) - if (b->safe_dma_addr == safe_dma_addr) { - rb = b; + if (b->safe_dma_addr == safe_dma_addr) break; - } read_unlock_irqrestore(&device_info->lock, flags); - return rb; + return b; } static inline void diff --git a/trunk/arch/arm/kernel/entry-armv.S b/trunk/arch/arm/kernel/entry-armv.S index de4e33137901..7ea5f01dfc7b 100644 --- a/trunk/arch/arm/kernel/entry-armv.S +++ b/trunk/arch/arm/kernel/entry-armv.S @@ -634,14 +634,6 @@ ENTRY(__switch_to) * purpose. 
*/ - .macro usr_ret, reg -#ifdef CONFIG_ARM_THUMB - bx \reg -#else - mov pc, \reg -#endif - .endm - .align 5 .globl __kuser_helper_start __kuser_helper_start: @@ -683,7 +675,7 @@ __kuser_memory_barrier: @ 0xffff0fa0 #if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP) mcr p15, 0, r0, c7, c10, 5 @ dmb #endif - usr_ret lr + mov pc, lr .align 5 @@ -786,7 +778,7 @@ __kuser_cmpxchg: @ 0xffff0fc0 mov r0, #-1 adds r0, r0, #0 #endif - usr_ret lr + mov pc, lr #else @@ -800,7 +792,7 @@ __kuser_cmpxchg: @ 0xffff0fc0 #ifdef CONFIG_SMP mcr p15, 0, r0, c7, c10, 5 @ dmb #endif - usr_ret lr + mov pc, lr #endif @@ -842,11 +834,16 @@ __kuser_cmpxchg: @ 0xffff0fc0 __kuser_get_tls: @ 0xffff0fe0 #if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL) + ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0 + mov pc, lr + #else + mrc p15, 0, r0, c13, c0, 3 @ read TLS register + mov pc, lr + #endif - usr_ret lr .rep 5 .word 0 @ pad up to __kuser_helper_version diff --git a/trunk/arch/arm/kernel/head.S b/trunk/arch/arm/kernel/head.S index 5365d4e5949e..4fe386eea4b4 100644 --- a/trunk/arch/arm/kernel/head.S +++ b/trunk/arch/arm/kernel/head.S @@ -118,7 +118,7 @@ ENTRY(secondary_startup) sub r4, r4, r5 @ mmu has been enabled ldr r4, [r7, r4] @ get secondary_data.pgdir adr lr, __enable_mmu @ return address - add pc, r10, #PROCINFO_INITFUNC @ initialise processor + add pc, r10, #12 @ initialise processor @ (return control reg) /* diff --git a/trunk/arch/arm/mach-s3c2410/Makefile b/trunk/arch/arm/mach-s3c2410/Makefile index 273e05f2b8de..0c7938645df6 100644 --- a/trunk/arch/arm/mach-s3c2410/Makefile +++ b/trunk/arch/arm/mach-s3c2410/Makefile @@ -10,47 +10,45 @@ obj-m := obj-n := obj- := -# DMA -obj-$(CONFIG_S3C2410_DMA) += dma.o - # S3C2400 support files -obj-$(CONFIG_CPU_S3C2400) += s3c2400-gpio.o +obj-$(CONFIG_CPU_S3C2400) += s3c2400-gpio.o # S3C2410 support files -obj-$(CONFIG_CPU_S3C2410) += s3c2410.o -obj-$(CONFIG_CPU_S3C2410) += s3c2410-gpio.o +obj-$(CONFIG_CPU_S3C2410) += s3c2410.o +obj-$(CONFIG_CPU_S3C2410) += s3c2410-gpio.o +obj-$(CONFIG_S3C2410_DMA) += dma.o # Power Management support -obj-$(CONFIG_PM) += pm.o sleep.o -obj-$(CONFIG_PM_SIMTEC) += pm-simtec.o +obj-$(CONFIG_PM) += pm.o sleep.o +obj-$(CONFIG_PM_SIMTEC) += pm-simtec.o # S3C2412 support -obj-$(CONFIG_CPU_S3C2412) += s3c2412.o -obj-$(CONFIG_CPU_S3C2412) += s3c2412-clock.o +obj-$(CONFIG_CPU_S3C2412) += s3c2412.o +obj-$(CONFIG_CPU_S3C2412) += s3c2412-clock.o # # S3C244X support -obj-$(CONFIG_CPU_S3C244X) += s3c244x.o -obj-$(CONFIG_CPU_S3C244X) += s3c244x-irq.o +obj-$(CONFIG_CPU_S3C244X) += s3c244x.o +obj-$(CONFIG_CPU_S3C244X) += s3c244x-irq.o # Clock control -obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o +obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o # S3C2440 support -obj-$(CONFIG_CPU_S3C2440) += s3c2440.o s3c2440-dsc.o -obj-$(CONFIG_CPU_S3C2440) += s3c2440-irq.o -obj-$(CONFIG_CPU_S3C2440) += s3c2440-clock.o -obj-$(CONFIG_CPU_S3C2440) += s3c2410-gpio.o +obj-$(CONFIG_CPU_S3C2440) += s3c2440.o s3c2440-dsc.o +obj-$(CONFIG_CPU_S3C2440) += s3c2440-irq.o +obj-$(CONFIG_CPU_S3C2440) += s3c2440-clock.o +obj-$(CONFIG_CPU_S3C2440) += s3c2410-gpio.o # S3C2442 support -obj-$(CONFIG_CPU_S3C2442) += s3c2442.o -obj-$(CONFIG_CPU_S3C2442) += s3c2442-clock.o +obj-$(CONFIG_CPU_S3C2442) += s3c2442.o +obj-$(CONFIG_CPU_S3C2442) += s3c2442-clock.o # bast extras diff --git a/trunk/arch/arm/mach-s3c2410/dma.c b/trunk/arch/arm/mach-s3c2410/dma.c index 25855452fe8c..094cc52745c5 100644 --- a/trunk/arch/arm/mach-s3c2410/dma.c +++ b/trunk/arch/arm/mach-s3c2410/dma.c @@ 
-112,7 +112,7 @@ dmadbg_capture(s3c2410_dma_chan_t *chan, struct s3c2410_dma_regstate *regs) } static void -dmadbg_dumpregs(const char *fname, int line, s3c2410_dma_chan_t *chan, +dmadbg_showregs(const char *fname, int line, s3c2410_dma_chan_t *chan, struct s3c2410_dma_regstate *regs) { printk(KERN_DEBUG "dma%d: %s:%d: DCSRC=%08lx, DISRC=%08lx, DSTAT=%08lx DMT=%02lx, DCON=%08lx\n", @@ -132,16 +132,7 @@ dmadbg_showchan(const char *fname, int line, s3c2410_dma_chan_t *chan) chan->number, fname, line, chan->load_state, chan->curr, chan->next, chan->end); - dmadbg_dumpregs(fname, line, chan, &state); -} - -static void -dmadbg_showregs(const char *fname, int line, s3c2410_dma_chan_t *chan) -{ - struct s3c2410_dma_regstate state; - - dmadbg_capture(chan, &state); - dmadbg_dumpregs(fname, line, chan, &state); + dmadbg_showregs(fname, line, chan, &state); } #define dbg_showregs(chan) dmadbg_showregs(__FUNCTION__, __LINE__, (chan)) @@ -262,14 +253,10 @@ s3c2410_dma_loadbuffer(s3c2410_dma_chan_t *chan, buf->next); reload = (buf->next == NULL) ? S3C2410_DCON_NORELOAD : 0; } else { - //pr_debug("load_state is %d => autoreload\n", chan->load_state); + pr_debug("load_state is %d => autoreload\n", chan->load_state); reload = S3C2410_DCON_AUTORELOAD; } - if ((buf->data & 0xf0000000) != 0x30000000) { - dmawarn("dmaload: buffer is %p\n", (void *)buf->data); - } - writel(buf->data, chan->addr_reg); dma_wrreg(chan, S3C2410_DMA_DCON, @@ -383,7 +370,7 @@ static int s3c2410_dma_start(s3c2410_dma_chan_t *chan) tmp |= S3C2410_DMASKTRIG_ON; dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp); - pr_debug("dma%d: %08lx to DMASKTRIG\n", chan->number, tmp); + pr_debug("wrote %08lx to DMASKTRIG\n", tmp); #if 0 /* the dma buffer loads should take care of clearing the AUTO @@ -397,30 +384,7 @@ static int s3c2410_dma_start(s3c2410_dma_chan_t *chan) dbg_showchan(chan); - /* if we've only loaded one buffer onto the channel, then chec - * to see if we have another, and if so, try and load it so when - * the first buffer is finished, the new one will be loaded onto - * the channel */ - - if (chan->next != NULL) { - if (chan->load_state == S3C2410_DMALOAD_1LOADED) { - - if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { - pr_debug("%s: buff not yet loaded, no more todo\n", - __FUNCTION__); - } else { - chan->load_state = S3C2410_DMALOAD_1RUNNING; - s3c2410_dma_loadbuffer(chan, chan->next); - } - - } else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) { - s3c2410_dma_loadbuffer(chan, chan->next); - } - } - - local_irq_restore(flags); - return 0; } @@ -472,11 +436,12 @@ int s3c2410_dma_enqueue(unsigned int channel, void *id, buf = kmem_cache_alloc(dma_kmem, GFP_ATOMIC); if (buf == NULL) { pr_debug("%s: out of memory (%ld alloc)\n", - __FUNCTION__, (long)sizeof(*buf)); + __FUNCTION__, sizeof(*buf)); return -ENOMEM; } - //pr_debug("%s: new buffer %p\n", __FUNCTION__, buf); + pr_debug("%s: new buffer %p\n", __FUNCTION__, buf); + //dbg_showchan(chan); buf->next = NULL; @@ -572,20 +537,14 @@ s3c2410_dma_lastxfer(s3c2410_dma_chan_t *chan) case S3C2410_DMALOAD_1LOADED: if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { /* flag error? 
*/ - printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n", - chan->number, __FUNCTION__); + printk(KERN_ERR "dma%d: timeout waiting for load\n", + chan->number); return; } break; - case S3C2410_DMALOAD_1LOADED_1RUNNING: - /* I belive in this case we do not have anything to do - * until the next buffer comes along, and we turn off the - * reload */ - return; - default: - pr_debug("dma%d: lastxfer: unhandled load_state %d with no next\n", + pr_debug("dma%d: lastxfer: unhandled load_state %d with no next", chan->number, chan->load_state); return; @@ -670,14 +629,7 @@ s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs) } else { } - /* only reload if the channel is still running... our buffer done - * routine may have altered the state by requesting the dma channel - * to stop or shutdown... */ - - /* todo: check that when the channel is shut-down from inside this - * function, we cope with unsetting reload, etc */ - - if (chan->next != NULL && chan->state != S3C2410_DMA_IDLE) { + if (chan->next != NULL) { unsigned long flags; switch (chan->load_state) { @@ -692,8 +644,8 @@ s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs) case S3C2410_DMALOAD_1LOADED: if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { /* flag error? */ - printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n", - chan->number, __FUNCTION__); + printk(KERN_ERR "dma%d: timeout waiting for load\n", + chan->number); return IRQ_HANDLED; } @@ -726,6 +678,8 @@ s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs) return IRQ_HANDLED; } + + /* s3c2410_request_dma * * get control of an dma channel @@ -764,17 +718,11 @@ int s3c2410_dma_request(unsigned int channel, s3c2410_dma_client_t *client, pr_debug("dma%d: %s : requesting irq %d\n", channel, __FUNCTION__, chan->irq); - chan->irq_claimed = 1; - local_irq_restore(flags); - err = request_irq(chan->irq, s3c2410_dma_irq, IRQF_DISABLED, client->name, (void *)chan); - local_irq_save(flags); - if (err) { chan->in_use = 0; - chan->irq_claimed = 0; local_irq_restore(flags); printk(KERN_ERR "%s: cannot get IRQ %d for DMA %d\n", @@ -782,6 +730,7 @@ int s3c2410_dma_request(unsigned int channel, s3c2410_dma_client_t *client, return err; } + chan->irq_claimed = 1; chan->irq_enabled = 1; } @@ -861,7 +810,6 @@ static int s3c2410_dma_dostop(s3c2410_dma_chan_t *chan) tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG); tmp |= S3C2410_DMASKTRIG_STOP; - //tmp &= ~S3C2410_DMASKTRIG_ON; dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp); #if 0 @@ -871,7 +819,6 @@ static int s3c2410_dma_dostop(s3c2410_dma_chan_t *chan) dma_wrreg(chan, S3C2410_DMA_DCON, tmp); #endif - /* should stop do this, or should we wait for flush? 
*/ chan->state = S3C2410_DMA_IDLE; chan->load_state = S3C2410_DMALOAD_NONE; @@ -880,22 +827,6 @@ static int s3c2410_dma_dostop(s3c2410_dma_chan_t *chan) return 0; } -void s3c2410_dma_waitforstop(s3c2410_dma_chan_t *chan) -{ - unsigned long tmp; - unsigned int timeout = 0x10000; - - while (timeout-- > 0) { - tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG); - - if (!(tmp & S3C2410_DMASKTRIG_ON)) - return; - } - - pr_debug("dma%d: failed to stop?\n", chan->number); -} - - /* s3c2410_dma_flush * * stop the channel, and remove all current and pending transfers @@ -906,9 +837,7 @@ static int s3c2410_dma_flush(s3c2410_dma_chan_t *chan) s3c2410_dma_buf_t *buf, *next; unsigned long flags; - pr_debug("%s: chan %p (%d)\n", __FUNCTION__, chan, chan->number); - - dbg_showchan(chan); + pr_debug("%s:\n", __FUNCTION__); local_irq_save(flags); @@ -935,64 +864,11 @@ static int s3c2410_dma_flush(s3c2410_dma_chan_t *chan) } } - dbg_showregs(chan); - - s3c2410_dma_waitforstop(chan); - -#if 0 - /* should also clear interrupts, according to WinCE BSP */ - { - unsigned long tmp; - - tmp = dma_rdreg(chan, S3C2410_DMA_DCON); - tmp |= S3C2410_DCON_NORELOAD; - dma_wrreg(chan, S3C2410_DMA_DCON, tmp); - } -#endif - - dbg_showregs(chan); - local_irq_restore(flags); return 0; } -int -s3c2410_dma_started(s3c2410_dma_chan_t *chan) -{ - unsigned long flags; - - local_irq_save(flags); - - dbg_showchan(chan); - - /* if we've only loaded one buffer onto the channel, then chec - * to see if we have another, and if so, try and load it so when - * the first buffer is finished, the new one will be loaded onto - * the channel */ - - if (chan->next != NULL) { - if (chan->load_state == S3C2410_DMALOAD_1LOADED) { - - if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { - pr_debug("%s: buff not yet loaded, no more todo\n", - __FUNCTION__); - } else { - chan->load_state = S3C2410_DMALOAD_1RUNNING; - s3c2410_dma_loadbuffer(chan, chan->next); - } - - } else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) { - s3c2410_dma_loadbuffer(chan, chan->next); - } - } - - - local_irq_restore(flags); - - return 0; - -} int s3c2410_dma_ctrl(dmach_t channel, s3c2410_chan_op_t op) @@ -1009,15 +885,14 @@ s3c2410_dma_ctrl(dmach_t channel, s3c2410_chan_op_t op) return s3c2410_dma_dostop(chan); case S3C2410_DMAOP_PAUSE: + return -ENOENT; + case S3C2410_DMAOP_RESUME: return -ENOENT; case S3C2410_DMAOP_FLUSH: return s3c2410_dma_flush(chan); - case S3C2410_DMAOP_STARTED: - return s3c2410_dma_started(chan); - case S3C2410_DMAOP_TIMEOUT: return 0; diff --git a/trunk/arch/arm/mach-versatile/core.c b/trunk/arch/arm/mach-versatile/core.c index f2bbef07b1e4..c4e3f8c68479 100644 --- a/trunk/arch/arm/mach-versatile/core.c +++ b/trunk/arch/arm/mach-versatile/core.c @@ -285,7 +285,7 @@ static struct flash_platform_data versatile_flash_data = { static struct resource versatile_flash_resource = { .start = VERSATILE_FLASH_BASE, - .end = VERSATILE_FLASH_BASE + VERSATILE_FLASH_SIZE - 1, + .end = VERSATILE_FLASH_BASE + VERSATILE_FLASH_SIZE, .flags = IORESOURCE_MEM, }; diff --git a/trunk/arch/i386/Kconfig b/trunk/arch/i386/Kconfig index b2751eadbc56..f71fb4a029cb 100644 --- a/trunk/arch/i386/Kconfig +++ b/trunk/arch/i386/Kconfig @@ -142,7 +142,6 @@ config X86_SUMMIT In particular, it is needed for the x440. If you don't have one of these computers, you should say N here. - If you want to build a NUMA kernel, you must select ACPI. 
config X86_BIGSMP bool "Support for other sub-arch SMP systems with more than 8 CPUs" @@ -170,7 +169,6 @@ config X86_GENERICARCH help This option compiles in the Summit, bigsmp, ES7000, default subarchitectures. It is intended for a generic binary kernel. - If you want a NUMA kernel, select ACPI. We need SRAT for NUMA. config X86_ES7000 bool "Support for Unisys ES7000 IA32 series" @@ -544,7 +542,7 @@ config X86_PAE # Common NUMA Features config NUMA bool "Numa Memory Allocation and Scheduler Support" - depends on SMP && HIGHMEM64G && (X86_NUMAQ || (X86_SUMMIT || X86_GENERICARCH) && ACPI) + depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI)) default n if X86_PC default y if (X86_NUMAQ || X86_SUMMIT) diff --git a/trunk/arch/i386/kernel/acpi/boot.c b/trunk/arch/i386/kernel/acpi/boot.c index ee003bc0e8b1..0db6387025ca 100644 --- a/trunk/arch/i386/kernel/acpi/boot.c +++ b/trunk/arch/i386/kernel/acpi/boot.c @@ -59,7 +59,7 @@ static inline int gsi_irq_sharing(int gsi) { return gsi; } #define BAD_MADT_ENTRY(entry, end) ( \ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ - ((acpi_table_entry_header *)entry)->length < sizeof(*entry)) + ((acpi_table_entry_header *)entry)->length != sizeof(*entry)) #define PREFIX "ACPI: " diff --git a/trunk/arch/i386/kernel/acpi/wakeup.S b/trunk/arch/i386/kernel/acpi/wakeup.S index b781b38131c0..9f408eee4e6f 100644 --- a/trunk/arch/i386/kernel/acpi/wakeup.S +++ b/trunk/arch/i386/kernel/acpi/wakeup.S @@ -292,10 +292,7 @@ ENTRY(do_suspend_lowlevel) pushl $3 call acpi_enter_sleep_state addl $4, %esp - -# In case of S3 failure, we'll emerge here. Jump -# to ret_point to recover - jmp ret_point + ret .p2align 4,,7 ret_point: call restore_registers diff --git a/trunk/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/trunk/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c index e6ea00edcb54..efb41e81351c 100644 --- a/trunk/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/trunk/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -567,11 +567,16 @@ static struct cpufreq_driver acpi_cpufreq_driver = { static int __init acpi_cpufreq_init (void) { + int result = 0; + dprintk("acpi_cpufreq_init\n"); - acpi_cpufreq_early_init_acpi(); + result = acpi_cpufreq_early_init_acpi(); - return cpufreq_register_driver(&acpi_cpufreq_driver); + if (!result) + result = cpufreq_register_driver(&acpi_cpufreq_driver); + + return (result); } diff --git a/trunk/arch/i386/pci/init.c b/trunk/arch/i386/pci/init.c index 51087a9d9172..c7650a7e0b07 100644 --- a/trunk/arch/i386/pci/init.c +++ b/trunk/arch/i386/pci/init.c @@ -14,12 +14,8 @@ static __init int pci_access_init(void) #ifdef CONFIG_PCI_BIOS pci_pcbios_init(); #endif - /* - * don't check for raw_pci_ops here because we want pcbios as last - * fallback, yet it's needed to run first to set pcibios_last_bus - * in case legacy PCI probing is used. otherwise detecting peer busses - * fails. 
- */ + if (raw_pci_ops) + return 0; #ifdef CONFIG_PCI_DIRECT pci_direct_init(); #endif diff --git a/trunk/arch/i386/pci/mmconfig.c b/trunk/arch/i386/pci/mmconfig.c index 972180f738d9..e545b0992c48 100644 --- a/trunk/arch/i386/pci/mmconfig.c +++ b/trunk/arch/i386/pci/mmconfig.c @@ -178,7 +178,7 @@ static __init void unreachable_devices(void) pci_exp_set_dev_base(addr, k, PCI_DEVFN(i, 0)); if (addr == 0 || readl((u32 __iomem *)mmcfg_virt_addr) != val1) { - set_bit(i + 32*k, fallback_slots); + set_bit(i, fallback_slots); printk(KERN_NOTICE "PCI: No mmconfig possible on %x:%x\n", k, i); } diff --git a/trunk/arch/ia64/hp/sim/simscsi.c b/trunk/arch/ia64/hp/sim/simscsi.c index 8f0a16a79a67..8a4f0d0d17a3 100644 --- a/trunk/arch/ia64/hp/sim/simscsi.c +++ b/trunk/arch/ia64/hp/sim/simscsi.c @@ -244,8 +244,7 @@ static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len) if (scatterlen == 0) memcpy(sc->request_buffer, buf, len); - else for (slp = (struct scatterlist *)sc->request_buffer; - scatterlen-- > 0 && len > 0; slp++) { + else for (slp = (struct scatterlist *)sc->request_buffer; scatterlen-- > 0 && len > 0; slp++) { unsigned thislen = min(len, slp->length); memcpy(page_address(slp->page) + slp->offset, buf, thislen); diff --git a/trunk/arch/ia64/kernel/acpi.c b/trunk/arch/ia64/kernel/acpi.c index 0176556aeecc..99761b81db44 100644 --- a/trunk/arch/ia64/kernel/acpi.c +++ b/trunk/arch/ia64/kernel/acpi.c @@ -55,7 +55,7 @@ #define BAD_MADT_ENTRY(entry, end) ( \ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ - ((acpi_table_entry_header *)entry)->length < sizeof(*entry)) + ((acpi_table_entry_header *)entry)->length != sizeof(*entry)) #define PREFIX "ACPI: " diff --git a/trunk/arch/powerpc/boot/dts/mpc8540ads.dts b/trunk/arch/powerpc/boot/dts/mpc8540ads.dts deleted file mode 100644 index 5f41c1f7a5f3..000000000000 --- a/trunk/arch/powerpc/boot/dts/mpc8540ads.dts +++ /dev/null @@ -1,257 +0,0 @@ -/* - * MPC8540 ADS Device Tree Source - * - * Copyright 2006 Freescale Semiconductor Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - - -/ { - model = "MPC8540ADS"; - compatible = "MPC85xxADS"; - #address-cells = <1>; - #size-cells = <1>; - linux,phandle = <100>; - - cpus { - #cpus = <1>; - #address-cells = <1>; - #size-cells = <0>; - linux,phandle = <200>; - - PowerPC,8540@0 { - device_type = "cpu"; - reg = <0>; - d-cache-line-size = <20>; // 32 bytes - i-cache-line-size = <20>; // 32 bytes - d-cache-size = <8000>; // L1, 32K - i-cache-size = <8000>; // L1, 32K - timebase-frequency = <0>; // 33 MHz, from uboot - bus-frequency = <0>; // 166 MHz - clock-frequency = <0>; // 825 MHz, from uboot - 32-bit; - linux,phandle = <201>; - }; - }; - - memory { - device_type = "memory"; - linux,phandle = <300>; - reg = <00000000 08000000>; // 128M at 0x0 - }; - - soc8540@e0000000 { - #address-cells = <1>; - #size-cells = <1>; - #interrupt-cells = <2>; - device_type = "soc"; - ranges = <0 e0000000 00100000>; - reg = ; // CCSRBAR 1M - bus-frequency = <0>; - - i2c@3000 { - device_type = "i2c"; - compatible = "fsl-i2c"; - reg = <3000 100>; - interrupts = <1b 2>; - interrupt-parent = <40000>; - dfsrr; - }; - - mdio@24520 { - #address-cells = <1>; - #size-cells = <0>; - device_type = "mdio"; - compatible = "gianfar"; - reg = <24520 20>; - linux,phandle = <24520>; - ethernet-phy@0 { - linux,phandle = <2452000>; - interrupt-parent = <40000>; - interrupts = <35 1>; - reg = <0>; - device_type = "ethernet-phy"; - }; - ethernet-phy@1 { - linux,phandle = <2452001>; - interrupt-parent = <40000>; - interrupts = <35 1>; - reg = <1>; - device_type = "ethernet-phy"; - }; - ethernet-phy@3 { - linux,phandle = <2452003>; - interrupt-parent = <40000>; - interrupts = <37 1>; - reg = <3>; - device_type = "ethernet-phy"; - }; - }; - - ethernet@24000 { - #address-cells = <1>; - #size-cells = <0>; - device_type = "network"; - model = "TSEC"; - compatible = "gianfar"; - reg = <24000 1000>; - address = [ 00 E0 0C 00 73 00 ]; - local-mac-address = [ 00 E0 0C 00 73 00 ]; - interrupts = ; - interrupt-parent = <40000>; - phy-handle = <2452000>; - }; - - ethernet@25000 { - #address-cells = <1>; - #size-cells = <0>; - device_type = "network"; - model = "TSEC"; - compatible = "gianfar"; - reg = <25000 1000>; - address = [ 00 E0 0C 00 73 01 ]; - local-mac-address = [ 00 E0 0C 00 73 01 ]; - interrupts = <13 2 14 2 18 2>; - interrupt-parent = <40000>; - phy-handle = <2452001>; - }; - - ethernet@26000 { - #address-cells = <1>; - #size-cells = <0>; - device_type = "network"; - model = "FEC"; - compatible = "gianfar"; - reg = <26000 1000>; - address = [ 00 E0 0C 00 73 02 ]; - local-mac-address = [ 00 E0 0C 00 73 02 ]; - interrupts = <19 2>; - interrupt-parent = <40000>; - phy-handle = <2452003>; - }; - - serial@4500 { - device_type = "serial"; - compatible = "ns16550"; - reg = <4500 100>; // reg base, size - clock-frequency = <0>; // should we fill in in uboot? - interrupts = <1a 2>; - interrupt-parent = <40000>; - }; - - serial@4600 { - device_type = "serial"; - compatible = "ns16550"; - reg = <4600 100>; // reg base, size - clock-frequency = <0>; // should we fill in in uboot? 
- interrupts = <1a 2>; - interrupt-parent = <40000>; - }; - pci@8000 { - linux,phandle = <8000>; - interrupt-map-mask = ; - interrupt-map = < - - /* IDSEL 0x02 */ - 1000 0 0 1 40000 31 1 - 1000 0 0 2 40000 32 1 - 1000 0 0 3 40000 33 1 - 1000 0 0 4 40000 34 1 - - /* IDSEL 0x03 */ - 1800 0 0 1 40000 34 1 - 1800 0 0 2 40000 31 1 - 1800 0 0 3 40000 32 1 - 1800 0 0 4 40000 33 1 - - /* IDSEL 0x04 */ - 2000 0 0 1 40000 33 1 - 2000 0 0 2 40000 34 1 - 2000 0 0 3 40000 31 1 - 2000 0 0 4 40000 32 1 - - /* IDSEL 0x05 */ - 2800 0 0 1 40000 32 1 - 2800 0 0 2 40000 33 1 - 2800 0 0 3 40000 34 1 - 2800 0 0 4 40000 31 1 - - /* IDSEL 0x0c */ - 6000 0 0 1 40000 31 1 - 6000 0 0 2 40000 32 1 - 6000 0 0 3 40000 33 1 - 6000 0 0 4 40000 34 1 - - /* IDSEL 0x0d */ - 6800 0 0 1 40000 34 1 - 6800 0 0 2 40000 31 1 - 6800 0 0 3 40000 32 1 - 6800 0 0 4 40000 33 1 - - /* IDSEL 0x0e */ - 7000 0 0 1 40000 33 1 - 7000 0 0 2 40000 34 1 - 7000 0 0 3 40000 31 1 - 7000 0 0 4 40000 32 1 - - /* IDSEL 0x0f */ - 7800 0 0 1 40000 32 1 - 7800 0 0 2 40000 33 1 - 7800 0 0 3 40000 34 1 - 7800 0 0 4 40000 31 1 - - /* IDSEL 0x12 */ - 9000 0 0 1 40000 31 1 - 9000 0 0 2 40000 32 1 - 9000 0 0 3 40000 33 1 - 9000 0 0 4 40000 34 1 - - /* IDSEL 0x13 */ - 9800 0 0 1 40000 34 1 - 9800 0 0 2 40000 31 1 - 9800 0 0 3 40000 32 1 - 9800 0 0 4 40000 33 1 - - /* IDSEL 0x14 */ - a000 0 0 1 40000 33 1 - a000 0 0 2 40000 34 1 - a000 0 0 3 40000 31 1 - a000 0 0 4 40000 32 1 - - /* IDSEL 0x15 */ - a800 0 0 1 40000 32 1 - a800 0 0 2 40000 33 1 - a800 0 0 3 40000 34 1 - a800 0 0 4 40000 31 1>; - interrupt-parent = <40000>; - interrupts = <08 2>; - bus-range = <0 0>; - ranges = <02000000 0 80000000 80000000 0 20000000 - 01000000 0 00000000 e2000000 0 00100000>; - clock-frequency = <3f940aa>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <8000 1000>; - compatible = "85xx"; - device_type = "pci"; - }; - - pic@40000 { - linux,phandle = <40000>; - clock-frequency = <0>; - interrupt-controller; - #address-cells = <0>; - #interrupt-cells = <2>; - reg = <40000 40000>; - built-in; - compatible = "chrp,open-pic"; - device_type = "open-pic"; - big-endian; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/mpc8541cds.dts b/trunk/arch/powerpc/boot/dts/mpc8541cds.dts deleted file mode 100644 index 7be0bc659e1c..000000000000 --- a/trunk/arch/powerpc/boot/dts/mpc8541cds.dts +++ /dev/null @@ -1,244 +0,0 @@ -/* - * MPC8541 CDS Device Tree Source - * - * Copyright 2006 Freescale Semiconductor Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - - -/ { - model = "MPC8541CDS"; - compatible = "MPC85xxCDS"; - #address-cells = <1>; - #size-cells = <1>; - linux,phandle = <100>; - - cpus { - #cpus = <1>; - #address-cells = <1>; - #size-cells = <0>; - linux,phandle = <200>; - - PowerPC,8541@0 { - device_type = "cpu"; - reg = <0>; - d-cache-line-size = <20>; // 32 bytes - i-cache-line-size = <20>; // 32 bytes - d-cache-size = <8000>; // L1, 32K - i-cache-size = <8000>; // L1, 32K - timebase-frequency = <0>; // 33 MHz, from uboot - bus-frequency = <0>; // 166 MHz - clock-frequency = <0>; // 825 MHz, from uboot - 32-bit; - linux,phandle = <201>; - }; - }; - - memory { - device_type = "memory"; - linux,phandle = <300>; - reg = <00000000 08000000>; // 128M at 0x0 - }; - - soc8541@e0000000 { - #address-cells = <1>; - #size-cells = <1>; - #interrupt-cells = <2>; - device_type = "soc"; - ranges = <0 e0000000 00100000>; - reg = ; // CCSRBAR 1M - bus-frequency = <0>; - - i2c@3000 { - device_type = "i2c"; - compatible = "fsl-i2c"; - reg = <3000 100>; - interrupts = <1b 2>; - interrupt-parent = <40000>; - dfsrr; - }; - - mdio@24520 { - #address-cells = <1>; - #size-cells = <0>; - device_type = "mdio"; - compatible = "gianfar"; - reg = <24520 20>; - linux,phandle = <24520>; - ethernet-phy@0 { - linux,phandle = <2452000>; - interrupt-parent = <40000>; - interrupts = <35 0>; - reg = <0>; - device_type = "ethernet-phy"; - }; - ethernet-phy@1 { - linux,phandle = <2452001>; - interrupt-parent = <40000>; - interrupts = <35 0>; - reg = <1>; - device_type = "ethernet-phy"; - }; - }; - - ethernet@24000 { - #address-cells = <1>; - #size-cells = <0>; - device_type = "network"; - model = "TSEC"; - compatible = "gianfar"; - reg = <24000 1000>; - local-mac-address = [ 00 E0 0C 00 73 00 ]; - interrupts = ; - interrupt-parent = <40000>; - phy-handle = <2452000>; - }; - - ethernet@25000 { - #address-cells = <1>; - #size-cells = <0>; - device_type = "network"; - model = "TSEC"; - compatible = "gianfar"; - reg = <25000 1000>; - local-mac-address = [ 00 E0 0C 00 73 01 ]; - interrupts = <13 2 14 2 18 2>; - interrupt-parent = <40000>; - phy-handle = <2452001>; - }; - - serial@4500 { - device_type = "serial"; - compatible = "ns16550"; - reg = <4500 100>; // reg base, size - clock-frequency = <0>; // should we fill in in uboot? - interrupts = <1a 2>; - interrupt-parent = <40000>; - }; - - serial@4600 { - device_type = "serial"; - compatible = "ns16550"; - reg = <4600 100>; // reg base, size - clock-frequency = <0>; // should we fill in in uboot? 
- interrupts = <1a 2>; - interrupt-parent = <40000>; - }; - - pci@8000 { - linux,phandle = <8000>; - interrupt-map-mask = <1f800 0 0 7>; - interrupt-map = < - - /* IDSEL 0x10 */ - 08000 0 0 1 40000 30 1 - 08000 0 0 2 40000 31 1 - 08000 0 0 3 40000 32 1 - 08000 0 0 4 40000 33 1 - - /* IDSEL 0x11 */ - 08800 0 0 1 40000 30 1 - 08800 0 0 2 40000 31 1 - 08800 0 0 3 40000 32 1 - 08800 0 0 4 40000 33 1 - - /* IDSEL 0x12 (Slot 1) */ - 09000 0 0 1 40000 30 1 - 09000 0 0 2 40000 31 1 - 09000 0 0 3 40000 32 1 - 09000 0 0 4 40000 33 1 - - /* IDSEL 0x13 (Slot 2) */ - 09800 0 0 1 40000 31 1 - 09800 0 0 2 40000 32 1 - 09800 0 0 3 40000 33 1 - 09800 0 0 4 40000 30 1 - - /* IDSEL 0x14 (Slot 3) */ - 0a000 0 0 1 40000 32 1 - 0a000 0 0 2 40000 33 1 - 0a000 0 0 3 40000 30 1 - 0a000 0 0 4 40000 31 1 - - /* IDSEL 0x15 (Slot 4) */ - 0a800 0 0 1 40000 33 1 - 0a800 0 0 2 40000 30 1 - 0a800 0 0 3 40000 31 1 - 0a800 0 0 4 40000 32 1 - - /* Bus 1 (Tundra Bridge) */ - /* IDSEL 0x12 (ISA bridge) */ - 19000 0 0 1 40000 30 1 - 19000 0 0 2 40000 31 1 - 19000 0 0 3 40000 32 1 - 19000 0 0 4 40000 33 1>; - interrupt-parent = <40000>; - interrupts = <08 2>; - bus-range = <0 0>; - ranges = <02000000 0 80000000 80000000 0 20000000 - 01000000 0 00000000 e2000000 0 00100000>; - clock-frequency = <3f940aa>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <8000 1000>; - compatible = "85xx"; - device_type = "pci"; - - i8259@19000 { - clock-frequency = <0>; - interrupt-controller; - device_type = "interrupt-controller"; - reg = <19000 0 0 0 1>; - #address-cells = <0>; - #interrupt-cells = <2>; - built-in; - compatible = "chrp,iic"; - big-endian; - interrupts = <1>; - interrupt-parent = <8000>; - }; - }; - - pci@9000 { - linux,phandle = <9000>; - interrupt-map-mask = ; - interrupt-map = < - - /* IDSEL 0x15 */ - a800 0 0 1 40000 3b 1 - a800 0 0 2 40000 3b 1 - a800 0 0 3 40000 3b 1 - a800 0 0 4 40000 3b 1>; - interrupt-parent = <40000>; - interrupts = <09 2>; - bus-range = <0 0>; - ranges = <02000000 0 a0000000 a0000000 0 20000000 - 01000000 0 00000000 e3000000 0 00100000>; - clock-frequency = <3f940aa>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <9000 1000>; - compatible = "85xx"; - device_type = "pci"; - }; - - pic@40000 { - linux,phandle = <40000>; - clock-frequency = <0>; - interrupt-controller; - #address-cells = <0>; - #interrupt-cells = <2>; - reg = <40000 40000>; - built-in; - compatible = "chrp,open-pic"; - device_type = "open-pic"; - big-endian; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/mpc8548cds.dts b/trunk/arch/powerpc/boot/dts/mpc8548cds.dts deleted file mode 100644 index 893d7957c174..000000000000 --- a/trunk/arch/powerpc/boot/dts/mpc8548cds.dts +++ /dev/null @@ -1,287 +0,0 @@ -/* - * MPC8555 CDS Device Tree Source - * - * Copyright 2006 Freescale Semiconductor Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - - -/ { - model = "MPC8548CDS"; - compatible = "MPC85xxCDS"; - #address-cells = <1>; - #size-cells = <1>; - linux,phandle = <100>; - - cpus { - #cpus = <1>; - #address-cells = <1>; - #size-cells = <0>; - linux,phandle = <200>; - - PowerPC,8548@0 { - device_type = "cpu"; - reg = <0>; - d-cache-line-size = <20>; // 32 bytes - i-cache-line-size = <20>; // 32 bytes - d-cache-size = <8000>; // L1, 32K - i-cache-size = <8000>; // L1, 32K - timebase-frequency = <0>; // 33 MHz, from uboot - bus-frequency = <0>; // 166 MHz - clock-frequency = <0>; // 825 MHz, from uboot - 32-bit; - linux,phandle = <201>; - }; - }; - - memory { - device_type = "memory"; - linux,phandle = <300>; - reg = <00000000 08000000>; // 128M at 0x0 - }; - - soc8548@e0000000 { - #address-cells = <1>; - #size-cells = <1>; - #interrupt-cells = <2>; - device_type = "soc"; - ranges = <0 e0000000 00100000>; - reg = ; // CCSRBAR 1M - bus-frequency = <0>; - - i2c@3000 { - device_type = "i2c"; - compatible = "fsl-i2c"; - reg = <3000 100>; - interrupts = <1b 2>; - interrupt-parent = <40000>; - dfsrr; - }; - - mdio@24520 { - #address-cells = <1>; - #size-cells = <0>; - device_type = "mdio"; - compatible = "gianfar"; - reg = <24520 20>; - linux,phandle = <24520>; - ethernet-phy@0 { - linux,phandle = <2452000>; - interrupt-parent = <40000>; - interrupts = <35 0>; - reg = <0>; - device_type = "ethernet-phy"; - }; - ethernet-phy@1 { - linux,phandle = <2452001>; - interrupt-parent = <40000>; - interrupts = <35 0>; - reg = <1>; - device_type = "ethernet-phy"; - }; - - ethernet-phy@2 { - linux,phandle = <2452002>; - interrupt-parent = <40000>; - interrupts = <35 0>; - reg = <2>; - device_type = "ethernet-phy"; - }; - ethernet-phy@3 { - linux,phandle = <2452003>; - interrupt-parent = <40000>; - interrupts = <35 0>; - reg = <3>; - device_type = "ethernet-phy"; - }; - }; - - ethernet@24000 { - #address-cells = <1>; - #size-cells = <0>; - device_type = "network"; - model = "eTSEC"; - compatible = "gianfar"; - reg = <24000 1000>; - local-mac-address = [ 00 E0 0C 00 73 00 ]; - interrupts = ; - interrupt-parent = <40000>; - phy-handle = <2452000>; - }; - - ethernet@25000 { - #address-cells = <1>; - #size-cells = <0>; - device_type = "network"; - model = "eTSEC"; - compatible = "gianfar"; - reg = <25000 1000>; - local-mac-address = [ 00 E0 0C 00 73 01 ]; - interrupts = <13 2 14 2 18 2>; - interrupt-parent = <40000>; - phy-handle = <2452001>; - }; - - ethernet@26000 { - #address-cells = <1>; - #size-cells = <0>; - device_type = "network"; - model = "eTSEC"; - compatible = "gianfar"; - reg = <26000 1000>; - local-mac-address = [ 00 E0 0C 00 73 02 ]; - interrupts = ; - interrupt-parent = <40000>; - phy-handle = <2452001>; - }; - -/* eTSEC 4 is currently broken - ethernet@27000 { - #address-cells = <1>; - #size-cells = <0>; - device_type = "network"; - model = "eTSEC"; - compatible = "gianfar"; - reg = <27000 1000>; - local-mac-address = [ 00 E0 0C 00 73 03 ]; - interrupts = <15 2 16 2 17 2>; - interrupt-parent = <40000>; - phy-handle = <2452001>; - }; - */ - - serial@4500 { - device_type = "serial"; - compatible = "ns16550"; - reg = <4500 100>; // reg base, size - clock-frequency = <0>; // should we fill in in uboot? - interrupts = <1a 2>; - interrupt-parent = <40000>; - }; - - serial@4600 { - device_type = "serial"; - compatible = "ns16550"; - reg = <4600 100>; // reg base, size - clock-frequency = <0>; // should we fill in in uboot? 
- interrupts = <1a 2>; - interrupt-parent = <40000>; - }; - - pci@8000 { - linux,phandle = <8000>; - interrupt-map-mask = <1f800 0 0 7>; - interrupt-map = < - - /* IDSEL 0x10 */ - 08000 0 0 1 40000 30 1 - 08000 0 0 2 40000 31 1 - 08000 0 0 3 40000 32 1 - 08000 0 0 4 40000 33 1 - - /* IDSEL 0x11 */ - 08800 0 0 1 40000 30 1 - 08800 0 0 2 40000 31 1 - 08800 0 0 3 40000 32 1 - 08800 0 0 4 40000 33 1 - - /* IDSEL 0x12 (Slot 1) */ - 09000 0 0 1 40000 30 1 - 09000 0 0 2 40000 31 1 - 09000 0 0 3 40000 32 1 - 09000 0 0 4 40000 33 1 - - /* IDSEL 0x13 (Slot 2) */ - 09800 0 0 1 40000 31 1 - 09800 0 0 2 40000 32 1 - 09800 0 0 3 40000 33 1 - 09800 0 0 4 40000 30 1 - - /* IDSEL 0x14 (Slot 3) */ - 0a000 0 0 1 40000 32 1 - 0a000 0 0 2 40000 33 1 - 0a000 0 0 3 40000 30 1 - 0a000 0 0 4 40000 31 1 - - /* IDSEL 0x15 (Slot 4) */ - 0a800 0 0 1 40000 33 1 - 0a800 0 0 2 40000 30 1 - 0a800 0 0 3 40000 31 1 - 0a800 0 0 4 40000 32 1 - - /* Bus 1 (Tundra Bridge) */ - /* IDSEL 0x12 (ISA bridge) */ - 19000 0 0 1 40000 30 1 - 19000 0 0 2 40000 31 1 - 19000 0 0 3 40000 32 1 - 19000 0 0 4 40000 33 1>; - interrupt-parent = <40000>; - interrupts = <08 2>; - bus-range = <0 0>; - ranges = <02000000 0 80000000 80000000 0 20000000 - 01000000 0 00000000 e2000000 0 00100000>; - clock-frequency = <3f940aa>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <8000 1000>; - compatible = "85xx"; - device_type = "pci"; - - i8259@19000 { - clock-frequency = <0>; - interrupt-controller; - device_type = "interrupt-controller"; - reg = <19000 0 0 0 1>; - #address-cells = <0>; - #interrupt-cells = <2>; - built-in; - compatible = "chrp,iic"; - big-endian; - interrupts = <1>; - interrupt-parent = <8000>; - }; - }; - - pci@9000 { - linux,phandle = <9000>; - interrupt-map-mask = ; - interrupt-map = < - - /* IDSEL 0x15 */ - a800 0 0 1 40000 3b 1 - a800 0 0 2 40000 3b 1 - a800 0 0 3 40000 3b 1 - a800 0 0 4 40000 3b 1>; - interrupt-parent = <40000>; - interrupts = <09 2>; - bus-range = <0 0>; - ranges = <02000000 0 a0000000 a0000000 0 20000000 - 01000000 0 00000000 e3000000 0 00100000>; - clock-frequency = <3f940aa>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <9000 1000>; - compatible = "85xx"; - device_type = "pci"; - }; - - pic@40000 { - linux,phandle = <40000>; - clock-frequency = <0>; - interrupt-controller; - #address-cells = <0>; - #interrupt-cells = <2>; - reg = <40000 40000>; - built-in; - compatible = "chrp,open-pic"; - device_type = "open-pic"; - big-endian; - }; - }; -}; diff --git a/trunk/arch/powerpc/boot/dts/mpc8555cds.dts b/trunk/arch/powerpc/boot/dts/mpc8555cds.dts deleted file mode 100644 index 118f5a887651..000000000000 --- a/trunk/arch/powerpc/boot/dts/mpc8555cds.dts +++ /dev/null @@ -1,244 +0,0 @@ -/* - * MPC8555 CDS Device Tree Source - * - * Copyright 2006 Freescale Semiconductor Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - - -/ { - model = "MPC8555CDS"; - compatible = "MPC85xxCDS"; - #address-cells = <1>; - #size-cells = <1>; - linux,phandle = <100>; - - cpus { - #cpus = <1>; - #address-cells = <1>; - #size-cells = <0>; - linux,phandle = <200>; - - PowerPC,8555@0 { - device_type = "cpu"; - reg = <0>; - d-cache-line-size = <20>; // 32 bytes - i-cache-line-size = <20>; // 32 bytes - d-cache-size = <8000>; // L1, 32K - i-cache-size = <8000>; // L1, 32K - timebase-frequency = <0>; // 33 MHz, from uboot - bus-frequency = <0>; // 166 MHz - clock-frequency = <0>; // 825 MHz, from uboot - 32-bit; - linux,phandle = <201>; - }; - }; - - memory { - device_type = "memory"; - linux,phandle = <300>; - reg = <00000000 08000000>; // 128M at 0x0 - }; - - soc8555@e0000000 { - #address-cells = <1>; - #size-cells = <1>; - #interrupt-cells = <2>; - device_type = "soc"; - ranges = <0 e0000000 00100000>; - reg = ; // CCSRBAR 1M - bus-frequency = <0>; - - i2c@3000 { - device_type = "i2c"; - compatible = "fsl-i2c"; - reg = <3000 100>; - interrupts = <1b 2>; - interrupt-parent = <40000>; - dfsrr; - }; - - mdio@24520 { - #address-cells = <1>; - #size-cells = <0>; - device_type = "mdio"; - compatible = "gianfar"; - reg = <24520 20>; - linux,phandle = <24520>; - ethernet-phy@0 { - linux,phandle = <2452000>; - interrupt-parent = <40000>; - interrupts = <35 0>; - reg = <0>; - device_type = "ethernet-phy"; - }; - ethernet-phy@1 { - linux,phandle = <2452001>; - interrupt-parent = <40000>; - interrupts = <35 0>; - reg = <1>; - device_type = "ethernet-phy"; - }; - }; - - ethernet@24000 { - #address-cells = <1>; - #size-cells = <0>; - device_type = "network"; - model = "TSEC"; - compatible = "gianfar"; - reg = <24000 1000>; - local-mac-address = [ 00 E0 0C 00 73 00 ]; - interrupts = <0d 2 0e 2 12 2>; - interrupt-parent = <40000>; - phy-handle = <2452000>; - }; - - ethernet@25000 { - #address-cells = <1>; - #size-cells = <0>; - device_type = "network"; - model = "TSEC"; - compatible = "gianfar"; - reg = <25000 1000>; - local-mac-address = [ 00 E0 0C 00 73 01 ]; - interrupts = <13 2 14 2 18 2>; - interrupt-parent = <40000>; - phy-handle = <2452001>; - }; - - serial@4500 { - device_type = "serial"; - compatible = "ns16550"; - reg = <4500 100>; // reg base, size - clock-frequency = <0>; // should we fill in in uboot? - interrupts = <1a 2>; - interrupt-parent = <40000>; - }; - - serial@4600 { - device_type = "serial"; - compatible = "ns16550"; - reg = <4600 100>; // reg base, size - clock-frequency = <0>; // should we fill in in uboot? 
- interrupts = <1a 2>; - interrupt-parent = <40000>; - }; - - pci@8000 { - linux,phandle = <8000>; - interrupt-map-mask = <1f800 0 0 7>; - interrupt-map = < - - /* IDSEL 0x10 */ - 08000 0 0 1 40000 30 1 - 08000 0 0 2 40000 31 1 - 08000 0 0 3 40000 32 1 - 08000 0 0 4 40000 33 1 - - /* IDSEL 0x11 */ - 08800 0 0 1 40000 30 1 - 08800 0 0 2 40000 31 1 - 08800 0 0 3 40000 32 1 - 08800 0 0 4 40000 33 1 - - /* IDSEL 0x12 (Slot 1) */ - 09000 0 0 1 40000 30 1 - 09000 0 0 2 40000 31 1 - 09000 0 0 3 40000 32 1 - 09000 0 0 4 40000 33 1 - - /* IDSEL 0x13 (Slot 2) */ - 09800 0 0 1 40000 31 1 - 09800 0 0 2 40000 32 1 - 09800 0 0 3 40000 33 1 - 09800 0 0 4 40000 30 1 - - /* IDSEL 0x14 (Slot 3) */ - 0a000 0 0 1 40000 32 1 - 0a000 0 0 2 40000 33 1 - 0a000 0 0 3 40000 30 1 - 0a000 0 0 4 40000 31 1 - - /* IDSEL 0x15 (Slot 4) */ - 0a800 0 0 1 40000 33 1 - 0a800 0 0 2 40000 30 1 - 0a800 0 0 3 40000 31 1 - 0a800 0 0 4 40000 32 1 - - /* Bus 1 (Tundra Bridge) */ - /* IDSEL 0x12 (ISA bridge) */ - 19000 0 0 1 40000 30 1 - 19000 0 0 2 40000 31 1 - 19000 0 0 3 40000 32 1 - 19000 0 0 4 40000 33 1>; - interrupt-parent = <40000>; - interrupts = <08 2>; - bus-range = <0 0>; - ranges = <02000000 0 80000000 80000000 0 20000000 - 01000000 0 00000000 e2000000 0 00100000>; - clock-frequency = <3f940aa>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <8000 1000>; - compatible = "85xx"; - device_type = "pci"; - - i8259@19000 { - clock-frequency = <0>; - interrupt-controller; - device_type = "interrupt-controller"; - reg = <19000 0 0 0 1>; - #address-cells = <0>; - #interrupt-cells = <2>; - built-in; - compatible = "chrp,iic"; - big-endian; - interrupts = <1>; - interrupt-parent = <8000>; - }; - }; - - pci@9000 { - linux,phandle = <9000>; - interrupt-map-mask = ; - interrupt-map = < - - /* IDSEL 0x15 */ - a800 0 0 1 40000 3b 1 - a800 0 0 2 40000 3b 1 - a800 0 0 3 40000 3b 1 - a800 0 0 4 40000 3b 1>; - interrupt-parent = <40000>; - interrupts = <09 2>; - bus-range = <0 0>; - ranges = <02000000 0 a0000000 a0000000 0 20000000 - 01000000 0 00000000 e3000000 0 00100000>; - clock-frequency = <3f940aa>; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <9000 1000>; - compatible = "85xx"; - device_type = "pci"; - }; - - pic@40000 { - linux,phandle = <40000>; - clock-frequency = <0>; - interrupt-controller; - #address-cells = <0>; - #interrupt-cells = <2>; - reg = <40000 40000>; - built-in; - compatible = "chrp,open-pic"; - device_type = "open-pic"; - big-endian; - }; - }; -}; diff --git a/trunk/arch/powerpc/kernel/legacy_serial.c b/trunk/arch/powerpc/kernel/legacy_serial.c index 40a39291861f..359ab89748e0 100644 --- a/trunk/arch/powerpc/kernel/legacy_serial.c +++ b/trunk/arch/powerpc/kernel/legacy_serial.c @@ -115,7 +115,6 @@ static int __init add_legacy_soc_port(struct device_node *np, u64 addr; u32 *addrp; upf_t flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ; - struct device_node *tsi = of_get_parent(np); /* We only support ports that have a clock frequency properly * encoded in the device-tree. @@ -135,10 +134,7 @@ static int __init add_legacy_soc_port(struct device_node *np, /* Add port, irq will be dealt with later. We passed a translated * IO port value. 
It will be fixed up later along with the irq */ - if (tsi && !strcmp(tsi->type, "tsi-bridge")) - return add_legacy_port(np, -1, UPIO_TSI, addr, addr, NO_IRQ, flags, 0); - else - return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags, 0); + return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags, 0); } static int __init add_legacy_isa_port(struct device_node *np, @@ -468,7 +464,7 @@ static int __init serial_dev_init(void) fixup_port_irq(i, np, port); if (port->iotype == UPIO_PORT) fixup_port_pio(i, np, port); - if ((port->iotype == UPIO_MEM) || (port->iotype == UPIO_TSI)) + if (port->iotype == UPIO_MEM) fixup_port_mmio(i, np, port); } diff --git a/trunk/arch/powerpc/kernel/prom_parse.c b/trunk/arch/powerpc/kernel/prom_parse.c index 11052c212ad5..6a7e997c401d 100644 --- a/trunk/arch/powerpc/kernel/prom_parse.c +++ b/trunk/arch/powerpc/kernel/prom_parse.c @@ -598,6 +598,11 @@ static struct device_node *of_irq_find_parent(struct device_node *child) return p; } +static u8 of_irq_pci_swizzle(u8 slot, u8 pin) +{ + return (((pin - 1) + slot) % 4) + 1; +} + /* This doesn't need to be called if you don't have any special workaround * flags to pass */ @@ -886,12 +891,6 @@ int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq } EXPORT_SYMBOL_GPL(of_irq_map_one); -#ifdef CONFIG_PCI -static u8 of_irq_pci_swizzle(u8 slot, u8 pin) -{ - return (((pin - 1) + slot) % 4) + 1; -} - int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) { struct device_node *dn, *ppnode; @@ -968,4 +967,4 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) return of_irq_map_raw(ppnode, &lspec, laddr, out_irq); } EXPORT_SYMBOL_GPL(of_irq_map_pci); -#endif /* CONFIG_PCI */ + diff --git a/trunk/arch/powerpc/kernel/time.c b/trunk/arch/powerpc/kernel/time.c index 18e59e43d2b3..774c0a3c5019 100644 --- a/trunk/arch/powerpc/kernel/time.c +++ b/trunk/arch/powerpc/kernel/time.c @@ -417,7 +417,7 @@ static __inline__ void timer_check_rtc(void) /* * This version of gettimeofday has microsecond resolution. */ -static inline void __do_gettimeofday(struct timeval *tv) +static inline void __do_gettimeofday(struct timeval *tv, u64 tb_val) { unsigned long sec, usec; u64 tb_ticks, xsec; @@ -431,12 +431,7 @@ static inline void __do_gettimeofday(struct timeval *tv) * without a divide (and in fact, without a multiply) */ temp_varp = do_gtod.varp; - - /* Sampling the time base must be done after loading - * do_gtod.varp in order to avoid racing with update_gtod. 
- */ - data_barrier(temp_varp); - tb_ticks = get_tb() - temp_varp->tb_orig_stamp; + tb_ticks = tb_val - temp_varp->tb_orig_stamp; temp_tb_to_xs = temp_varp->tb_to_xs; temp_stamp_xsec = temp_varp->stamp_xsec; xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs); @@ -469,7 +464,7 @@ void do_gettimeofday(struct timeval *tv) tv->tv_usec = usec; return; } - __do_gettimeofday(tv); + __do_gettimeofday(tv, get_tb()); } EXPORT_SYMBOL(do_gettimeofday); @@ -655,7 +650,6 @@ void timer_interrupt(struct pt_regs * regs) int next_dec; int cpu = smp_processor_id(); unsigned long ticks; - u64 tb_next_jiffy; #ifdef CONFIG_PPC32 if (atomic_read(&ppc_n_lost_interrupts) != 0) @@ -697,14 +691,11 @@ void timer_interrupt(struct pt_regs * regs) continue; write_seqlock(&xtime_lock); - tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy; - if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) { - tb_last_jiffy = tb_next_jiffy; - tb_last_stamp = per_cpu(last_jiffy, cpu); - do_timer(regs); - timer_recalc_offset(tb_last_jiffy); - timer_check_rtc(); - } + tb_last_jiffy += tb_ticks_per_jiffy; + tb_last_stamp = per_cpu(last_jiffy, cpu); + do_timer(regs); + timer_recalc_offset(tb_last_jiffy); + timer_check_rtc(); write_sequnlock(&xtime_lock); } diff --git a/trunk/arch/powerpc/kernel/traps.c b/trunk/arch/powerpc/kernel/traps.c index 9b352bd0a460..e4d1713e8aea 100644 --- a/trunk/arch/powerpc/kernel/traps.c +++ b/trunk/arch/powerpc/kernel/traps.c @@ -585,14 +585,14 @@ static void parse_fpe(struct pt_regs *regs) #define INST_MFSPR_PVR_MASK 0xfc1fffff #define INST_DCBA 0x7c0005ec -#define INST_DCBA_MASK 0xfc0007fe +#define INST_DCBA_MASK 0x7c0007fe #define INST_MCRXR 0x7c000400 -#define INST_MCRXR_MASK 0xfc0007fe +#define INST_MCRXR_MASK 0x7c0007fe #define INST_STRING 0x7c00042a -#define INST_STRING_MASK 0xfc0007fe -#define INST_STRING_GEN_MASK 0xfc00067e +#define INST_STRING_MASK 0x7c0007fe +#define INST_STRING_GEN_MASK 0x7c00067e #define INST_LSWI 0x7c0004aa #define INST_LSWX 0x7c00042a #define INST_STSWI 0x7c0005aa diff --git a/trunk/arch/powerpc/mm/hugetlbpage.c b/trunk/arch/powerpc/mm/hugetlbpage.c index 5615acc29527..266b8b2ceac9 100644 --- a/trunk/arch/powerpc/mm/hugetlbpage.c +++ b/trunk/arch/powerpc/mm/hugetlbpage.c @@ -153,7 +153,7 @@ static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp) hpdp->pd = 0; tlb->need_flush = 1; pgtable_free_tlb(tlb, pgtable_free_cache(hugepte, HUGEPTE_CACHE_NUM, - PGF_CACHENUM_MASK)); + HUGEPTE_TABLE_SIZE-1)); } #ifdef CONFIG_PPC_64K_PAGES diff --git a/trunk/arch/powerpc/platforms/85xx/Kconfig b/trunk/arch/powerpc/platforms/85xx/Kconfig index c3268d9877e4..454fc53289ab 100644 --- a/trunk/arch/powerpc/platforms/85xx/Kconfig +++ b/trunk/arch/powerpc/platforms/85xx/Kconfig @@ -14,6 +14,7 @@ config MPC8540_ADS config MPC85xx_CDS bool "Freescale MPC85xx CDS" select DEFAULT_UIMAGE + select PPC_I8259 if PCI help This option enables support for the MPC85xx CDS board diff --git a/trunk/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/trunk/arch/powerpc/platforms/85xx/mpc85xx_ads.c index 9d2acfbbeccd..06a497676c99 100644 --- a/trunk/arch/powerpc/platforms/85xx/mpc85xx_ads.c +++ b/trunk/arch/powerpc/platforms/85xx/mpc85xx_ads.c @@ -37,7 +37,79 @@ unsigned long isa_io_base = 0; unsigned long isa_mem_base = 0; #endif +/* + * Internal interrupts are all Level Sensitive, and Positive Polarity + * + * Note: Likely, this table and the following function should be + * obtained and derived from the OF Device Tree. 
+ */ +static u_char mpc85xx_ads_openpic_initsenses[] __initdata = { + MPC85XX_INTERNAL_IRQ_SENSES, + 0x0, /* External 0: */ +#if defined(CONFIG_PCI) + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 1: PCI slot 0 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 2: PCI slot 1 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 3: PCI slot 2 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 4: PCI slot 3 */ +#else + 0x0, /* External 1: */ + 0x0, /* External 2: */ + 0x0, /* External 3: */ + 0x0, /* External 4: */ +#endif + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 5: PHY */ + 0x0, /* External 6: */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 7: PHY */ + 0x0, /* External 8: */ + 0x0, /* External 9: */ + 0x0, /* External 10: */ + 0x0, /* External 11: */ +}; + #ifdef CONFIG_PCI +/* + * interrupt routing + */ + +int +mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin) +{ + static char pci_irq_table[][4] = + /* + * This is little evil, but works around the fact + * that revA boards have IDSEL starting at 18 + * and others boards (older) start at 12 + * + * PCI IDSEL/INTPIN->INTLINE + * A B C D + */ + { + {PIRQA, PIRQB, PIRQC, PIRQD}, /* IDSEL 2 */ + {PIRQD, PIRQA, PIRQB, PIRQC}, + {PIRQC, PIRQD, PIRQA, PIRQB}, + {PIRQB, PIRQC, PIRQD, PIRQA}, /* IDSEL 5 */ + {0, 0, 0, 0}, /* -- */ + {0, 0, 0, 0}, /* -- */ + {0, 0, 0, 0}, /* -- */ + {0, 0, 0, 0}, /* -- */ + {0, 0, 0, 0}, /* -- */ + {0, 0, 0, 0}, /* -- */ + {PIRQA, PIRQB, PIRQC, PIRQD}, /* IDSEL 12 */ + {PIRQD, PIRQA, PIRQB, PIRQC}, + {PIRQC, PIRQD, PIRQA, PIRQB}, + {PIRQB, PIRQC, PIRQD, PIRQA}, /* IDSEL 15 */ + {0, 0, 0, 0}, /* -- */ + {0, 0, 0, 0}, /* -- */ + {PIRQA, PIRQB, PIRQC, PIRQD}, /* IDSEL 18 */ + {PIRQD, PIRQA, PIRQB, PIRQC}, + {PIRQC, PIRQD, PIRQA, PIRQB}, + {PIRQB, PIRQC, PIRQD, PIRQA}, /* IDSEL 21 */ + }; + + const long min_idsel = 2, max_idsel = 21, irqs_per_slot = 4; + return PCI_IRQ_TABLE_LOOKUP; +} + int mpc85xx_exclude_device(u_char bus, u_char devfn) { @@ -47,63 +119,44 @@ mpc85xx_exclude_device(u_char bus, u_char devfn) return PCIBIOS_SUCCESSFUL; } -void __init -mpc85xx_pcibios_fixup(void) -{ - struct pci_dev *dev = NULL; - - for_each_pci_dev(dev) - pci_read_irq_line(dev); -} #endif /* CONFIG_PCI */ void __init mpc85xx_ads_pic_init(void) { - struct mpic *mpic; - struct resource r; - struct device_node *np = NULL; - - np = of_find_node_by_type(np, "open-pic"); - - if (np == NULL) { - printk(KERN_ERR "Could not find open-pic node\n"); - return; - } - - if(of_address_to_resource(np, 0, &r)) { - printk(KERN_ERR "Could not map mpic register space\n"); - of_node_put(np); - return; - } - - mpic = mpic_alloc(np, r.start, - MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, - 4, 0, " OpenPIC "); - BUG_ON(mpic == NULL); - of_node_put(np); - - mpic_assign_isu(mpic, 0, r.start + 0x10200); - mpic_assign_isu(mpic, 1, r.start + 0x10280); - mpic_assign_isu(mpic, 2, r.start + 0x10300); - mpic_assign_isu(mpic, 3, r.start + 0x10380); - mpic_assign_isu(mpic, 4, r.start + 0x10400); - mpic_assign_isu(mpic, 5, r.start + 0x10480); - mpic_assign_isu(mpic, 6, r.start + 0x10500); - mpic_assign_isu(mpic, 7, r.start + 0x10580); - - /* Unused on this platform (leave room for 8548) */ - mpic_assign_isu(mpic, 8, r.start + 0x10600); - mpic_assign_isu(mpic, 9, r.start + 0x10680); - mpic_assign_isu(mpic, 10, r.start + 0x10700); - mpic_assign_isu(mpic, 11, r.start + 0x10780); - - /* External Interrupts */ - mpic_assign_isu(mpic, 12, r.start + 0x10000); - mpic_assign_isu(mpic, 13, r.start + 0x10080); 
- mpic_assign_isu(mpic, 14, r.start + 0x10100); - - mpic_init(mpic); + struct mpic *mpic1; + phys_addr_t OpenPIC_PAddr; + + /* Determine the Physical Address of the OpenPIC regs */ + OpenPIC_PAddr = get_immrbase() + MPC85xx_OPENPIC_OFFSET; + + mpic1 = mpic_alloc(OpenPIC_PAddr, + MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, + 4, MPC85xx_OPENPIC_IRQ_OFFSET, 0, 250, + mpc85xx_ads_openpic_initsenses, + sizeof(mpc85xx_ads_openpic_initsenses), + " OpenPIC "); + BUG_ON(mpic1 == NULL); + mpic_assign_isu(mpic1, 0, OpenPIC_PAddr + 0x10200); + mpic_assign_isu(mpic1, 1, OpenPIC_PAddr + 0x10280); + mpic_assign_isu(mpic1, 2, OpenPIC_PAddr + 0x10300); + mpic_assign_isu(mpic1, 3, OpenPIC_PAddr + 0x10380); + mpic_assign_isu(mpic1, 4, OpenPIC_PAddr + 0x10400); + mpic_assign_isu(mpic1, 5, OpenPIC_PAddr + 0x10480); + mpic_assign_isu(mpic1, 6, OpenPIC_PAddr + 0x10500); + mpic_assign_isu(mpic1, 7, OpenPIC_PAddr + 0x10580); + + /* dummy mappings to get to 48 */ + mpic_assign_isu(mpic1, 8, OpenPIC_PAddr + 0x10600); + mpic_assign_isu(mpic1, 9, OpenPIC_PAddr + 0x10680); + mpic_assign_isu(mpic1, 10, OpenPIC_PAddr + 0x10700); + mpic_assign_isu(mpic1, 11, OpenPIC_PAddr + 0x10780); + + /* External ints */ + mpic_assign_isu(mpic1, 12, OpenPIC_PAddr + 0x10000); + mpic_assign_isu(mpic1, 13, OpenPIC_PAddr + 0x10080); + mpic_assign_isu(mpic1, 14, OpenPIC_PAddr + 0x10100); + mpic_init(mpic1); } /* @@ -112,9 +165,7 @@ void __init mpc85xx_ads_pic_init(void) static void __init mpc85xx_ads_setup_arch(void) { struct device_node *cpu; -#ifdef CONFIG_PCI struct device_node *np; -#endif if (ppc_md.progress) ppc_md.progress("mpc85xx_ads_setup_arch()", 0); @@ -135,7 +186,8 @@ static void __init mpc85xx_ads_setup_arch(void) for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;) add_bridge(np); - ppc_md.pcibios_fixup = mpc85xx_pcibios_fixup; + ppc_md.pci_swizzle = common_swizzle; + ppc_md.pci_map_irq = mpc85xx_map_irq; ppc_md.pci_exclude_device = mpc85xx_exclude_device; #endif diff --git a/trunk/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/trunk/arch/powerpc/platforms/85xx/mpc85xx_cds.c index 1d357d32a29f..18e6e11f7020 100644 --- a/trunk/arch/powerpc/platforms/85xx/mpc85xx_cds.c +++ b/trunk/arch/powerpc/platforms/85xx/mpc85xx_cds.c @@ -57,8 +57,94 @@ unsigned long isa_mem_base = 0; static int cds_pci_slot = 2; static volatile u8 *cadmus; +/* + * Internal interrupts are all Level Sensitive, and Positive Polarity + * + * Note: Likely, this table and the following function should be + * obtained and derived from the OF Device Tree. 
+ */ +static u_char mpc85xx_cds_openpic_initsenses[] __initdata = { + MPC85XX_INTERNAL_IRQ_SENSES, +#if defined(CONFIG_PCI) + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Ext 0: PCI slot 0 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 1: PCI slot 1 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 2: PCI slot 2 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 3: PCI slot 3 */ +#else + 0x0, /* External 0: */ + 0x0, /* External 1: */ + 0x0, /* External 2: */ + 0x0, /* External 3: */ +#endif + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 5: PHY */ + 0x0, /* External 6: */ + 0x0, /* External 7: */ + 0x0, /* External 8: */ + 0x0, /* External 9: */ + 0x0, /* External 10: */ +#ifdef CONFIG_PCI + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 11: PCI2 slot 0 */ +#else + 0x0, /* External 11: */ +#endif +}; + #ifdef CONFIG_PCI +/* + * interrupt routing + */ +int +mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin) +{ + struct pci_controller *hose = pci_bus_to_hose(dev->bus->number); + + if (!hose->index) + { + /* Handle PCI1 interrupts */ + char pci_irq_table[][4] = + /* + * PCI IDSEL/INTPIN->INTLINE + * A B C D + */ + + /* Note IRQ assignment for slots is based on which slot the elysium is + * in -- in this setup elysium is in slot #2 (this PIRQA as first + * interrupt on slot */ + { + { 0, 1, 2, 3 }, /* 16 - PMC */ + { 0, 1, 2, 3 }, /* 17 P2P (Tsi320) */ + { 0, 1, 2, 3 }, /* 18 - Slot 1 */ + { 1, 2, 3, 0 }, /* 19 - Slot 2 */ + { 2, 3, 0, 1 }, /* 20 - Slot 3 */ + { 3, 0, 1, 2 }, /* 21 - Slot 4 */ + }; + + const long min_idsel = 16, max_idsel = 21, irqs_per_slot = 4; + int i, j; + + for (i = 0; i < 6; i++) + for (j = 0; j < 4; j++) + pci_irq_table[i][j] = + ((pci_irq_table[i][j] + 5 - + cds_pci_slot) & 0x3) + PIRQ0A; + + return PCI_IRQ_TABLE_LOOKUP; + } else { + /* Handle PCI2 interrupts (if we have one) */ + char pci_irq_table[][4] = + { + /* + * We only have one slot and one interrupt + * going to PIRQA - PIRQD */ + { PIRQ1A, PIRQ1A, PIRQ1A, PIRQ1A }, /* 21 - slot 0 */ + }; + + const long min_idsel = 21, max_idsel = 21, irqs_per_slot = 4; + + return PCI_IRQ_TABLE_LOOKUP; + } +} #define ARCADIA_HOST_BRIDGE_IDSEL 17 #define ARCADIA_2ND_BRIDGE_IDSEL 3 @@ -124,104 +210,50 @@ mpc85xx_cds_pcibios_fixup(void) pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11); pci_dev_put(dev); } - - /* Now map all the PCI irqs */ - dev = NULL; - for_each_pci_dev(dev) - pci_read_irq_line(dev); -} - -#ifdef CONFIG_PPC_I8259 -#warning The i8259 PIC support is currently broken -static void mpc85xx_8259_cascade(unsigned int irq, struct - irq_desc *desc, struct pt_regs *regs) -{ - unsigned int cascade_irq = i8259_irq(regs); - - if (cascade_irq != NO_IRQ) - generic_handle_irq(cascade_irq, regs); - - desc->chip->eoi(irq); } -#endif /* PPC_I8259 */ #endif /* CONFIG_PCI */ void __init mpc85xx_cds_pic_init(void) { - struct mpic *mpic; - struct resource r; - struct device_node *np = NULL; - struct device_node *cascade_node = NULL; - int cascade_irq; + struct mpic *mpic1; + phys_addr_t OpenPIC_PAddr; - np = of_find_node_by_type(np, "open-pic"); - - if (np == NULL) { - printk(KERN_ERR "Could not find open-pic node\n"); - return; - } + /* Determine the Physical Address of the OpenPIC regs */ + OpenPIC_PAddr = get_immrbase() + MPC85xx_OPENPIC_OFFSET; - if (of_address_to_resource(np, 0, &r)) { - printk(KERN_ERR "Failed to map mpic register space\n"); - of_node_put(np); - return; - } - - mpic = mpic_alloc(np, r.start, + mpic1 = mpic_alloc(OpenPIC_PAddr, MPIC_PRIMARY | 
MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, - 4, 0, " OpenPIC "); - BUG_ON(mpic == NULL); - - /* Return the mpic node */ - of_node_put(np); - - mpic_assign_isu(mpic, 0, r.start + 0x10200); - mpic_assign_isu(mpic, 1, r.start + 0x10280); - mpic_assign_isu(mpic, 2, r.start + 0x10300); - mpic_assign_isu(mpic, 3, r.start + 0x10380); - mpic_assign_isu(mpic, 4, r.start + 0x10400); - mpic_assign_isu(mpic, 5, r.start + 0x10480); - mpic_assign_isu(mpic, 6, r.start + 0x10500); - mpic_assign_isu(mpic, 7, r.start + 0x10580); - - /* Used only for 8548 so far, but no harm in - * allocating them for everyone */ - mpic_assign_isu(mpic, 8, r.start + 0x10600); - mpic_assign_isu(mpic, 9, r.start + 0x10680); - mpic_assign_isu(mpic, 10, r.start + 0x10700); - mpic_assign_isu(mpic, 11, r.start + 0x10780); - - /* External Interrupts */ - mpic_assign_isu(mpic, 12, r.start + 0x10000); - mpic_assign_isu(mpic, 13, r.start + 0x10080); - mpic_assign_isu(mpic, 14, r.start + 0x10100); - - mpic_init(mpic); - -#ifdef CONFIG_PPC_I8259 - /* Initialize the i8259 controller */ - for_each_node_by_type(np, "interrupt-controller") - if (device_is_compatible(np, "chrp,iic")) { - cascade_node = np; - break; - } - - if (cascade_node == NULL) { - printk(KERN_DEBUG "Could not find i8259 PIC\n"); - return; - } + 4, MPC85xx_OPENPIC_IRQ_OFFSET, 0, 250, + mpc85xx_cds_openpic_initsenses, + sizeof(mpc85xx_cds_openpic_initsenses), " OpenPIC "); + BUG_ON(mpic1 == NULL); + mpic_assign_isu(mpic1, 0, OpenPIC_PAddr + 0x10200); + mpic_assign_isu(mpic1, 1, OpenPIC_PAddr + 0x10280); + mpic_assign_isu(mpic1, 2, OpenPIC_PAddr + 0x10300); + mpic_assign_isu(mpic1, 3, OpenPIC_PAddr + 0x10380); + mpic_assign_isu(mpic1, 4, OpenPIC_PAddr + 0x10400); + mpic_assign_isu(mpic1, 5, OpenPIC_PAddr + 0x10480); + mpic_assign_isu(mpic1, 6, OpenPIC_PAddr + 0x10500); + mpic_assign_isu(mpic1, 7, OpenPIC_PAddr + 0x10580); + + /* dummy mappings to get to 48 */ + mpic_assign_isu(mpic1, 8, OpenPIC_PAddr + 0x10600); + mpic_assign_isu(mpic1, 9, OpenPIC_PAddr + 0x10680); + mpic_assign_isu(mpic1, 10, OpenPIC_PAddr + 0x10700); + mpic_assign_isu(mpic1, 11, OpenPIC_PAddr + 0x10780); + + /* External ints */ + mpic_assign_isu(mpic1, 12, OpenPIC_PAddr + 0x10000); + mpic_assign_isu(mpic1, 13, OpenPIC_PAddr + 0x10080); + mpic_assign_isu(mpic1, 14, OpenPIC_PAddr + 0x10100); + + mpic_init(mpic1); - cascade_irq = irq_of_parse_and_map(cascade_node, 0); - if (cascade_irq == NO_IRQ) { - printk(KERN_ERR "Failed to map cascade interrupt\n"); - return; - } - - i8259_init(cascade_node, 0); - of_node_put(cascade_node); +#ifdef CONFIG_PCI + mpic_setup_cascade(PIRQ0A, i8259_irq_cascade, NULL); - set_irq_chained_handler(cascade_irq, mpc85xx_8259_cascade); -#endif /* CONFIG_PPC_I8259 */ + i8259_init(0,0); +#endif } @@ -266,6 +298,8 @@ mpc85xx_cds_setup_arch(void) add_bridge(np); ppc_md.pcibios_fixup = mpc85xx_cds_pcibios_fixup; + ppc_md.pci_swizzle = common_swizzle; + ppc_md.pci_map_irq = mpc85xx_map_irq; ppc_md.pci_exclude_device = mpc85xx_exclude_device; #endif diff --git a/trunk/arch/powerpc/platforms/86xx/mpc8641_hpcn.h b/trunk/arch/powerpc/platforms/86xx/mpc8641_hpcn.h index 41e554c4af94..5d2bcf78cef7 100644 --- a/trunk/arch/powerpc/platforms/86xx/mpc8641_hpcn.h +++ b/trunk/arch/powerpc/platforms/86xx/mpc8641_hpcn.h @@ -16,6 +16,38 @@ #include +/* PCI interrupt controller */ +#define PIRQA 3 +#define PIRQB 4 +#define PIRQC 5 +#define PIRQD 6 +#define PIRQ7 7 +#define PIRQE 9 +#define PIRQF 10 +#define PIRQG 11 +#define PIRQH 12 + +/* PCI-Express memory map */ +#define MPC86XX_PCIE_LOWER_IO 0x00000000 
+#define MPC86XX_PCIE_UPPER_IO 0x00ffffff + +#define MPC86XX_PCIE_LOWER_MEM 0x80000000 +#define MPC86XX_PCIE_UPPER_MEM 0x9fffffff + +#define MPC86XX_PCIE_IO_BASE 0xe2000000 +#define MPC86XX_PCIE_MEM_OFFSET 0x00000000 + +#define MPC86XX_PCIE_IO_SIZE 0x01000000 + +#define PCIE1_CFG_ADDR_OFFSET (0x8000) +#define PCIE1_CFG_DATA_OFFSET (0x8004) + +#define PCIE2_CFG_ADDR_OFFSET (0x9000) +#define PCIE2_CFG_DATA_OFFSET (0x9004) + +#define MPC86xx_PCIE_OFFSET PCIE1_CFG_ADDR_OFFSET +#define MPC86xx_PCIE_SIZE (0x1000) + #define MPC86XX_RSTCR_OFFSET (0xe00b0) /* Reset Control Register */ #endif /* __MPC8641_HPCN_H__ */ diff --git a/trunk/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/trunk/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c index 146da3001c67..ebae73eb0063 100644 --- a/trunk/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c +++ b/trunk/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c @@ -37,14 +37,6 @@ #include "mpc86xx.h" #include "mpc8641_hpcn.h" -#undef DEBUG - -#ifdef DEBUG -#define DBG(fmt...) do { printk(KERN_ERR fmt); } while(0) -#else -#define DBG(fmt...) do { } while(0) -#endif - #ifndef CONFIG_PCI unsigned long isa_io_base = 0; unsigned long isa_mem_base = 0; @@ -52,215 +44,205 @@ unsigned long pci_dram_offset = 0; #endif -static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc, - struct pt_regs *regs) -{ - unsigned int cascade_irq = i8259_irq(regs); - if (cascade_irq != NO_IRQ) - generic_handle_irq(cascade_irq, regs); - desc->chip->eoi(irq); -} +/* + * Internal interrupts are all Level Sensitive, and Positive Polarity + */ + +static u_char mpc86xx_hpcn_openpic_initsenses[] __initdata = { + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 0: Reserved */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 1: MCM */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 2: DDR DRAM */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 3: LBIU */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 4: DMA 0 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 5: DMA 1 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 6: DMA 2 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 7: DMA 3 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 8: PCIE1 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 9: PCIE2 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 10: Reserved */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 11: Reserved */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 12: DUART2 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 13: TSEC 1 Transmit */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 14: TSEC 1 Receive */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 15: TSEC 3 transmit */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 16: TSEC 3 receive */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 17: TSEC 3 error */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 18: TSEC 1 Receive/Transmit Error */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 19: TSEC 2 Transmit */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 20: TSEC 2 Receive */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 21: TSEC 4 transmit */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 22: TSEC 4 receive */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 23: TSEC 4 error */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 24: TSEC 2 Receive/Transmit Error */ + (IRQ_SENSE_LEVEL | 
IRQ_POLARITY_POSITIVE), /* Internal 25: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 26: DUART1 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 27: I2C */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 28: Performance Monitor */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 29: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 30: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 31: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 32: SRIO error/write-port unit */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 33: SRIO outbound doorbell */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 34: SRIO inbound doorbell */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 35: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 36: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 37: SRIO outbound message unit 1 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 38: SRIO inbound message unit 1 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 39: SRIO outbound message unit 2 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 40: SRIO inbound message unit 2 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 41: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 42: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 43: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 44: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 45: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 46: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 47: Unused */ + 0x0, /* External 0: */ + 0x0, /* External 1: */ + 0x0, /* External 2: */ + 0x0, /* External 3: */ + 0x0, /* External 4: */ + 0x0, /* External 5: */ + 0x0, /* External 6: */ + 0x0, /* External 7: */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 8: Pixis FPGA */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* External 9: ULI 8259 INTR Cascade */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 10: Quad ETH PHY */ + 0x0, /* External 11: */ + 0x0, + 0x0, + 0x0, + 0x0, +}; + void __init mpc86xx_hpcn_init_irq(void) { struct mpic *mpic1; - struct device_node *np, *cascade_node = NULL; - int cascade_irq; phys_addr_t openpic_paddr; - np = of_find_node_by_type(NULL, "open-pic"); - if (np == NULL) - return; - /* Determine the Physical Address of the OpenPIC regs */ openpic_paddr = get_immrbase() + MPC86xx_OPENPIC_OFFSET; /* Alloc mpic structure and per isu has 16 INT entries. 
*/ - mpic1 = mpic_alloc(np, openpic_paddr, + mpic1 = mpic_alloc(openpic_paddr, MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, - 16, NR_IRQS - 4, + 16, MPC86xx_OPENPIC_IRQ_OFFSET, 0, 250, + mpc86xx_hpcn_openpic_initsenses, + sizeof(mpc86xx_hpcn_openpic_initsenses), " MPIC "); BUG_ON(mpic1 == NULL); - mpic_assign_isu(mpic1, 0, openpic_paddr + 0x10000); - /* 48 Internal Interrupts */ - mpic_assign_isu(mpic1, 1, openpic_paddr + 0x10200); - mpic_assign_isu(mpic1, 2, openpic_paddr + 0x10400); - mpic_assign_isu(mpic1, 3, openpic_paddr + 0x10600); + mpic_assign_isu(mpic1, 0, openpic_paddr + 0x10200); + mpic_assign_isu(mpic1, 1, openpic_paddr + 0x10400); + mpic_assign_isu(mpic1, 2, openpic_paddr + 0x10600); - /* 16 External interrupts - * Moving them from [0 - 15] to [64 - 79] - */ - mpic_assign_isu(mpic1, 4, openpic_paddr + 0x10000); + /* 16 External interrupts */ + mpic_assign_isu(mpic1, 3, openpic_paddr + 0x10000); mpic_init(mpic1); #ifdef CONFIG_PCI - /* Initialize i8259 controller */ - for_each_node_by_type(np, "interrupt-controller") - if (device_is_compatible(np, "chrp,iic")) { - cascade_node = np; - break; - } - if (cascade_node == NULL) { - printk(KERN_DEBUG "mpc86xxhpcn: no ISA interrupt controller\n"); - return; - } - - cascade_irq = irq_of_parse_and_map(cascade_node, 0); - if (cascade_irq == NO_IRQ) { - printk(KERN_ERR "mpc86xxhpcn: failed to map cascade interrupt"); - return; - } - DBG("mpc86xxhpcn: cascade mapped to irq %d\n", cascade_irq); - - i8259_init(cascade_node, 0); - set_irq_chained_handler(cascade_irq, mpc86xx_8259_cascade); + mpic_setup_cascade(MPC86xx_IRQ_EXT9, i8259_irq_cascade, NULL); + i8259_init(0, I8259_OFFSET); #endif } -#ifdef CONFIG_PCI -enum pirq{PIRQA = 8, PIRQB, PIRQC, PIRQD, PIRQE, PIRQF, PIRQG, PIRQH}; -const unsigned char uli1575_irq_route_table[16] = { - 0, /* 0: Reserved */ - 0x8, /* 1: 0b1000 */ - 0, /* 2: Reserved */ - 0x2, /* 3: 0b0010 */ - 0x4, /* 4: 0b0100 */ - 0x5, /* 5: 0b0101 */ - 0x7, /* 6: 0b0111 */ - 0x6, /* 7: 0b0110 */ - 0, /* 8: Reserved */ - 0x1, /* 9: 0b0001 */ - 0x3, /* 10: 0b0011 */ - 0x9, /* 11: 0b1001 */ - 0xb, /* 12: 0b1011 */ - 0, /* 13: Reserved */ - 0xd, /* 14, 0b1101 */ - 0xf, /* 15, 0b1111 */ -}; -static int __devinit -get_pci_irq_from_of(struct pci_controller *hose, int slot, int pin) +#ifdef CONFIG_PCI +/* + * interrupt routing + */ + +int +mpc86xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin) { - struct of_irq oirq; - u32 laddr[3]; - struct device_node *hosenode = hose ? 
hose->arch_data : NULL; - - if (!hosenode) return -EINVAL; - - laddr[0] = (hose->first_busno << 16) | (PCI_DEVFN(slot, 0) << 8); - laddr[1] = laddr[2] = 0; - of_irq_map_raw(hosenode, &pin, laddr, &oirq); - DBG("mpc86xx_hpcn: pci irq addr %x, slot %d, pin %d, irq %d\n", - laddr[0], slot, pin, oirq.specifier[0]); - return oirq.specifier[0]; + static char pci_irq_table[][4] = { + /* + * PCI IDSEL/INTPIN->INTLINE + * A B C D + */ + {PIRQA, PIRQB, PIRQC, PIRQD}, /* IDSEL 17 -- PCI Slot 1 */ + {PIRQB, PIRQC, PIRQD, PIRQA}, /* IDSEL 18 -- PCI Slot 2 */ + {0, 0, 0, 0}, /* IDSEL 19 */ + {0, 0, 0, 0}, /* IDSEL 20 */ + {0, 0, 0, 0}, /* IDSEL 21 */ + {0, 0, 0, 0}, /* IDSEL 22 */ + {0, 0, 0, 0}, /* IDSEL 23 */ + {0, 0, 0, 0}, /* IDSEL 24 */ + {0, 0, 0, 0}, /* IDSEL 25 */ + {PIRQD, PIRQA, PIRQB, PIRQC}, /* IDSEL 26 -- PCI Bridge*/ + {PIRQC, 0, 0, 0}, /* IDSEL 27 -- LAN */ + {PIRQE, PIRQF, PIRQH, PIRQ7}, /* IDSEL 28 -- USB 1.1 */ + {PIRQE, PIRQF, PIRQG, 0}, /* IDSEL 29 -- Audio & Modem */ + {PIRQH, 0, 0, 0}, /* IDSEL 30 -- LPC & PMU*/ + {PIRQD, 0, 0, 0}, /* IDSEL 31 -- ATA */ + }; + + const long min_idsel = 17, max_idsel = 31, irqs_per_slot = 4; + return PCI_IRQ_TABLE_LOOKUP + I8259_OFFSET; } -static void __devinit quirk_uli1575(struct pci_dev *dev) +static void __devinit quirk_ali1575(struct pci_dev *dev) { unsigned short temp; - struct pci_controller *hose = pci_bus_to_host(dev->bus); - unsigned char irq2pin[16]; - unsigned long pirq_map_word = 0; - u32 irq; - int i; /* - * ULI1575 interrupts route setup - */ - memset(irq2pin, 0, 16); /* Initialize default value 0 */ - - /* - * PIRQA -> PIRQD mapping read from OF-tree - * - * interrupts for PCI slot0 -- PIRQA / PIRQB / PIRQC / PIRQD - * PCI slot1 -- PIRQB / PIRQC / PIRQD / PIRQA - */ - for (i = 0; i < 4; i++){ - irq = get_pci_irq_from_of(hose, 17, i + 1); - if (irq > 0 && irq < 16) - irq2pin[irq] = PIRQA + i; - else - printk(KERN_WARNING "ULI1575 device" - "(slot %d, pin %d) irq %d is invalid.\n", - 17, i, irq); - } - - /* - * PIRQE -> PIRQF mapping set manually + * ALI1575 interrupts route table setup: * * IRQ pin IRQ# + * PIRQA ---- 3 + * PIRQB ---- 4 + * PIRQC ---- 5 + * PIRQD ---- 6 * PIRQE ---- 9 * PIRQF ---- 10 * PIRQG ---- 11 * PIRQH ---- 12 + * + * interrupts for PCI slot0 -- PIRQA / PIRQB / PIRQC / PIRQD + * PCI slot1 -- PIRQB / PIRQC / PIRQD / PIRQA */ - for (i = 0; i < 4; i++) irq2pin[i + 9] = PIRQE + i; - - /* Set IRQ-PIRQ Mapping to ULI1575 */ - for (i = 0; i < 16; i++) - if (irq2pin[i]) - pirq_map_word |= (uli1575_irq_route_table[i] & 0xf) - << ((irq2pin[i] - PIRQA) * 4); + pci_write_config_dword(dev, 0x48, 0xb9317542); - /* ULI1575 IRQ mapping conf register default value is 0xb9317542 */ - DBG("Setup ULI1575 IRQ mapping configuration register value = 0x%x\n", - pirq_map_word); - pci_write_config_dword(dev, 0x48, pirq_map_word); + /* USB 1.1 OHCI controller 1, interrupt: PIRQE */ + pci_write_config_byte(dev, 0x86, 0x0c); -#define ULI1575_SET_DEV_IRQ(slot, pin, reg) \ - do { \ - int irq; \ - irq = get_pci_irq_from_of(hose, slot, pin); \ - if (irq > 0 && irq < 16) \ - pci_write_config_byte(dev, reg, irq2pin[irq]); \ - else \ - printk(KERN_WARNING "ULI1575 device" \ - "(slot %d, pin %d) irq %d is invalid.\n", \ - slot, pin, irq); \ - } while(0) + /* USB 1.1 OHCI controller 2, interrupt: PIRQF */ + pci_write_config_byte(dev, 0x87, 0x0d); - /* USB 1.1 OHCI controller 1, slot 28, pin 1 */ - ULI1575_SET_DEV_IRQ(28, 1, 0x86); + /* USB 1.1 OHCI controller 3, interrupt: PIRQH */ + pci_write_config_byte(dev, 0x88, 0x0f); - /* USB 1.1 OHCI controller 
2, slot 28, pin 2 */ - ULI1575_SET_DEV_IRQ(28, 2, 0x87); + /* USB 2.0 controller, interrupt: PIRQ7 */ + pci_write_config_byte(dev, 0x74, 0x06); - /* USB 1.1 OHCI controller 3, slot 28, pin 3 */ - ULI1575_SET_DEV_IRQ(28, 3, 0x88); + /* Audio controller, interrupt: PIRQE */ + pci_write_config_byte(dev, 0x8a, 0x0c); - /* USB 2.0 controller, slot 28, pin 4 */ - irq = get_pci_irq_from_of(hose, 28, 4); - if (irq >= 0 && irq <=15) - pci_write_config_dword(dev, 0x74, uli1575_irq_route_table[irq]); + /* Modem controller, interrupt: PIRQF */ + pci_write_config_byte(dev, 0x8b, 0x0d); - /* Audio controller, slot 29, pin 1 */ - ULI1575_SET_DEV_IRQ(29, 1, 0x8a); + /* HD audio controller, interrupt: PIRQG */ + pci_write_config_byte(dev, 0x8c, 0x0e); - /* Modem controller, slot 29, pin 2 */ - ULI1575_SET_DEV_IRQ(29, 2, 0x8b); + /* Serial ATA interrupt: PIRQD */ + pci_write_config_byte(dev, 0x8d, 0x0b); - /* HD audio controller, slot 29, pin 3 */ - ULI1575_SET_DEV_IRQ(29, 3, 0x8c); + /* SMB interrupt: PIRQH */ + pci_write_config_byte(dev, 0x8e, 0x0f); - /* SMB interrupt: slot 30, pin 1 */ - ULI1575_SET_DEV_IRQ(30, 1, 0x8e); - - /* PMU ACPI SCI interrupt: slot 30, pin 2 */ - ULI1575_SET_DEV_IRQ(30, 2, 0x8f); - - /* Serial ATA interrupt: slot 31, pin 1 */ - ULI1575_SET_DEV_IRQ(31, 1, 0x8d); + /* PMU ACPI SCI interrupt: PIRQH */ + pci_write_config_byte(dev, 0x8f, 0x0f); /* Primary PATA IDE IRQ: 14 * Secondary PATA IDE IRQ: 15 */ - pci_write_config_byte(dev, 0x44, 0x30 | uli1575_irq_route_table[14]); - pci_write_config_byte(dev, 0x75, uli1575_irq_route_table[15]); + pci_write_config_byte(dev, 0x44, 0x3d); + pci_write_config_byte(dev, 0x75, 0x0f); /* Set IRQ14 and IRQ15 to legacy IRQs */ pci_read_config_word(dev, 0x46, &temp); @@ -282,8 +264,6 @@ static void __devinit quirk_uli1575(struct pci_dev *dev) */ outb(0xfa, 0x4d0); outb(0x1e, 0x4d1); - -#undef ULI1575_SET_DEV_IRQ } static void __devinit quirk_uli5288(struct pci_dev *dev) @@ -326,7 +306,7 @@ static void __devinit early_uli5249(struct pci_dev *dev) dev->class |= 0x1; } -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x1575, quirk_uli1575); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x1575, quirk_ali1575); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5288, quirk_uli5288); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5229, quirk_uli5229); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AL, 0x5249, early_uli5249); @@ -357,6 +337,8 @@ mpc86xx_hpcn_setup_arch(void) for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;) add_bridge(np); + ppc_md.pci_swizzle = common_swizzle; + ppc_md.pci_map_irq = mpc86xx_map_irq; ppc_md.pci_exclude_device = mpc86xx_exclude_device; #endif @@ -395,15 +377,6 @@ mpc86xx_hpcn_show_cpuinfo(struct seq_file *m) } -void __init mpc86xx_hpcn_pcibios_fixup(void) -{ - struct pci_dev *dev = NULL; - - for_each_pci_dev(dev) - pci_read_irq_line(dev); -} - - /* * Called very early, device-tree isn't unflattened */ @@ -458,7 +431,6 @@ define_machine(mpc86xx_hpcn) { .setup_arch = mpc86xx_hpcn_setup_arch, .init_IRQ = mpc86xx_hpcn_init_irq, .show_cpuinfo = mpc86xx_hpcn_show_cpuinfo, - .pcibios_fixup = mpc86xx_hpcn_pcibios_fixup, .get_irq = mpic_get_irq, .restart = mpc86xx_restart, .time_init = mpc86xx_time_init, diff --git a/trunk/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/trunk/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c index ed00ed2455dd..d7a4fc7ca238 100644 --- a/trunk/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c +++ b/trunk/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c @@ -1,7 +1,7 @@ /* * mpc7448_hpc2.c * - * Board setup 
routines for the Freescale mpc7448hpc2(taiga) platform + * Board setup routines for the Freescale Taiga platform * * Author: Jacob Pan * jacob.pan@freescale.com @@ -12,10 +12,10 @@ * * Copyright 2004-2006 Freescale Semiconductor, Inc. * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. + * This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. */ #include @@ -62,8 +62,43 @@ pci_dram_offset = MPC7448_HPC2_PCI_MEM_OFFSET; extern int tsi108_setup_pci(struct device_node *dev); extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val); extern void tsi108_pci_int_init(void); -extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc, - struct pt_regs *regs); +extern int tsi108_irq_cascade(struct pt_regs *regs, void *unused); + +/* + * Define all of the IRQ senses and polarities. Taken from the + * mpc7448hpc manual. + * Note: Likely, this table and the following function should be + * obtained and derived from the OF Device Tree. + */ + +static u_char mpc7448_hpc2_pic_initsenses[] __initdata = { + /* External on-board sources */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* INT[0] XINT0 from FPGA */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* INT[1] XINT1 from FPGA */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* INT[2] PHY_INT from both GIGE */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* INT[3] RESERVED */ + /* Internal Tsi108/109 interrupt sources */ + (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */ + (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */ + (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */ + (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */ + (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* DMA0 */ + (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* DMA1 */ + (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* DMA2 */ + (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* DMA3 */ + (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* UART0 */ + (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* UART1 */ + (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* I2C */ + (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* GPIO */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* GIGE0 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* GIGE1 */ + (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */ + (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* HLP */ + (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* SDC */ + (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Processor IF */ + (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* PCI/X block */ +}; int mpc7448_hpc2_exclude_device(u_char bus, u_char devfn) { @@ -194,8 +229,6 @@ static void __init mpc7448_hpc2_init_IRQ(void) { struct mpic *mpic; phys_addr_t mpic_paddr = 0; - unsigned int cascade_pci_irq; - struct device_node *tsi_pci; struct device_node *tsi_pic; tsi_pic = of_find_node_by_type(NULL, "open-pic"); @@ -213,31 +246,24 @@ static void __init mpc7448_hpc2_init_IRQ(void) DBG("%s: tsi108pic phys_addr = 0x%x\n", __FUNCTION__, (u32) mpic_paddr); - mpic = mpic_alloc(tsi_pic, mpic_paddr, + mpic = mpic_alloc(mpic_paddr, MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET | MPIC_SPV_EOI | MPIC_MOD_ID(MPIC_ID_TSI108), 0, 
/* num_sources used */ + TSI108_IRQ_BASE, 0, /* num_sources used */ - "Tsi108_PIC"); + NR_IRQS - 4 /* XXXX */, + mpc7448_hpc2_pic_initsenses, + sizeof(mpc7448_hpc2_pic_initsenses), "Tsi108_PIC"); BUG_ON(mpic == NULL); /* XXXX */ - mpic_init(mpic); - - tsi_pci = of_find_node_by_type(NULL, "pci"); - if (tsi_pci == 0) { - printk("%s: No tsi108 pci node found !\n", __FUNCTION__); - return; - } - - cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0); - set_irq_data(cascade_pci_irq, mpic); - set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade); + mpic_init(mpic); + mpic_setup_cascade(IRQ_TSI108_PCI, tsi108_irq_cascade, mpic); tsi108_pci_int_init(); /* Configure MPIC outputs to CPU0 */ tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); - of_node_put(tsi_pic); } void mpc7448_hpc2_show_cpuinfo(struct seq_file *m) @@ -294,7 +320,6 @@ static int mpc7448_machine_check_exception(struct pt_regs *regs) return 0; } - define_machine(mpc7448_hpc2){ .name = "MPC7448 HPC2", .probe = mpc7448_hpc2_probe, diff --git a/trunk/arch/powerpc/platforms/powermac/bootx_init.c b/trunk/arch/powerpc/platforms/powermac/bootx_init.c index 9d73d0234c5d..6a026c733f6a 100644 --- a/trunk/arch/powerpc/platforms/powermac/bootx_init.c +++ b/trunk/arch/powerpc/platforms/powermac/bootx_init.c @@ -411,15 +411,8 @@ static unsigned long __init bootx_flatten_dt(unsigned long start) DBG("End of boot params: %x\n", mem_end); rsvmap[0] = mem_start; rsvmap[1] = mem_end; - if (bootx_info->ramDisk) { - rsvmap[2] = ((unsigned long)bootx_info) + bootx_info->ramDisk; - rsvmap[3] = rsvmap[2] + bootx_info->ramDiskSize; - rsvmap[4] = 0; - rsvmap[5] = 0; - } else { - rsvmap[2] = 0; - rsvmap[3] = 0; - } + rsvmap[2] = 0; + rsvmap[3] = 0; return (unsigned long)hdr; } @@ -550,12 +543,12 @@ void __init bootx_init(unsigned long r3, unsigned long r4) */ if (bi->version < 5) { space = bi->deviceTreeOffset + bi->deviceTreeSize; - if (bi->ramDisk >= space) + if (bi->ramDisk) space = bi->ramDisk + bi->ramDiskSize; } else space = bi->totalParamsSize; - bootx_printf("Total space used by parameters & ramdisk: 0x%x \n", space); + bootx_printf("Total space used by parameters & ramdisk: %x \n", space); /* New BootX will have flushed all TLBs and enters kernel with * MMU switched OFF, so this should not be useful anymore. 
diff --git a/trunk/arch/powerpc/sysdev/fsl_soc.c b/trunk/arch/powerpc/sysdev/fsl_soc.c index ef10bcf2d943..12b65609c072 100644 --- a/trunk/arch/powerpc/sysdev/fsl_soc.c +++ b/trunk/arch/powerpc/sysdev/fsl_soc.c @@ -85,8 +85,11 @@ static int __init gfar_mdio_of_init(void) mdio_data.irq[k] = -1; while ((child = of_get_next_child(np, child)) != NULL) { - u32 *id = get_property(child, "reg", NULL); - mdio_data.irq[*id] = irq_of_parse_and_map(child, 0); + if (child->n_intrs) { + u32 *id = + (u32 *) get_property(child, "reg", NULL); + mdio_data.irq[*id] = child->intrs[0].line; + } } ret = @@ -128,7 +131,6 @@ static int __init gfar_of_init(void) char *model; void *mac_addr; phandle *ph; - int n_res = 1; memset(r, 0, sizeof(r)); memset(&gfar_data, 0, sizeof(gfar_data)); @@ -137,7 +139,8 @@ static int __init gfar_of_init(void) if (ret) goto err; - r[1].start = r[1].end = irq_of_parse_and_map(np, 0); + r[1].start = np->intrs[0].line; + r[1].end = np->intrs[0].line; r[1].flags = IORESOURCE_IRQ; model = get_property(np, "model", NULL); @@ -147,19 +150,19 @@ static int __init gfar_of_init(void) r[1].name = gfar_tx_intr; r[2].name = gfar_rx_intr; - r[2].start = r[2].end = irq_of_parse_and_map(np, 1); + r[2].start = np->intrs[1].line; + r[2].end = np->intrs[1].line; r[2].flags = IORESOURCE_IRQ; r[3].name = gfar_err_intr; - r[3].start = r[3].end = irq_of_parse_and_map(np, 2); + r[3].start = np->intrs[2].line; + r[3].end = np->intrs[2].line; r[3].flags = IORESOURCE_IRQ; - - n_res += 2; } gfar_dev = platform_device_register_simple("fsl-gianfar", i, &r[0], - n_res + 1); + np->n_intrs + 1); if (IS_ERR(gfar_dev)) { ret = PTR_ERR(gfar_dev); @@ -256,7 +259,8 @@ static int __init fsl_i2c_of_init(void) if (ret) goto err; - r[1].start = r[1].end = irq_of_parse_and_map(np, 0); + r[1].start = np->intrs[0].line; + r[1].end = np->intrs[0].line; r[1].flags = IORESOURCE_IRQ; i2c_dev = platform_device_register_simple("fsl-i2c", i, r, 2); @@ -392,7 +396,8 @@ static int __init fsl_usb_of_init(void) if (ret) goto err; - r[1].start = r[1].end = irq_of_parse_and_map(np, 0); + r[1].start = np->intrs[0].line; + r[1].end = np->intrs[0].line; r[1].flags = IORESOURCE_IRQ; usb_dev_mph = @@ -440,7 +445,8 @@ static int __init fsl_usb_of_init(void) if (ret) goto unreg_mph; - r[1].start = r[1].end = irq_of_parse_and_map(np, 0); + r[1].start = np->intrs[0].line; + r[1].end = np->intrs[0].line; r[1].flags = IORESOURCE_IRQ; usb_dev_dr = diff --git a/trunk/arch/powerpc/sysdev/tsi108_dev.c b/trunk/arch/powerpc/sysdev/tsi108_dev.c index f3038461d4c0..26a0cc820cde 100644 --- a/trunk/arch/powerpc/sysdev/tsi108_dev.c +++ b/trunk/arch/powerpc/sysdev/tsi108_dev.c @@ -93,15 +93,13 @@ static int __init tsi108_eth_of_init(void) goto err; r[1].name = "tx"; - r[1].start = irq_of_parse_and_map(np, 0); - r[1].end = irq_of_parse_and_map(np, 0); + r[1].start = np->intrs[0].line; + r[1].end = np->intrs[0].line; r[1].flags = IORESOURCE_IRQ; - DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n", - __FUNCTION__,r[1].name, r[1].start, r[1].end); tsi_eth_dev = platform_device_register_simple("tsi-ethernet", i, &r[0], - 1); + np->n_intrs + 1); if (IS_ERR(tsi_eth_dev)) { ret = PTR_ERR(tsi_eth_dev); @@ -129,7 +127,7 @@ static int __init tsi108_eth_of_init(void) tsi_eth_data.regs = r[0].start; tsi_eth_data.phyregs = res.start; tsi_eth_data.phy = *phy_id; - tsi_eth_data.irq_num = irq_of_parse_and_map(np, 0); + tsi_eth_data.irq_num = np->intrs[0].line; of_node_put(phy); ret = platform_device_add_data(tsi_eth_dev, &tsi_eth_data, diff --git 
a/trunk/arch/powerpc/sysdev/tsi108_pci.c b/trunk/arch/powerpc/sysdev/tsi108_pci.c index 2ab06ed3ae73..3265d54c82ed 100644 --- a/trunk/arch/powerpc/sysdev/tsi108_pci.c +++ b/trunk/arch/powerpc/sysdev/tsi108_pci.c @@ -26,6 +26,7 @@ #include #include + #include #include #include @@ -227,7 +228,7 @@ int __init tsi108_setup_pci(struct device_node *dev) (hose)->ops = &tsi108_direct_pci_ops; - printk(KERN_INFO "Found tsi108 PCI host bridge at 0x%08x. " + printk(KERN_INFO "Found tsi108 PCI host bridge at 0x%08lx. " "Firmware bus number: %d->%d\n", rsrc.start, hose->first_busno, hose->last_busno); @@ -277,7 +278,7 @@ static void init_pci_source(void) mb(); } -static inline unsigned int get_pci_source(void) +static inline int get_pci_source(void) { u_int temp = 0; int irq = -1; @@ -370,12 +371,12 @@ static void tsi108_pci_irq_end(u_int irq) * Interrupt controller descriptor for cascaded PCI interrupt controller. */ -static struct irq_chip tsi108_pci_irq = { +struct hw_interrupt_type tsi108_pci_irq = { .typename = "tsi108_PCI_int", - .mask = tsi108_pci_irq_disable, + .enable = tsi108_pci_irq_enable, + .disable = tsi108_pci_irq_disable, .ack = tsi108_pci_irq_ack, .end = tsi108_pci_irq_end, - .unmask = tsi108_pci_irq_enable, }; /* @@ -398,18 +399,14 @@ void __init tsi108_pci_int_init(void) DBG("Tsi108_pci_int_init: initializing PCI interrupts\n"); for (i = 0; i < NUM_PCI_IRQS; i++) { - irq_desc[i + IRQ_PCI_INTAD_BASE].chip = &tsi108_pci_irq; + irq_desc[i + IRQ_PCI_INTAD_BASE].handler = &tsi108_pci_irq; irq_desc[i + IRQ_PCI_INTAD_BASE].status |= IRQ_LEVEL; } init_pci_source(); } -void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc, - struct pt_regs *regs) +int tsi108_irq_cascade(struct pt_regs *regs, void *unused) { - unsigned int cascade_irq = get_pci_source(); - if (cascade_irq != NO_IRQ) - generic_handle_irq(cascade_irq, regs); - desc->chip->eoi(irq); + return get_pci_source(); } diff --git a/trunk/arch/sparc/kernel/setup.c b/trunk/arch/sparc/kernel/setup.c index 0251cab4708b..35488d6c7457 100644 --- a/trunk/arch/sparc/kernel/setup.c +++ b/trunk/arch/sparc/kernel/setup.c @@ -348,9 +348,9 @@ void __init setup_arch(char **cmdline_p) init_mm.context = (unsigned long) NO_CONTEXT; init_task.thread.kregs = &fake_swapper_regs; - paging_init(); - smp_setup_cpu_possible_map(); + + paging_init(); } static int __init set_preferred_console(void) diff --git a/trunk/arch/sparc/kernel/smp.c b/trunk/arch/sparc/kernel/smp.c index 276f22881d0f..e311ade1b490 100644 --- a/trunk/arch/sparc/kernel/smp.c +++ b/trunk/arch/sparc/kernel/smp.c @@ -34,6 +34,7 @@ #include #include +volatile int smp_processors_ready = 0; int smp_num_cpus = 1; volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,}; unsigned char boot_cpu_id = 0; diff --git a/trunk/arch/sparc/kernel/sun4d_smp.c b/trunk/arch/sparc/kernel/sun4d_smp.c index 3ff4edd32815..ba843f6a2832 100644 --- a/trunk/arch/sparc/kernel/sun4d_smp.c +++ b/trunk/arch/sparc/kernel/sun4d_smp.c @@ -42,7 +42,7 @@ extern ctxd_t *srmmu_ctx_table_phys; extern void calibrate_delay(void); -static volatile int smp_processors_ready = 0; +extern volatile int smp_processors_ready; static int smp_highest_cpu; extern volatile unsigned long cpu_callin_map[NR_CPUS]; extern cpuinfo_sparc cpu_data[NR_CPUS]; diff --git a/trunk/arch/sparc/kernel/sun4m_smp.c b/trunk/arch/sparc/kernel/sun4m_smp.c index 7d4a649138f6..3b32096134aa 100644 --- a/trunk/arch/sparc/kernel/sun4m_smp.c +++ b/trunk/arch/sparc/kernel/sun4m_smp.c @@ -39,6 +39,7 @@ extern ctxd_t *srmmu_ctx_table_phys; extern void 
calibrate_delay(void); +extern volatile int smp_processors_ready; extern volatile unsigned long cpu_callin_map[NR_CPUS]; extern unsigned char boot_cpu_id; @@ -216,6 +217,7 @@ void __init smp4m_smp_done(void) } /* Ok, they are spinning and ready to go. */ + smp_processors_ready = 1; } /* At each hardware IRQ, we get this called to forward IRQ reception diff --git a/trunk/block/cfq-iosched.c b/trunk/block/cfq-iosched.c index 3a3aee08ec5f..aae3123bf3ee 100644 --- a/trunk/block/cfq-iosched.c +++ b/trunk/block/cfq-iosched.c @@ -1561,7 +1561,7 @@ cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc, /* ->key must be copied to avoid race with cfq_exit_queue() */ k = __cic->key; if (unlikely(!k)) { - cfq_drop_dead_cic(ioc, __cic); + cfq_drop_dead_cic(ioc, cic); goto restart; } diff --git a/trunk/block/elevator.c b/trunk/block/elevator.c index 9b72dc7c8a5c..bc7baeec0d10 100644 --- a/trunk/block/elevator.c +++ b/trunk/block/elevator.c @@ -765,8 +765,7 @@ void elv_unregister(struct elevator_type *e) read_lock(&tasklist_lock); do_each_thread(g, p) { task_lock(p); - if (p->io_context) - e->ops.trim(p->io_context); + e->ops.trim(p->io_context); task_unlock(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); diff --git a/trunk/block/ll_rw_blk.c b/trunk/block/ll_rw_blk.c index ddd9253f9d55..61d6b3c65b66 100644 --- a/trunk/block/ll_rw_blk.c +++ b/trunk/block/ll_rw_blk.c @@ -3628,8 +3628,6 @@ struct io_context *current_io_context(gfp_t gfp_flags) ret->nr_batch_requests = 0; /* because this is 0 */ ret->aic = NULL; ret->cic_root.rb_node = NULL; - /* make sure set_task_ioprio() sees the settings above */ - smp_wmb(); tsk->io_context = ret; } diff --git a/trunk/drivers/acpi/ac.c b/trunk/drivers/acpi/ac.c index 11abc7bf777e..96309b9660da 100644 --- a/trunk/drivers/acpi/ac.c +++ b/trunk/drivers/acpi/ac.c @@ -285,8 +285,6 @@ static int __init acpi_ac_init(void) { int result; - if (acpi_disabled) - return -ENODEV; acpi_ac_dir = acpi_lock_ac_dir(); if (!acpi_ac_dir) diff --git a/trunk/drivers/acpi/acpi_memhotplug.c b/trunk/drivers/acpi/acpi_memhotplug.c index 1dda370f402b..b0d4b147b19e 100644 --- a/trunk/drivers/acpi/acpi_memhotplug.c +++ b/trunk/drivers/acpi/acpi_memhotplug.c @@ -484,8 +484,10 @@ acpi_memory_register_notify_handler(acpi_handle handle, status = is_memory_device(handle); - if (ACPI_FAILURE(status)) + if (ACPI_FAILURE(status)){ + ACPI_EXCEPTION((AE_INFO, status, "handle is no memory device")); return AE_OK; /* continue */ + } status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, acpi_memory_device_notify, NULL); @@ -501,8 +503,10 @@ acpi_memory_deregister_notify_handler(acpi_handle handle, status = is_memory_device(handle); - if (ACPI_FAILURE(status)) + if (ACPI_FAILURE(status)){ + ACPI_EXCEPTION((AE_INFO, status, "handle is no memory device")); return AE_OK; /* continue */ + } status = acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY, diff --git a/trunk/drivers/acpi/battery.c b/trunk/drivers/acpi/battery.c index 9810e2a55d0a..6e5221707d97 100644 --- a/trunk/drivers/acpi/battery.c +++ b/trunk/drivers/acpi/battery.c @@ -757,9 +757,6 @@ static int __init acpi_battery_init(void) { int result; - if (acpi_disabled) - return -ENODEV; - acpi_battery_dir = acpi_lock_battery_dir(); if (!acpi_battery_dir) return -ENODEV; diff --git a/trunk/drivers/acpi/bus.c b/trunk/drivers/acpi/bus.c index 279c4bac92e5..b2977695e120 100644 --- a/trunk/drivers/acpi/bus.c +++ b/trunk/drivers/acpi/bus.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -69,8 +68,7 
@@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device) status = acpi_get_data(handle, acpi_bus_data_handler, (void **)device); if (ACPI_FAILURE(status) || !*device) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n", - handle)); + ACPI_EXCEPTION((AE_INFO, status, "No context for object [%p]", handle)); return -ENODEV; } @@ -194,7 +192,7 @@ int acpi_bus_set_power(acpi_handle handle, int state) /* Make sure this is a valid target state */ if (!device->flags.power_manageable) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n", + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable", device->kobj.name)); return -ENODEV; } @@ -740,10 +738,7 @@ static int __init acpi_init(void) return -ENODEV; } - result = firmware_register(&acpi_subsys); - if (result < 0) - printk(KERN_WARNING "%s: firmware_register error: %d\n", - __FUNCTION__, result); + firmware_register(&acpi_subsys); result = acpi_bus_init(); diff --git a/trunk/drivers/acpi/hotkey.c b/trunk/drivers/acpi/hotkey.c index 1ba2db671865..32c9d88fd196 100644 --- a/trunk/drivers/acpi/hotkey.c +++ b/trunk/drivers/acpi/hotkey.c @@ -91,14 +91,6 @@ enum { HK_EVENT_ENTERRING_S5, }; -enum conf_entry_enum { - bus_handle = 0, - bus_method = 1, - action_handle = 2, - method = 3, - LAST_CONF_ENTRY -}; - /* procdir we use */ static struct proc_dir_entry *hotkey_proc_dir; static struct proc_dir_entry *hotkey_config; @@ -252,15 +244,19 @@ static int hotkey_info_open_fs(struct inode *inode, struct file *file) static char *format_result(union acpi_object *object) { - char *buf; + char *buf = NULL; + + buf = (char *)kmalloc(RESULT_STR_LEN, GFP_KERNEL); + if (buf) + memset(buf, 0, RESULT_STR_LEN); + else + goto do_fail; - buf = kzalloc(RESULT_STR_LEN, GFP_KERNEL); - if (!buf) - return NULL; /* Now, just support integer type */ if (object->type == ACPI_TYPE_INTEGER) sprintf(buf, "%d\n", (u32) object->integer.value); - return buf; + do_fail: + return (buf); } static int hotkey_polling_seq_show(struct seq_file *seq, void *offset) @@ -490,102 +486,98 @@ static void free_hotkey_device(union acpi_hotkey *key) static void free_hotkey_buffer(union acpi_hotkey *key) { - /* key would never be null, action method could be */ kfree(key->event_hotkey.action_method); } static void free_poll_hotkey_buffer(union acpi_hotkey *key) { - /* key would never be null, others could be*/ kfree(key->poll_hotkey.action_method); kfree(key->poll_hotkey.poll_method); kfree(key->poll_hotkey.poll_result); } static int -init_hotkey_device(union acpi_hotkey *key, char **config_entry, - int std_num, int external_num) +init_hotkey_device(union acpi_hotkey *key, char *bus_str, char *action_str, + char *method, int std_num, int external_num) { acpi_handle tmp_handle; acpi_status status = AE_OK; + if (std_num < 0 || IS_POLL(std_num) || !key) goto do_fail; - if (!config_entry[bus_handle] || !config_entry[action_handle] - || !config_entry[method]) + if (!bus_str || !action_str || !method) goto do_fail; key->link.hotkey_type = ACPI_HOTKEY_EVENT; key->link.hotkey_standard_num = std_num; key->event_hotkey.flag = 0; - key->event_hotkey.action_method = config_entry[method]; + key->event_hotkey.action_method = method; - status = acpi_get_handle(NULL, config_entry[bus_handle], - &(key->event_hotkey.bus_handle)); + status = + acpi_get_handle(NULL, bus_str, &(key->event_hotkey.bus_handle)); if (ACPI_FAILURE(status)) - goto do_fail_zero; + goto do_fail; key->event_hotkey.external_hotkey_num = external_num; - status = 
acpi_get_handle(NULL, config_entry[action_handle], + status = + acpi_get_handle(NULL, action_str, &(key->event_hotkey.action_handle)); if (ACPI_FAILURE(status)) - goto do_fail_zero; + goto do_fail; status = acpi_get_handle(key->event_hotkey.action_handle, - config_entry[method], &tmp_handle); + method, &tmp_handle); if (ACPI_FAILURE(status)) - goto do_fail_zero; + goto do_fail; return AE_OK; -do_fail_zero: - key->event_hotkey.action_method = NULL; -do_fail: + do_fail: return -ENODEV; } static int -init_poll_hotkey_device(union acpi_hotkey *key, char **config_entry, - int std_num) +init_poll_hotkey_device(union acpi_hotkey *key, + char *poll_str, + char *poll_method, + char *action_str, char *action_method, int std_num) { acpi_status status = AE_OK; acpi_handle tmp_handle; + if (std_num < 0 || IS_EVENT(std_num) || !key) goto do_fail; - if (!config_entry[bus_handle] ||!config_entry[bus_method] || - !config_entry[action_handle] || !config_entry[method]) + + if (!poll_str || !poll_method || !action_str || !action_method) goto do_fail; key->link.hotkey_type = ACPI_HOTKEY_POLLING; key->link.hotkey_standard_num = std_num; key->poll_hotkey.flag = 0; - key->poll_hotkey.poll_method = config_entry[bus_method]; - key->poll_hotkey.action_method = config_entry[method]; + key->poll_hotkey.poll_method = poll_method; + key->poll_hotkey.action_method = action_method; - status = acpi_get_handle(NULL, config_entry[bus_handle], - &(key->poll_hotkey.poll_handle)); + status = + acpi_get_handle(NULL, poll_str, &(key->poll_hotkey.poll_handle)); if (ACPI_FAILURE(status)) - goto do_fail_zero; + goto do_fail; status = acpi_get_handle(key->poll_hotkey.poll_handle, - config_entry[bus_method], &tmp_handle); + poll_method, &tmp_handle); if (ACPI_FAILURE(status)) - goto do_fail_zero; + goto do_fail; status = - acpi_get_handle(NULL, config_entry[action_handle], + acpi_get_handle(NULL, action_str, &(key->poll_hotkey.action_handle)); if (ACPI_FAILURE(status)) - goto do_fail_zero; + goto do_fail; status = acpi_get_handle(key->poll_hotkey.action_handle, - config_entry[method], &tmp_handle); + action_method, &tmp_handle); if (ACPI_FAILURE(status)) - goto do_fail_zero; + goto do_fail; key->poll_hotkey.poll_result = (union acpi_object *)kmalloc(sizeof(union acpi_object), GFP_KERNEL); if (!key->poll_hotkey.poll_result) - goto do_fail_zero; + goto do_fail; return AE_OK; - -do_fail_zero: - key->poll_hotkey.poll_method = NULL; - key->poll_hotkey.action_method = NULL; -do_fail: + do_fail: return -ENODEV; } @@ -660,18 +652,17 @@ static int hotkey_poll_config_seq_show(struct seq_file *seq, void *offset) } static int -get_parms(char *config_record, int *cmd, char **config_entry, - int *internal_event_num, int *external_event_num) +get_parms(char *config_record, + int *cmd, + char **bus_handle, + char **bus_method, + char **action_handle, + char **method, int *internal_event_num, int *external_event_num) { -/* the format of *config_record = - * "1:\d+:*" : "cmd:internal_event_num" - * "\d+:\w+:\w+:\w+:\w+:\d+:\d+" : - * "cmd:bus_handle:bus_method:action_handle:method:internal_event_num:external_event_num" - */ char *tmp, *tmp1, count; - int i; sscanf(config_record, "%d", cmd); + if (*cmd == 1) { if (sscanf(config_record, "%d:%d", cmd, internal_event_num) != 2) @@ -683,27 +674,59 @@ get_parms(char *config_record, int *cmd, char **config_entry, if (!tmp) goto do_fail; tmp++; - for (i = 0; i < LAST_CONF_ENTRY; i++) { - tmp1 = strchr(tmp, ':'); - if (!tmp1) { - goto do_fail; - } - count = tmp1 - tmp; - config_entry[i] = kzalloc(count + 1, 
GFP_KERNEL); - if (!config_entry[i]) - goto handle_failure; - strncpy(config_entry[i], tmp, count); - tmp = tmp1 + 1; - } - if (sscanf(tmp, "%d:%d", internal_event_num, external_event_num) <= 0) - goto handle_failure; - if (!IS_OTHERS(*internal_event_num)) { - return 6; - } -handle_failure: - while (i-- > 0) - kfree(config_entry[i]); -do_fail: + tmp1 = strchr(tmp, ':'); + if (!tmp1) + goto do_fail; + + count = tmp1 - tmp; + *bus_handle = (char *)kmalloc(count + 1, GFP_KERNEL); + if (!*bus_handle) + goto do_fail; + strncpy(*bus_handle, tmp, count); + *(*bus_handle + count) = 0; + + tmp = tmp1; + tmp++; + tmp1 = strchr(tmp, ':'); + if (!tmp1) + goto do_fail; + count = tmp1 - tmp; + *bus_method = (char *)kmalloc(count + 1, GFP_KERNEL); + if (!*bus_method) + goto do_fail; + strncpy(*bus_method, tmp, count); + *(*bus_method + count) = 0; + + tmp = tmp1; + tmp++; + tmp1 = strchr(tmp, ':'); + if (!tmp1) + goto do_fail; + count = tmp1 - tmp; + *action_handle = (char *)kmalloc(count + 1, GFP_KERNEL); + if (!*action_handle) + goto do_fail; + strncpy(*action_handle, tmp, count); + *(*action_handle + count) = 0; + + tmp = tmp1; + tmp++; + tmp1 = strchr(tmp, ':'); + if (!tmp1) + goto do_fail; + count = tmp1 - tmp; + *method = (char *)kmalloc(count + 1, GFP_KERNEL); + if (!*method) + goto do_fail; + strncpy(*method, tmp, count); + *(*method + count) = 0; + + if (sscanf(tmp1 + 1, "%d:%d", internal_event_num, external_event_num) <= + 0) + goto do_fail; + + return 6; + do_fail: return -1; } @@ -713,34 +736,50 @@ static ssize_t hotkey_write_config(struct file *file, size_t count, loff_t * data) { char *config_record = NULL; - char *config_entry[LAST_CONF_ENTRY]; + char *bus_handle = NULL; + char *bus_method = NULL; + char *action_handle = NULL; + char *method = NULL; int cmd, internal_event_num, external_event_num; int ret = 0; - union acpi_hotkey *key = kzalloc(sizeof(union acpi_hotkey), GFP_KERNEL); + union acpi_hotkey *key = NULL; - if (!key) - return -ENOMEM; - config_record = kzalloc(count + 1, GFP_KERNEL); - if (!config_record) { - kfree(key); + config_record = (char *)kmalloc(count + 1, GFP_KERNEL); + if (!config_record) return -ENOMEM; - } if (copy_from_user(config_record, buffer, count)) { kfree(config_record); - kfree(key); printk(KERN_ERR PREFIX "Invalid data\n"); return -EINVAL; } - ret = get_parms(config_record, &cmd, config_entry, - &internal_event_num, &external_event_num); + config_record[count] = 0; + + ret = get_parms(config_record, + &cmd, + &bus_handle, + &bus_method, + &action_handle, + &method, &internal_event_num, &external_event_num); + kfree(config_record); + if (IS_OTHERS(internal_event_num)) + goto do_fail; if (ret != 6) { + do_fail: + kfree(bus_handle); + kfree(bus_method); + kfree(action_handle); + kfree(method); printk(KERN_ERR PREFIX "Invalid data format ret=%d\n", ret); return -EINVAL; } + key = kmalloc(sizeof(union acpi_hotkey), GFP_KERNEL); + if (!key) + goto do_fail; + memset(key, 0, sizeof(union acpi_hotkey)); if (cmd == 1) { union acpi_hotkey *tmp = NULL; tmp = get_hotkey_by_event(&global_hotkey_list, @@ -752,19 +791,34 @@ static ssize_t hotkey_write_config(struct file *file, goto cont_cmd; } if (IS_EVENT(internal_event_num)) { - if (init_hotkey_device(key, config_entry, - internal_event_num, external_event_num)) - goto init_hotkey_fail; - } else { - if (init_poll_hotkey_device(key, config_entry, - internal_event_num)) - goto init_poll_hotkey_fail; + kfree(bus_method); + ret = init_hotkey_device(key, bus_handle, action_handle, method, + internal_event_num, + 
external_event_num); + } else + ret = init_poll_hotkey_device(key, bus_handle, bus_method, + action_handle, method, + internal_event_num); + if (ret) { + kfree(bus_handle); + kfree(action_handle); + if (IS_EVENT(internal_event_num)) + free_hotkey_buffer(key); + else + free_poll_hotkey_buffer(key); + kfree(key); + printk(KERN_ERR PREFIX "Invalid hotkey\n"); + return -EINVAL; } -cont_cmd: + + cont_cmd: + kfree(bus_handle); + kfree(action_handle); + switch (cmd) { case 0: - if (get_hotkey_by_event(&global_hotkey_list, - key->link.hotkey_standard_num)) + if (get_hotkey_by_event + (&global_hotkey_list, key->link.hotkey_standard_num)) goto fail_out; else hotkey_add(key); @@ -773,7 +827,6 @@ static ssize_t hotkey_write_config(struct file *file, hotkey_remove(key); break; case 2: - /* key is kfree()ed if matched*/ if (hotkey_update(key)) goto fail_out; break; @@ -782,22 +835,11 @@ static ssize_t hotkey_write_config(struct file *file, break; } return count; - -init_poll_hotkey_fail: /* failed init_poll_hotkey_device */ - kfree(config_entry[bus_method]); - config_entry[bus_method] = NULL; -init_hotkey_fail: /* failed init_hotkey_device */ - kfree(config_entry[method]); -fail_out: - kfree(config_entry[bus_handle]); - kfree(config_entry[action_handle]); - /* No double free since elements =NULL for error cases */ - if (IS_EVENT(internal_event_num)) { - if (config_entry[bus_method]) - kfree(config_entry[bus_method]); - free_hotkey_buffer(key); /* frees [method] */ - } else - free_poll_hotkey_buffer(key); /* frees [bus_method]+[method] */ + fail_out: + if (IS_EVENT(internal_event_num)) + free_hotkey_buffer(key); + else + free_poll_hotkey_buffer(key); kfree(key); printk(KERN_ERR PREFIX "invalid key\n"); return -EINVAL; @@ -881,9 +923,10 @@ static ssize_t hotkey_execute_aml_method(struct file *file, union acpi_hotkey *key; - arg = kzalloc(count + 1, GFP_KERNEL); + arg = (char *)kmalloc(count + 1, GFP_KERNEL); if (!arg) return -ENOMEM; + arg[count] = 0; if (copy_from_user(arg, buffer, count)) { kfree(arg); diff --git a/trunk/drivers/acpi/i2c_ec.c b/trunk/drivers/acpi/i2c_ec.c index 6809c283ec58..84239d51dc0c 100644 --- a/trunk/drivers/acpi/i2c_ec.c +++ b/trunk/drivers/acpi/i2c_ec.c @@ -330,7 +330,7 @@ static int acpi_ec_hc_add(struct acpi_device *device) status = acpi_evaluate_integer(ec_hc->handle, "_EC", NULL, &val); if (ACPI_FAILURE(status)) { ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Error obtaining _EC\n")); - kfree(ec_hc); + kfree(ec_hc->smbus); kfree(smbus); return -EIO; } diff --git a/trunk/drivers/acpi/osl.c b/trunk/drivers/acpi/osl.c index 507f051d1cef..b7d1514cd199 100644 --- a/trunk/drivers/acpi/osl.c +++ b/trunk/drivers/acpi/osl.c @@ -746,16 +746,6 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout) ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n", handle, units, timeout)); - /* - * This can be called during resume with interrupts off. - * Like boot-time, we should be single threaded and will - * always get the lock if we try -- timeout or not. - * If this doesn't succeed, then we will oops courtesy of - * might_sleep() in down(). 
- */ - if (!down_trylock(sem)) - return AE_OK; - switch (timeout) { /* * No Wait: diff --git a/trunk/drivers/acpi/sbs.c b/trunk/drivers/acpi/sbs.c index 62bef0b3b614..db7b350a5035 100644 --- a/trunk/drivers/acpi/sbs.c +++ b/trunk/drivers/acpi/sbs.c @@ -1714,9 +1714,6 @@ static int __init acpi_sbs_init(void) { int result = 0; - if (acpi_disabled) - return -ENODEV; - init_MUTEX(&sbs_sem); if (capacity_mode != DEF_CAPACITY_UNIT diff --git a/trunk/drivers/acpi/scan.c b/trunk/drivers/acpi/scan.c index 698a1540e303..5fcb50c7b778 100644 --- a/trunk/drivers/acpi/scan.c +++ b/trunk/drivers/acpi/scan.c @@ -4,7 +4,6 @@ #include #include -#include #include #include @@ -114,8 +113,6 @@ static struct kset acpi_namespace_kset = { static void acpi_device_register(struct acpi_device *device, struct acpi_device *parent) { - int err; - /* * Linkage * ------- @@ -141,10 +138,7 @@ static void acpi_device_register(struct acpi_device *device, device->kobj.parent = &parent->kobj; device->kobj.ktype = &ktype_acpi_ns; device->kobj.kset = &acpi_namespace_kset; - err = kobject_register(&device->kobj); - if (err < 0) - printk(KERN_WARNING "%s: kobject_register error: %d\n", - __FUNCTION__, err); + kobject_register(&device->kobj); create_sysfs_device_files(device); } @@ -1456,9 +1450,7 @@ static int __init acpi_scan_init(void) if (acpi_disabled) return 0; - result = kset_register(&acpi_namespace_kset); - if (result < 0) - printk(KERN_ERR PREFIX "kset_register error: %d\n", result); + kset_register(&acpi_namespace_kset); result = bus_register(&acpi_bus_type); if (result) { diff --git a/trunk/drivers/acpi/utils.c b/trunk/drivers/acpi/utils.c index d0d84c43a9d4..f48227f4c8c9 100644 --- a/trunk/drivers/acpi/utils.c +++ b/trunk/drivers/acpi/utils.c @@ -262,7 +262,7 @@ acpi_evaluate_integer(acpi_handle handle, if (!data) return AE_BAD_PARAMETER; - element = kmalloc(sizeof(union acpi_object), irqs_disabled() ? 
GFP_ATOMIC: GFP_KERNEL); + element = kmalloc(sizeof(union acpi_object), GFP_KERNEL); if (!element) return AE_NO_MEMORY; diff --git a/trunk/drivers/base/node.c b/trunk/drivers/base/node.c index e9b0957f15d1..d7de1753e094 100644 --- a/trunk/drivers/base/node.c +++ b/trunk/drivers/base/node.c @@ -64,7 +64,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf) "Node %d Mapped: %8lu kB\n" "Node %d AnonPages: %8lu kB\n" "Node %d PageTables: %8lu kB\n" - "Node %d NFS_Unstable: %8lu kB\n" + "Node %d NFS Unstable: %8lu kB\n" "Node %d Bounce: %8lu kB\n" "Node %d Slab: %8lu kB\n", nid, K(i.totalram), diff --git a/trunk/drivers/cdrom/gscd.c b/trunk/drivers/cdrom/gscd.c index fa7082489765..b6ee50a2916d 100644 --- a/trunk/drivers/cdrom/gscd.c +++ b/trunk/drivers/cdrom/gscd.c @@ -266,7 +266,7 @@ static void __do_gscd_request(unsigned long dummy) goto out; if (req->cmd != READ) { - printk("GSCD: bad cmd %u\n", rq_data_dir(req)); + printk("GSCD: bad cmd %lu\n", rq_data_dir(req)); end_request(req, 0); goto repeat; } diff --git a/trunk/drivers/char/moxa.c b/trunk/drivers/char/moxa.c index a369dd6877d8..4ea7bd5f4f56 100644 --- a/trunk/drivers/char/moxa.c +++ b/trunk/drivers/char/moxa.c @@ -142,7 +142,6 @@ typedef struct _moxa_board_conf { static moxa_board_conf moxa_boards[MAX_BOARDS]; static void __iomem *moxaBaseAddr[MAX_BOARDS]; -static int loadstat[MAX_BOARDS]; struct moxa_str { int type; @@ -1689,8 +1688,6 @@ int MoxaDriverPoll(void) if (moxaCard == 0) return (-1); for (card = 0; card < MAX_BOARDS; card++) { - if (loadstat[card] == 0) - continue; if ((ports = moxa_boards[card].numPorts) == 0) continue; if (readb(moxaIntPend[card]) == 0xff) { @@ -2906,7 +2903,6 @@ static int moxaloadcode(int cardno, unsigned char __user *tmp, int len) } break; } - loadstat[cardno] = 1; return (0); } @@ -2924,7 +2920,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len) len1 = len >> 1; ptr = (ushort *) moxaBuff; for (i = 0; i < len1; i++) - usum += le16_to_cpu(*(ptr + i)); + usum += *(ptr + i); retry = 0; do { len1 = len >> 1; @@ -2996,7 +2992,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor wlen = len >> 1; uptr = (ushort *) moxaBuff; for (i = 0; i < wlen; i++) - usum += le16_to_cpu(uptr[i]); + usum += uptr[i]; retry = 0; j = 0; do { diff --git a/trunk/drivers/char/tty_io.c b/trunk/drivers/char/tty_io.c index bb0d9199e994..bfdb90242a90 100644 --- a/trunk/drivers/char/tty_io.c +++ b/trunk/drivers/char/tty_io.c @@ -153,15 +153,6 @@ int tty_ioctl(struct inode * inode, struct file * file, static int tty_fasync(int fd, struct file * filp, int on); static void release_mem(struct tty_struct *tty, int idx); -/** - * alloc_tty_struct - allocate a tty object - * - * Return a new empty tty structure. The data fields have not - * been initialized in any way but has been zeroed - * - * Locking: none - * FIXME: use kzalloc - */ static struct tty_struct *alloc_tty_struct(void) { @@ -175,15 +166,6 @@ static struct tty_struct *alloc_tty_struct(void) static void tty_buffer_free_all(struct tty_struct *); -/** - * free_tty_struct - free a disused tty - * @tty: tty struct to free - * - * Free the write buffers, tty queue and tty memory itself. - * - * Locking: none. 
Must be called after tty is definitely unused - */ - static inline void free_tty_struct(struct tty_struct *tty) { kfree(tty->write_buf); @@ -193,17 +175,6 @@ static inline void free_tty_struct(struct tty_struct *tty) #define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base) -/** - * tty_name - return tty naming - * @tty: tty structure - * @buf: buffer for output - * - * Convert a tty structure into a name. The name reflects the kernel - * naming policy and if udev is in use may not reflect user space - * - * Locking: none - */ - char *tty_name(struct tty_struct *tty, char *buf) { if (!tty) /* Hmm. NULL pointer. That's fun. */ @@ -264,28 +235,6 @@ static int check_tty_count(struct tty_struct *tty, const char *routine) * Tty buffer allocation management */ - -/** - * tty_buffer_free_all - free buffers used by a tty - * @tty: tty to free from - * - * Remove all the buffers pending on a tty whether queued with data - * or in the free ring. Must be called when the tty is no longer in use - * - * Locking: none - */ - - -/** - * tty_buffer_free_all - free buffers used by a tty - * @tty: tty to free from - * - * Remove all the buffers pending on a tty whether queued with data - * or in the free ring. Must be called when the tty is no longer in use - * - * Locking: none - */ - static void tty_buffer_free_all(struct tty_struct *tty) { struct tty_buffer *thead; @@ -298,47 +247,19 @@ static void tty_buffer_free_all(struct tty_struct *tty) kfree(thead); } tty->buf.tail = NULL; - tty->buf.memory_used = 0; } -/** - * tty_buffer_init - prepare a tty buffer structure - * @tty: tty to initialise - * - * Set up the initial state of the buffer management for a tty device. - * Must be called before the other tty buffer functions are used. - * - * Locking: none - */ - static void tty_buffer_init(struct tty_struct *tty) { spin_lock_init(&tty->buf.lock); tty->buf.head = NULL; tty->buf.tail = NULL; tty->buf.free = NULL; - tty->buf.memory_used = 0; } -/** - * tty_buffer_alloc - allocate a tty buffer - * @tty: tty device - * @size: desired size (characters) - * - * Allocate a new tty buffer to hold the desired number of characters. - * Return NULL if out of memory or the allocation would exceed the - * per device queue - * - * Locking: Caller must hold tty->buf.lock - */ - -static struct tty_buffer *tty_buffer_alloc(struct tty_struct *tty, size_t size) +static struct tty_buffer *tty_buffer_alloc(size_t size) { - struct tty_buffer *p; - - if (tty->buf.memory_used + size > 65536) - return NULL; - p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC); + struct tty_buffer *p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC); if(p == NULL) return NULL; p->used = 0; @@ -348,27 +269,17 @@ static struct tty_buffer *tty_buffer_alloc(struct tty_struct *tty, size_t size) p->read = 0; p->char_buf_ptr = (char *)(p->data); p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size; - tty->buf.memory_used += size; +/* printk("Flip create %p\n", p); */ return p; } -/** - * tty_buffer_free - free a tty buffer - * @tty: tty owning the buffer - * @b: the buffer to free - * - * Free a tty buffer, or add it to the free list according to our - * internal strategy - * - * Locking: Caller must hold tty->buf.lock - */ +/* Must be called with the tty_read lock held. 
This needs to acquire strategy + code to decide if we should kfree or relink a given expired buffer */ static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b) { /* Dumb strategy for now - should keep some stats */ - tty->buf.memory_used -= b->size; - WARN_ON(tty->buf.memory_used < 0); - +/* printk("Flip dispose %p\n", b); */ if(b->size >= 512) kfree(b); else { @@ -377,18 +288,6 @@ static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b) } } -/** - * tty_buffer_find - find a free tty buffer - * @tty: tty owning the buffer - * @size: characters wanted - * - * Locate an existing suitable tty buffer or if we are lacking one then - * allocate a new one. We round our buffers off in 256 character chunks - * to get better allocation behaviour. - * - * Locking: Caller must hold tty->buf.lock - */ - static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size) { struct tty_buffer **tbh = &tty->buf.free; @@ -400,28 +299,20 @@ static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size) t->used = 0; t->commit = 0; t->read = 0; - tty->buf.memory_used += t->size; + /* DEBUG ONLY */ +/* memset(t->data, '*', size); */ +/* printk("Flip recycle %p\n", t); */ return t; } tbh = &((*tbh)->next); } /* Round the buffer size out */ size = (size + 0xFF) & ~ 0xFF; - return tty_buffer_alloc(tty, size); + return tty_buffer_alloc(size); /* Should possibly check if this fails for the largest buffer we have queued and recycle that ? */ } -/** - * tty_buffer_request_room - grow tty buffer if needed - * @tty: tty structure - * @size: size desired - * - * Make at least size bytes of linear space available for the tty - * buffer. If we fail return the size we managed to find. - * - * Locking: Takes tty->buf.lock - */ int tty_buffer_request_room(struct tty_struct *tty, size_t size) { struct tty_buffer *b, *n; @@ -456,18 +347,6 @@ int tty_buffer_request_room(struct tty_struct *tty, size_t size) } EXPORT_SYMBOL_GPL(tty_buffer_request_room); -/** - * tty_insert_flip_string - Add characters to the tty buffer - * @tty: tty structure - * @chars: characters - * @size: size - * - * Queue a series of bytes to the tty buffering. All the characters - * passed are marked as without error. Returns the number added. - * - * Locking: Called functions may take tty->buf.lock - */ - int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, size_t size) { @@ -491,20 +370,6 @@ int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, } EXPORT_SYMBOL(tty_insert_flip_string); -/** - * tty_insert_flip_string_flags - Add characters to the tty buffer - * @tty: tty structure - * @chars: characters - * @flags: flag bytes - * @size: size - * - * Queue a series of bytes to the tty buffering. For each character - * the flags array indicates the status of the character. Returns the - * number added. - * - * Locking: Called functions may take tty->buf.lock - */ - int tty_insert_flip_string_flags(struct tty_struct *tty, const unsigned char *chars, const char *flags, size_t size) { @@ -529,17 +394,6 @@ int tty_insert_flip_string_flags(struct tty_struct *tty, } EXPORT_SYMBOL(tty_insert_flip_string_flags); -/** - * tty_schedule_flip - push characters to ldisc - * @tty: tty to push from - * - * Takes any pending buffers and transfers their ownership to the - * ldisc side of the queue. It then schedules those characters for - * processing by the line discipline. 
- * - * Locking: Takes tty->buf.lock - */ - void tty_schedule_flip(struct tty_struct *tty) { unsigned long flags; @@ -551,19 +405,12 @@ void tty_schedule_flip(struct tty_struct *tty) } EXPORT_SYMBOL(tty_schedule_flip); -/** - * tty_prepare_flip_string - make room for characters - * @tty: tty - * @chars: return pointer for character write area - * @size: desired size - * +/* * Prepare a block of space in the buffer for data. Returns the length * available and buffer pointer to the space which is now allocated and * accounted for as ready for normal characters. This is used for drivers * that need their own block copy routines into the buffer. There is no * guarantee the buffer is a DMA target! - * - * Locking: May call functions taking tty->buf.lock */ int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size) @@ -580,20 +427,12 @@ int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_ EXPORT_SYMBOL_GPL(tty_prepare_flip_string); -/** - * tty_prepare_flip_string_flags - make room for characters - * @tty: tty - * @chars: return pointer for character write area - * @flags: return pointer for status flag write area - * @size: desired size - * +/* * Prepare a block of space in the buffer for data. Returns the length * available and buffer pointer to the space which is now allocated and * accounted for as ready for characters. This is used for drivers * that need their own block copy routines into the buffer. There is no * guarantee the buffer is a DMA target! - * - * Locking: May call functions taking tty->buf.lock */ int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size) @@ -612,16 +451,10 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags); -/** - * tty_set_termios_ldisc - set ldisc field - * @tty: tty structure - * @num: line discipline number - * +/* * This is probably overkill for real world processors but * they are not on hot paths so a little discipline won't do * any harm. - * - * Locking: takes termios_sem */ static void tty_set_termios_ldisc(struct tty_struct *tty, int num) @@ -641,19 +474,6 @@ static DEFINE_SPINLOCK(tty_ldisc_lock); static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait); static struct tty_ldisc tty_ldiscs[NR_LDISCS]; /* line disc dispatch table */ -/** - * tty_register_ldisc - install a line discipline - * @disc: ldisc number - * @new_ldisc: pointer to the ldisc object - * - * Installs a new line discipline into the kernel. The discipline - * is set up as unreferenced and then made available to the kernel - * from this point onwards. - * - * Locking: - * takes tty_ldisc_lock to guard against ldisc races - */ - int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc) { unsigned long flags; @@ -673,18 +493,6 @@ int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc) } EXPORT_SYMBOL(tty_register_ldisc); -/** - * tty_unregister_ldisc - unload a line discipline - * @disc: ldisc number - * @new_ldisc: pointer to the ldisc object - * - * Remove a line discipline from the kernel providing it is not - * currently in use. - * - * Locking: - * takes tty_ldisc_lock to guard against ldisc races - */ - int tty_unregister_ldisc(int disc) { unsigned long flags; @@ -704,19 +512,6 @@ int tty_unregister_ldisc(int disc) } EXPORT_SYMBOL(tty_unregister_ldisc); -/** - * tty_ldisc_get - take a reference to an ldisc - * @disc: ldisc number - * - * Takes a reference to a line discipline. Deals with refcounts and - * module locking counts. 
Returns NULL if the discipline is not available. - * Returns a pointer to the discipline and bumps the ref count if it is - * available - * - * Locking: - * takes tty_ldisc_lock to guard against ldisc races - */ - struct tty_ldisc *tty_ldisc_get(int disc) { unsigned long flags; @@ -745,17 +540,6 @@ struct tty_ldisc *tty_ldisc_get(int disc) EXPORT_SYMBOL_GPL(tty_ldisc_get); -/** - * tty_ldisc_put - drop ldisc reference - * @disc: ldisc number - * - * Drop a reference to a line discipline. Manage refcounts and - * module usage counts - * - * Locking: - * takes tty_ldisc_lock to guard against ldisc races - */ - void tty_ldisc_put(int disc) { struct tty_ldisc *ld; @@ -773,19 +557,6 @@ void tty_ldisc_put(int disc) EXPORT_SYMBOL_GPL(tty_ldisc_put); -/** - * tty_ldisc_assign - set ldisc on a tty - * @tty: tty to assign - * @ld: line discipline - * - * Install an instance of a line discipline into a tty structure. The - * ldisc must have a reference count above zero to ensure it remains/ - * The tty instance refcount starts at zero. - * - * Locking: - * Caller must hold references - */ - static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld) { tty->ldisc = *ld; @@ -800,8 +571,6 @@ static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld) * the tty ldisc. Return 0 on failure or 1 on success. This is * used to implement both the waiting and non waiting versions * of tty_ldisc_ref - * - * Locking: takes tty_ldisc_lock */ static int tty_ldisc_try(struct tty_struct *tty) @@ -833,8 +602,6 @@ static int tty_ldisc_try(struct tty_struct *tty) * must also be careful not to hold other locks that will deadlock * against a discipline change, such as an existing ldisc reference * (which we check for) - * - * Locking: call functions take tty_ldisc_lock */ struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty) @@ -855,8 +622,6 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait); * Dereference the line discipline for the terminal and take a * reference to it. If the line discipline is in flux then * return NULL. Can be called from IRQ and timer functions. - * - * Locking: called functions take tty_ldisc_lock */ struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty) @@ -874,8 +639,6 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref); * * Undoes the effect of tty_ldisc_ref or tty_ldisc_ref_wait. May * be called in IRQ context. - * - * Locking: takes tty_ldisc_lock */ void tty_ldisc_deref(struct tty_ldisc *ld) @@ -920,9 +683,6 @@ static void tty_ldisc_enable(struct tty_struct *tty) * * Set the discipline of a tty line. Must be called from a process * context. - * - * Locking: takes tty_ldisc_lock. - * called functions take termios_sem */ static int tty_set_ldisc(struct tty_struct *tty, int ldisc) @@ -1086,17 +846,9 @@ static int tty_set_ldisc(struct tty_struct *tty, int ldisc) return retval; } -/** - * get_tty_driver - find device of a tty - * @dev_t: device identifier - * @index: returns the index of the tty - * - * This routine returns a tty driver structure, given a device number - * and also passes back the index number. 
- * - * Locking: caller must hold tty_mutex +/* + * This routine returns a tty driver structure, given a device number */ - static struct tty_driver *get_tty_driver(dev_t device, int *index) { struct tty_driver *p; @@ -1111,17 +863,11 @@ static struct tty_driver *get_tty_driver(dev_t device, int *index) return NULL; } -/** - * tty_check_change - check for POSIX terminal changes - * @tty: tty to check - * - * If we try to write to, or set the state of, a terminal and we're - * not in the foreground, send a SIGTTOU. If the signal is blocked or - * ignored, go ahead and perform the operation. (POSIX 7.2) - * - * Locking: none +/* + * If we try to write to, or set the state of, a terminal and we're + * not in the foreground, send a SIGTTOU. If the signal is blocked or + * ignored, go ahead and perform the operation. (POSIX 7.2) */ - int tty_check_change(struct tty_struct * tty) { if (current->signal->tty != tty) @@ -1259,27 +1005,10 @@ void tty_ldisc_flush(struct tty_struct *tty) EXPORT_SYMBOL_GPL(tty_ldisc_flush); -/** - * do_tty_hangup - actual handler for hangup events - * @data: tty device - * - * This can be called by the "eventd" kernel thread. That is process - * synchronous but doesn't hold any locks, so we need to make sure we - * have the appropriate locks for what we're doing. - * - * The hangup event clears any pending redirections onto the hung up - * device. It ensures future writes will error and it does the needed - * line discipline hangup and signal delivery. The tty object itself - * remains intact. - * - * Locking: - * BKL - * redirect lock for undoing redirection - * file list lock for manipulating list of ttys - * tty_ldisc_lock from called functions - * termios_sem resetting termios data - * tasklist_lock to walk task list for hangup event - * +/* + * This can be called by the "eventd" kernel thread. That is process synchronous, + * but doesn't hold any locks, so we need to make sure we have the appropriate + * locks for what we're doing.. */ static void do_tty_hangup(void *data) { @@ -1404,14 +1133,6 @@ static void do_tty_hangup(void *data) fput(f); } -/** - * tty_hangup - trigger a hangup event - * @tty: tty to hangup - * - * A carrier loss (virtual or otherwise) has occurred on this like - * schedule a hangup sequence to run after this event. - */ - void tty_hangup(struct tty_struct * tty) { #ifdef TTY_DEBUG_HANGUP @@ -1424,15 +1145,6 @@ void tty_hangup(struct tty_struct * tty) EXPORT_SYMBOL(tty_hangup); -/** - * tty_vhangup - process vhangup - * @tty: tty to hangup - * - * The user has asked via system call for the terminal to be hung up. - * We do this synchronously so that when the syscall returns the process - * is complete. That guarantee is neccessary for security reasons. - */ - void tty_vhangup(struct tty_struct * tty) { #ifdef TTY_DEBUG_HANGUP @@ -1444,14 +1156,6 @@ void tty_vhangup(struct tty_struct * tty) } EXPORT_SYMBOL(tty_vhangup); -/** - * tty_hung_up_p - was tty hung up - * @filp: file pointer of tty - * - * Return true if the tty has been subject to a vhangup or a carrier - * loss - */ - int tty_hung_up_p(struct file * filp) { return (filp->f_op == &hung_up_tty_fops); @@ -1459,28 +1163,19 @@ int tty_hung_up_p(struct file * filp) EXPORT_SYMBOL(tty_hung_up_p); -/** - * disassociate_ctty - disconnect controlling tty - * @on_exit: true if exiting so need to "hang up" the session - * - * This function is typically called only by the session leader, when - * it wants to disassociate itself from its controlling tty. 
+/* + * This function is typically called only by the session leader, when + * it wants to disassociate itself from its controlling tty. * - * It performs the following functions: + * It performs the following functions: * (1) Sends a SIGHUP and SIGCONT to the foreground process group * (2) Clears the tty from being controlling the session * (3) Clears the controlling tty for all processes in the * session group. * - * The argument on_exit is set to 1 if called when a process is - * exiting; it is 0 if called by the ioctl TIOCNOTTY. - * - * Locking: tty_mutex is taken to protect current->signal->tty - * BKL is taken for hysterical raisins - * Tasklist lock is taken (under tty_mutex) to walk process - * lists for the session. + * The argument on_exit is set to 1 if called when a process is + * exiting; it is 0 if called by the ioctl TIOCNOTTY. */ - void disassociate_ctty(int on_exit) { struct tty_struct *tty; @@ -1527,25 +1222,6 @@ void disassociate_ctty(int on_exit) unlock_kernel(); } - -/** - * stop_tty - propogate flow control - * @tty: tty to stop - * - * Perform flow control to the driver. For PTY/TTY pairs we - * must also propogate the TIOCKPKT status. May be called - * on an already stopped device and will not re-call the driver - * method. - * - * This functionality is used by both the line disciplines for - * halting incoming flow and by the driver. It may therefore be - * called from any context, may be under the tty atomic_write_lock - * but not always. - * - * Locking: - * Broken. Relies on BKL which is unsafe here. - */ - void stop_tty(struct tty_struct *tty) { if (tty->stopped) @@ -1562,19 +1238,6 @@ void stop_tty(struct tty_struct *tty) EXPORT_SYMBOL(stop_tty); -/** - * start_tty - propogate flow control - * @tty: tty to start - * - * Start a tty that has been stopped if at all possible. Perform - * any neccessary wakeups and propogate the TIOCPKT status. If this - * is the tty was previous stopped and is being started then the - * driver start method is invoked and the line discipline woken. - * - * Locking: - * Broken. Relies on BKL which is unsafe here. - */ - void start_tty(struct tty_struct *tty) { if (!tty->stopped || tty->flow_stopped) @@ -1595,23 +1258,6 @@ void start_tty(struct tty_struct *tty) EXPORT_SYMBOL(start_tty); -/** - * tty_read - read method for tty device files - * @file: pointer to tty file - * @buf: user buffer - * @count: size of user buffer - * @ppos: unused - * - * Perform the read system call function on this terminal device. Checks - * for hung up devices before calling the line discipline method. - * - * Locking: - * Locks the line discipline internally while needed - * For historical reasons the line discipline read method is - * invoked under the BKL. This will go away in time so do not rely on it - * in new code. Multiple read calls may be outstanding in parallel. - */ - static ssize_t tty_read(struct file * file, char __user * buf, size_t count, loff_t *ppos) { @@ -1656,7 +1302,6 @@ static inline ssize_t do_tty_write( ssize_t ret = 0, written = 0; unsigned int chunk; - /* FIXME: O_NDELAY ... */ if (mutex_lock_interruptible(&tty->atomic_write_lock)) { return -ERESTARTSYS; } @@ -1673,9 +1318,6 @@ static inline ssize_t do_tty_write( * layer has problems with bigger chunks. It will * claim to be able to handle more characters than * it actually does. - * - * FIXME: This can probably go away now except that 64K chunks - * are too likely to fail unless switched to vmalloc... 
*/ chunk = 2048; if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags)) @@ -1733,24 +1375,6 @@ static inline ssize_t do_tty_write( } -/** - * tty_write - write method for tty device file - * @file: tty file pointer - * @buf: user data to write - * @count: bytes to write - * @ppos: unused - * - * Write data to a tty device via the line discipline. - * - * Locking: - * Locks the line discipline as required - * Writes to the tty driver are serialized by the atomic_write_lock - * and are then processed in chunks to the device. The line discipline - * write method will not be involked in parallel for each device - * The line discipline write method is called under the big - * kernel lock for historical reasons. New code should not rely on this. - */ - static ssize_t tty_write(struct file * file, const char __user * buf, size_t count, loff_t *ppos) { @@ -1798,18 +1422,7 @@ ssize_t redirected_tty_write(struct file * file, const char __user * buf, size_t static char ptychar[] = "pqrstuvwxyzabcde"; -/** - * pty_line_name - generate name for a pty - * @driver: the tty driver in use - * @index: the minor number - * @p: output buffer of at least 6 bytes - * - * Generate a name from a driver reference and write it to the output - * buffer. - * - * Locking: None - */ -static void pty_line_name(struct tty_driver *driver, int index, char *p) +static inline void pty_line_name(struct tty_driver *driver, int index, char *p) { int i = index + driver->name_base; /* ->name is initialized to "ttyp", but "tty" is expected */ @@ -1818,53 +1431,24 @@ static void pty_line_name(struct tty_driver *driver, int index, char *p) ptychar[i >> 4 & 0xf], i & 0xf); } -/** - * pty_line_name - generate name for a tty - * @driver: the tty driver in use - * @index: the minor number - * @p: output buffer of at least 7 bytes - * - * Generate a name from a driver reference and write it to the output - * buffer. - * - * Locking: None - */ -static void tty_line_name(struct tty_driver *driver, int index, char *p) +static inline void tty_line_name(struct tty_driver *driver, int index, char *p) { sprintf(p, "%s%d", driver->name, index + driver->name_base); } -/** - * init_dev - initialise a tty device - * @driver: tty driver we are opening a device on - * @idx: device index - * @tty: returned tty structure - * - * Prepare a tty device. This may not be a "new" clean device but - * could also be an active device. The pty drivers require special - * handling because of this. - * - * Locking: - * The function is called under the tty_mutex, which - * protects us from the tty struct or driver itself going away. - * - * On exit the tty device has the line discipline attached and - * a reference count of 1. If a pair was created for pty/tty use - * and the other was a pty master then it too has a reference count of 1. - * +/* * WSH 06/09/97: Rewritten to remove races and properly clean up after a * failed open. The new code protects the open with a mutex, so it's * really quite straightforward. The mutex locking can probably be * relaxed for the (most common) case of reopening a tty. 
*/ - static int init_dev(struct tty_driver *driver, int idx, struct tty_struct **ret_tty) { struct tty_struct *tty, *o_tty; struct termios *tp, **tp_loc, *o_tp, **o_tp_loc; struct termios *ltp, **ltp_loc, *o_ltp, **o_ltp_loc; - int retval = 0; + int retval=0; /* check whether we're reopening an existing tty */ if (driver->flags & TTY_DRIVER_DEVPTS_MEM) { @@ -2078,20 +1662,10 @@ static int init_dev(struct tty_driver *driver, int idx, goto end_init; } -/** - * release_mem - release tty structure memory - * - * Releases memory associated with a tty structure, and clears out the - * driver table slots. This function is called when a device is no longer - * in use. It also gets called when setup of a device fails. - * - * Locking: - * tty_mutex - sometimes only - * takes the file list lock internally when working on the list - * of ttys that the driver keeps. - * FIXME: should we require tty_mutex is held here ?? +/* + * Releases memory associated with a tty structure, and clears out the + * driver table slots. */ - static void release_mem(struct tty_struct *tty, int idx) { struct tty_struct *o_tty; @@ -2432,27 +2006,18 @@ static void release_dev(struct file * filp) } -/** - * tty_open - open a tty device - * @inode: inode of device file - * @filp: file pointer to tty - * - * tty_open and tty_release keep up the tty count that contains the - * number of opens done on a tty. We cannot use the inode-count, as - * different inodes might point to the same tty. - * - * Open-counting is needed for pty masters, as well as for keeping - * track of serial lines: DTR is dropped when the last close happens. - * (This is not done solely through tty->count, now. - Ted 1/27/92) +/* + * tty_open and tty_release keep up the tty count that contains the + * number of opens done on a tty. We cannot use the inode-count, as + * different inodes might point to the same tty. * - * The termios state of a pty is reset on first open so that - * settings don't persist across reuse. + * Open-counting is needed for pty masters, as well as for keeping + * track of serial lines: DTR is dropped when the last close happens. + * (This is not done solely through tty->count, now. - Ted 1/27/92) * - * Locking: tty_mutex protects current->signal->tty, get_tty_driver and - * init_dev work. tty->count should protect the rest. - * task_lock is held to update task details for sessions + * The termios state of a pty is reset on first open so that + * settings don't persist across reuse. */ - static int tty_open(struct inode * inode, struct file * filp) { struct tty_struct *tty; @@ -2567,18 +2132,6 @@ static int tty_open(struct inode * inode, struct file * filp) } #ifdef CONFIG_UNIX98_PTYS -/** - * ptmx_open - open a unix 98 pty master - * @inode: inode of device file - * @filp: file pointer to tty - * - * Allocate a unix98 pty master device from the ptmx driver. - * - * Locking: tty_mutex protects theinit_dev work. tty->count should - protect the rest. - * allocated_ptys_lock handles the list of free pty numbers - */ - static int ptmx_open(struct inode * inode, struct file * filp) { struct tty_struct *tty; @@ -2638,18 +2191,6 @@ static int ptmx_open(struct inode * inode, struct file * filp) } #endif -/** - * tty_release - vfs callback for close - * @inode: inode of tty - * @filp: file pointer for handle to tty - * - * Called the last time each file handle is closed that references - * this tty. There may however be several such references. - * - * Locking: - * Takes bkl. 
See release_dev - */ - static int tty_release(struct inode * inode, struct file * filp) { lock_kernel(); @@ -2658,18 +2199,7 @@ static int tty_release(struct inode * inode, struct file * filp) return 0; } -/** - * tty_poll - check tty status - * @filp: file being polled - * @wait: poll wait structures to update - * - * Call the line discipline polling method to obtain the poll - * status of the device. - * - * Locking: locks called line discipline but ldisc poll method - * may be re-entered freely by other callers. - */ - +/* No kernel lock held - fine */ static unsigned int tty_poll(struct file * filp, poll_table * wait) { struct tty_struct * tty; @@ -2713,21 +2243,6 @@ static int tty_fasync(int fd, struct file * filp, int on) return 0; } -/** - * tiocsti - fake input character - * @tty: tty to fake input into - * @p: pointer to character - * - * Fake input to a tty device. Does the neccessary locking and - * input management. - * - * FIXME: does not honour flow control ?? - * - * Locking: - * Called functions take tty_ldisc_lock - * current->signal->tty check is safe without locks - */ - static int tiocsti(struct tty_struct *tty, char __user *p) { char ch, mbz = 0; @@ -2743,18 +2258,6 @@ static int tiocsti(struct tty_struct *tty, char __user *p) return 0; } -/** - * tiocgwinsz - implement window query ioctl - * @tty; tty - * @arg: user buffer for result - * - * Copies the kernel idea of the window size into the user buffer. No - * locking is done. - * - * FIXME: Returning random values racing a window size set is wrong - * should lock here against that - */ - static int tiocgwinsz(struct tty_struct *tty, struct winsize __user * arg) { if (copy_to_user(arg, &tty->winsize, sizeof(*arg))) @@ -2762,24 +2265,6 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user * arg) return 0; } -/** - * tiocswinsz - implement window size set ioctl - * @tty; tty - * @arg: user buffer for result - * - * Copies the user idea of the window size to the kernel. Traditionally - * this is just advisory information but for the Linux console it - * actually has driver level meaning and triggers a VC resize. - * - * Locking: - * The console_sem is used to ensure we do not try and resize - * the console twice at once. - * FIXME: Two racing size sets may leave the console and kernel - * parameters disagreeing. Is this exploitable ? - * FIXME: Random values racing a window size get is wrong - * should lock here against that - */ - static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty, struct winsize __user * arg) { @@ -2809,15 +2294,6 @@ static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty, return 0; } -/** - * tioccons - allow admin to move logical console - * @file: the file to become console - * - * Allow the adminstrator to move the redirected console device - * - * Locking: uses redirect_lock to guard the redirect information - */ - static int tioccons(struct file *file) { if (!capable(CAP_SYS_ADMIN)) @@ -2843,17 +2319,6 @@ static int tioccons(struct file *file) return 0; } -/** - * fionbio - non blocking ioctl - * @file: file to set blocking value - * @p: user parameter - * - * Historical tty interfaces had a blocking control ioctl before - * the generic functionality existed. This piece of history is preserved - * in the expected tty API of posix OS's. - * - * Locking: none, the open fle handle ensures it won't go away. 
- */ static int fionbio(struct file *file, int __user *p) { @@ -2869,23 +2334,6 @@ static int fionbio(struct file *file, int __user *p) return 0; } -/** - * tiocsctty - set controlling tty - * @tty: tty structure - * @arg: user argument - * - * This ioctl is used to manage job control. It permits a session - * leader to set this tty as the controlling tty for the session. - * - * Locking: - * Takes tasklist lock internally to walk sessions - * Takes task_lock() when updating signal->tty - * - * FIXME: tty_mutex is needed to protect signal->tty references. - * FIXME: why task_lock on the signal->tty reference ?? - * - */ - static int tiocsctty(struct tty_struct *tty, int arg) { struct task_struct *p; @@ -2926,18 +2374,6 @@ static int tiocsctty(struct tty_struct *tty, int arg) return 0; } -/** - * tiocgpgrp - get process group - * @tty: tty passed by user - * @real_tty: tty side of the tty pased by the user if a pty else the tty - * @p: returned pid - * - * Obtain the process group of the tty. If there is no process group - * return an error. - * - * Locking: none. Reference to ->signal->tty is safe. - */ - static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) { /* @@ -2949,20 +2385,6 @@ static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t return put_user(real_tty->pgrp, p); } -/** - * tiocspgrp - attempt to set process group - * @tty: tty passed by user - * @real_tty: tty side device matching tty passed by user - * @p: pid pointer - * - * Set the process group of the tty to the session passed. Only - * permitted where the tty session is our session. - * - * Locking: None - * - * FIXME: current->signal->tty referencing is unsafe. - */ - static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) { pid_t pgrp; @@ -2986,18 +2408,6 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t return 0; } -/** - * tiocgsid - get session id - * @tty: tty passed by user - * @real_tty: tty side of the tty pased by the user if a pty else the tty - * @p: pointer to returned session id - * - * Obtain the session id of the tty. If there is no session - * return an error. - * - * Locking: none. Reference to ->signal->tty is safe. - */ - static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) { /* @@ -3011,16 +2421,6 @@ static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t _ return put_user(real_tty->session, p); } -/** - * tiocsetd - set line discipline - * @tty: tty device - * @p: pointer to user data - * - * Set the line discipline according to user request. - * - * Locking: see tty_set_ldisc, this function is just a helper - */ - static int tiocsetd(struct tty_struct *tty, int __user *p) { int ldisc; @@ -3030,21 +2430,6 @@ static int tiocsetd(struct tty_struct *tty, int __user *p) return tty_set_ldisc(tty, ldisc); } -/** - * send_break - performed time break - * @tty: device to break on - * @duration: timeout in mS - * - * Perform a timed break on hardware that lacks its own driver level - * timed break functionality. 
- * - * Locking: - * None - * - * FIXME: - * What if two overlap - */ - static int send_break(struct tty_struct *tty, unsigned int duration) { tty->driver->break_ctl(tty, -1); @@ -3057,19 +2442,8 @@ static int send_break(struct tty_struct *tty, unsigned int duration) return 0; } -/** - * tiocmget - get modem status - * @tty: tty device - * @file: user file pointer - * @p: pointer to result - * - * Obtain the modem status bits from the tty driver if the feature - * is supported. Return -EINVAL if it is not available. - * - * Locking: none (up to the driver) - */ - -static int tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p) +static int +tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p) { int retval = -EINVAL; @@ -3082,20 +2456,8 @@ static int tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p return retval; } -/** - * tiocmset - set modem status - * @tty: tty device - * @file: user file pointer - * @cmd: command - clear bits, set bits or set all - * @p: pointer to desired bits - * - * Set the modem status bits from the tty driver if the feature - * is supported. Return -EINVAL if it is not available. - * - * Locking: none (up to the driver) - */ - -static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int cmd, +static int +tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned __user *p) { int retval = -EINVAL; @@ -3211,7 +2573,6 @@ int tty_ioctl(struct inode * inode, struct file * file, clear_bit(TTY_EXCLUSIVE, &tty->flags); return 0; case TIOCNOTTY: - /* FIXME: taks lock or tty_mutex ? */ if (current->signal->tty != tty) return -ENOTTY; if (current->signal->leader) @@ -3392,16 +2753,9 @@ void do_SAK(struct tty_struct *tty) EXPORT_SYMBOL(do_SAK); -/** - * flush_to_ldisc - * @private_: tty structure passed from work queue. - * - * This routine is called out of the software interrupt to flush data - * from the buffer chain to the line discipline. - * - * Locking: holds tty->buf.lock to guard buffer list. Drops the lock - * while invoking the line discipline receive_buf method. The - * receive_buf method is single threaded for each tty instance. +/* + * This routine is called out of the software interrupt to flush data + * from the buffer chain to the line discipline. */ static void flush_to_ldisc(void *private_) @@ -3477,8 +2831,6 @@ static int n_baud_table = ARRAY_SIZE(baud_table); * Convert termios baud rate data into a speed. This should be called * with the termios lock held if this termios is a terminal termios * structure. May change the termios data. - * - * Locking: none */ int tty_termios_baud_rate(struct termios *termios) @@ -3507,8 +2859,6 @@ EXPORT_SYMBOL(tty_termios_baud_rate); * Returns the baud rate as an integer for this terminal. The * termios lock must be held by the caller and the terminal bit * flags may be updated. - * - * Locking: none */ int tty_get_baud_rate(struct tty_struct *tty) @@ -3538,8 +2888,6 @@ EXPORT_SYMBOL(tty_get_baud_rate); * * In the event of the queue being busy for flipping the work will be * held off and retried later. - * - * Locking: tty buffer lock. Driver locks in low latency mode. */ void tty_flip_buffer_push(struct tty_struct *tty) @@ -3559,16 +2907,9 @@ void tty_flip_buffer_push(struct tty_struct *tty) EXPORT_SYMBOL(tty_flip_buffer_push); -/** - * initialize_tty_struct - * @tty: tty to initialize - * - * This subroutine initializes a tty structure that has been newly - * allocated. 
- * - * Locking: none - tty in question must not be exposed at this point +/* + * This subroutine initializes a tty structure. */ - static void initialize_tty_struct(struct tty_struct *tty) { memset(tty, 0, sizeof(struct tty_struct)); @@ -3594,7 +2935,6 @@ static void initialize_tty_struct(struct tty_struct *tty) /* * The default put_char routine if the driver did not define one. */ - static void tty_default_put_char(struct tty_struct *tty, unsigned char ch) { tty->driver->write(tty, &ch, 1); @@ -3603,23 +2943,19 @@ static void tty_default_put_char(struct tty_struct *tty, unsigned char ch) static struct class *tty_class; /** - * tty_register_device - register a tty device - * @driver: the tty driver that describes the tty device - * @index: the index in the tty driver for this tty device - * @device: a struct device that is associated with this tty device. - * This field is optional, if there is no known struct device - * for this tty device it can be set to NULL safely. + * tty_register_device - register a tty device + * @driver: the tty driver that describes the tty device + * @index: the index in the tty driver for this tty device + * @device: a struct device that is associated with this tty device. + * This field is optional, if there is no known struct device for this + * tty device it can be set to NULL safely. * - * Returns a pointer to the class device (or ERR_PTR(-EFOO) on error). + * Returns a pointer to the class device (or ERR_PTR(-EFOO) on error). * - * This call is required to be made to register an individual tty device - * if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If - * that bit is not set, this function should not be called by a tty - * driver. - * - * Locking: ?? + * This call is required to be made to register an individual tty device if + * the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If that + * bit is not set, this function should not be called by a tty driver. */ - struct class_device *tty_register_device(struct tty_driver *driver, unsigned index, struct device *device) { @@ -3641,16 +2977,13 @@ struct class_device *tty_register_device(struct tty_driver *driver, } /** - * tty_unregister_device - unregister a tty device - * @driver: the tty driver that describes the tty device - * @index: the index in the tty driver for this tty device + * tty_unregister_device - unregister a tty device + * @driver: the tty driver that describes the tty device + * @index: the index in the tty driver for this tty device * - * If a tty device is registered with a call to tty_register_device() then - * this function must be called when the tty device is gone. - * - * Locking: ?? + * If a tty device is registered with a call to tty_register_device() then + * this function must be made when the tty device is gone. 
*/ - void tty_unregister_device(struct tty_driver *driver, unsigned index) { class_device_destroy(tty_class, MKDEV(driver->major, driver->minor_start) + index); @@ -3761,6 +3094,7 @@ int tty_register_driver(struct tty_driver *driver) driver->cdev.owner = driver->owner; error = cdev_add(&driver->cdev, dev, driver->num); if (error) { + cdev_del(&driver->cdev); unregister_chrdev_region(dev, driver->num); driver->ttys = NULL; driver->termios = driver->termios_locked = NULL; diff --git a/trunk/drivers/char/tty_ioctl.c b/trunk/drivers/char/tty_ioctl.c index 4ad47d321bd4..f19cf9d7792d 100644 --- a/trunk/drivers/char/tty_ioctl.c +++ b/trunk/drivers/char/tty_ioctl.c @@ -36,18 +36,6 @@ #define TERMIOS_WAIT 2 #define TERMIOS_TERMIO 4 - -/** - * tty_wait_until_sent - wait for I/O to finish - * @tty: tty we are waiting for - * @timeout: how long we will wait - * - * Wait for characters pending in a tty driver to hit the wire, or - * for a timeout to occur (eg due to flow control) - * - * Locking: none - */ - void tty_wait_until_sent(struct tty_struct * tty, long timeout) { DECLARE_WAITQUEUE(wait, current); @@ -106,18 +94,6 @@ static void unset_locked_termios(struct termios *termios, old->c_cc[i] : termios->c_cc[i]; } -/** - * change_termios - update termios values - * @tty: tty to update - * @new_termios: desired new value - * - * Perform updates to the termios values set on this terminal. There - * is a bit of layering violation here with n_tty in terms of the - * internal knowledge of this function. - * - * Locking: termios_sem - */ - static void change_termios(struct tty_struct * tty, struct termios * new_termios) { int canon_change; @@ -179,19 +155,6 @@ static void change_termios(struct tty_struct * tty, struct termios * new_termios up(&tty->termios_sem); } -/** - * set_termios - set termios values for a tty - * @tty: terminal device - * @arg: user data - * @opt: option information - * - * Helper function to prepare termios data and run neccessary other - * functions before using change_termios to do the actual changes. - * - * Locking: - * Called functions take ldisc and termios_sem locks - */ - static int set_termios(struct tty_struct * tty, void __user *arg, int opt) { struct termios tmp_termios; @@ -321,17 +284,6 @@ static void set_sgflags(struct termios * termios, int flags) } } -/** - * set_sgttyb - set legacy terminal values - * @tty: tty structure - * @sgttyb: pointer to old style terminal structure - * - * Updates a terminal from the legacy BSD style terminal information - * structure. - * - * Locking: termios_sem - */ - static int set_sgttyb(struct tty_struct * tty, struct sgttyb __user * sgttyb) { int retval; @@ -417,16 +369,9 @@ static int set_ltchars(struct tty_struct * tty, struct ltchars __user * ltchars) } #endif -/** - * send_prio_char - send priority character - * - * Send a high priority character to the tty even if stopped - * - * Locking: none - * - * FIXME: overlapping calls with start/stop tty lose state of tty +/* + * Send a high priority character to the tty. 
*/ - static void send_prio_char(struct tty_struct *tty, char ch) { int was_stopped = tty->stopped; diff --git a/trunk/drivers/char/vt_ioctl.c b/trunk/drivers/char/vt_ioctl.c index a5628a8b6620..eccffaf26faa 100644 --- a/trunk/drivers/char/vt_ioctl.c +++ b/trunk/drivers/char/vt_ioctl.c @@ -1011,8 +1011,6 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, return -EPERM; vt_dont_switch = 0; return 0; - case VT_GETHIFONTMASK: - return put_user(vc->vc_hi_font_mask, (unsigned short __user *)arg); default: return -ENOIOCTLCMD; } diff --git a/trunk/drivers/hwmon/abituguru.c b/trunk/drivers/hwmon/abituguru.c index 35ad1b032726..cc15c4f2e9ec 100644 --- a/trunk/drivers/hwmon/abituguru.c +++ b/trunk/drivers/hwmon/abituguru.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include @@ -65,17 +64,17 @@ #define ABIT_UGURU_IN_SENSOR 0 #define ABIT_UGURU_TEMP_SENSOR 1 #define ABIT_UGURU_NC 2 -/* In many cases we need to wait for the uGuru to reach a certain status, most - of the time it will reach this status within 30 - 90 ISA reads, and thus we - can best busy wait. This define gives the total amount of reads to try. */ -#define ABIT_UGURU_WAIT_TIMEOUT 125 -/* However sometimes older versions of the uGuru seem to be distracted and they - do not respond for a long time. To handle this we sleep before each of the - last ABIT_UGURU_WAIT_TIMEOUT_SLEEP tries. */ -#define ABIT_UGURU_WAIT_TIMEOUT_SLEEP 5 +/* Timeouts / Retries, if these turn out to need a lot of fiddling we could + convert them to params. */ +/* 250 was determined by trial and error, 200 works most of the time, but not + always. I assume this is cpu-speed independent, since the ISA-bus and not + the CPU should be the bottleneck. Note that 250 sometimes is still not + enough (only reported on AN7 mb) this is handled by a higher layer. */ +#define ABIT_UGURU_WAIT_TIMEOUT 250 /* Normally all expected status in abituguru_ready, are reported after the - first read, but sometimes not and we need to poll. */ -#define ABIT_UGURU_READY_TIMEOUT 5 + first read, but sometimes not and we need to poll, 5 polls was not enough + 50 sofar is. */ +#define ABIT_UGURU_READY_TIMEOUT 50 /* Maximum 3 retries on timedout reads/writes, delay 200 ms before retrying */ #define ABIT_UGURU_MAX_RETRIES 3 #define ABIT_UGURU_RETRY_DELAY (HZ/5) @@ -227,10 +226,6 @@ static int abituguru_wait(struct abituguru_data *data, u8 state) timeout--; if (timeout == 0) return -EBUSY; - /* sleep a bit before our last few tries, see the comment on - this where ABIT_UGURU_WAIT_TIMEOUT_SLEEP is defined. */ - if (timeout <= ABIT_UGURU_WAIT_TIMEOUT_SLEEP) - msleep(0); } return 0; } @@ -261,7 +256,6 @@ static int abituguru_ready(struct abituguru_data *data) "CMD reg does not hold 0xAC after ready command\n"); return -EIO; } - msleep(0); } /* After this the ABIT_UGURU_DATA port should contain @@ -274,7 +268,6 @@ static int abituguru_ready(struct abituguru_data *data) "state != more input after ready command\n"); return -EIO; } - msleep(0); } data->uguru_ready = 1; @@ -338,8 +331,7 @@ static int abituguru_read(struct abituguru_data *data, /* And read the data */ for (i = 0; i < count; i++) { if (abituguru_wait(data, ABIT_UGURU_STATUS_READ)) { - ABIT_UGURU_DEBUG(retries ? 
1 : 3, - "timeout exceeded waiting for " + ABIT_UGURU_DEBUG(1, "timeout exceeded waiting for " "read state (bank: %d, sensor: %d)\n", (int)bank_addr, (int)sensor_addr); break; @@ -358,9 +350,7 @@ static int abituguru_read(struct abituguru_data *data, static int abituguru_write(struct abituguru_data *data, u8 bank_addr, u8 sensor_addr, u8 *buf, int count) { - /* We use the ready timeout as we have to wait for 0xAC just like the - ready function */ - int i, timeout = ABIT_UGURU_READY_TIMEOUT; + int i; /* Send the address */ i = abituguru_send_address(data, bank_addr, sensor_addr, @@ -380,8 +370,7 @@ static int abituguru_write(struct abituguru_data *data, } /* Now we need to wait till the chip is ready to be read again, - so that we can read 0xAC as confirmation that our write has - succeeded. */ + don't ask why */ if (abituguru_wait(data, ABIT_UGURU_STATUS_READ)) { ABIT_UGURU_DEBUG(1, "timeout exceeded waiting for read state " "after write (bank: %d, sensor: %d)\n", (int)bank_addr, @@ -390,15 +379,11 @@ static int abituguru_write(struct abituguru_data *data, } /* Cmd port MUST be read now and should contain 0xAC */ - while (inb_p(data->addr + ABIT_UGURU_CMD) != 0xAC) { - timeout--; - if (timeout == 0) { - ABIT_UGURU_DEBUG(1, "CMD reg does not hold 0xAC after " - "write (bank: %d, sensor: %d)\n", - (int)bank_addr, (int)sensor_addr); - return -EIO; - } - msleep(0); + if (inb_p(data->addr + ABIT_UGURU_CMD) != 0xAC) { + ABIT_UGURU_DEBUG(1, "CMD reg does not hold 0xAC after write " + "(bank: %d, sensor: %d)\n", (int)bank_addr, + (int)sensor_addr); + return -EIO; } /* Last put the chip back in ready state */ @@ -418,7 +403,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data, u8 sensor_addr) { u8 val, buf[3]; - int i, ret = -ENODEV; /* error is the most common used retval :| */ + int ret = ABIT_UGURU_NC; /* If overriden by the user return the user selected type */ if (bank1_types[sensor_addr] >= ABIT_UGURU_IN_SENSOR && @@ -454,7 +439,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data, buf[2] = 250; if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr, buf, 3) != 3) - goto abituguru_detect_bank1_sensor_type_exit; + return -ENODEV; /* Now we need 20 ms to give the uguru time to read the sensors and raise a voltage alarm */ set_current_state(TASK_UNINTERRUPTIBLE); @@ -462,16 +447,21 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data, /* Check for alarm and check the alarm is a volt low alarm. 
*/ if (abituguru_read(data, ABIT_UGURU_ALARM_BANK, 0, buf, 3, ABIT_UGURU_MAX_RETRIES) != 3) - goto abituguru_detect_bank1_sensor_type_exit; + return -ENODEV; if (buf[sensor_addr/8] & (0x01 << (sensor_addr % 8))) { if (abituguru_read(data, ABIT_UGURU_SENSOR_BANK1 + 1, sensor_addr, buf, 3, ABIT_UGURU_MAX_RETRIES) != 3) - goto abituguru_detect_bank1_sensor_type_exit; + return -ENODEV; if (buf[0] & ABIT_UGURU_VOLT_LOW_ALARM_FLAG) { + /* Restore original settings */ + if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, + sensor_addr, + data->bank1_settings[sensor_addr], + 3) != 3) + return -ENODEV; ABIT_UGURU_DEBUG(2, " found volt sensor\n"); - ret = ABIT_UGURU_IN_SENSOR; - goto abituguru_detect_bank1_sensor_type_exit; + return ABIT_UGURU_IN_SENSOR; } else ABIT_UGURU_DEBUG(2, " alarm raised during volt " "sensor test, but volt low flag not set\n"); @@ -487,7 +477,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data, buf[2] = 10; if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr, buf, 3) != 3) - goto abituguru_detect_bank1_sensor_type_exit; + return -ENODEV; /* Now we need 50 ms to give the uguru time to read the sensors and raise a temp alarm */ set_current_state(TASK_UNINTERRUPTIBLE); @@ -495,16 +485,15 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data, /* Check for alarm and check the alarm is a temp high alarm. */ if (abituguru_read(data, ABIT_UGURU_ALARM_BANK, 0, buf, 3, ABIT_UGURU_MAX_RETRIES) != 3) - goto abituguru_detect_bank1_sensor_type_exit; + return -ENODEV; if (buf[sensor_addr/8] & (0x01 << (sensor_addr % 8))) { if (abituguru_read(data, ABIT_UGURU_SENSOR_BANK1 + 1, sensor_addr, buf, 3, ABIT_UGURU_MAX_RETRIES) != 3) - goto abituguru_detect_bank1_sensor_type_exit; + return -ENODEV; if (buf[0] & ABIT_UGURU_TEMP_HIGH_ALARM_FLAG) { - ABIT_UGURU_DEBUG(2, " found temp sensor\n"); ret = ABIT_UGURU_TEMP_SENSOR; - goto abituguru_detect_bank1_sensor_type_exit; + ABIT_UGURU_DEBUG(2, " found temp sensor\n"); } else ABIT_UGURU_DEBUG(2, " alarm raised during temp " "sensor test, but temp high flag not set\n"); @@ -512,23 +501,11 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data, ABIT_UGURU_DEBUG(2, " alarm not raised during temp sensor " "test\n"); - ret = ABIT_UGURU_NC; -abituguru_detect_bank1_sensor_type_exit: - /* Restore original settings, failing here is really BAD, it has been - reported that some BIOS-es hang when entering the uGuru menu with - invalid settings present in the uGuru, so we try this 3 times. */ - for (i = 0; i < 3; i++) - if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, - sensor_addr, data->bank1_settings[sensor_addr], - 3) == 3) - break; - if (i == 3) { - printk(KERN_ERR ABIT_UGURU_NAME - ": Fatal error could not restore original settings. 
" - "This should never happen please report this to the " - "abituguru maintainer (see MAINTAINERS)\n"); + /* Restore original settings */ + if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr, + data->bank1_settings[sensor_addr], 3) != 3) return -ENODEV; - } + return ret; } @@ -1328,7 +1305,7 @@ static struct abituguru_data *abituguru_update_device(struct device *dev) data->update_timeouts = 0; LEAVE_UPDATE: /* handle timeout condition */ - if (!success && (err == -EBUSY || err >= 0)) { + if (err == -EBUSY) { /* No overflow please */ if (data->update_timeouts < 255u) data->update_timeouts++; diff --git a/trunk/drivers/i2c/chips/tps65010.c b/trunk/drivers/i2c/chips/tps65010.c index 0be6fd6a267d..e7e27049fbfa 100644 --- a/trunk/drivers/i2c/chips/tps65010.c +++ b/trunk/drivers/i2c/chips/tps65010.c @@ -43,12 +43,13 @@ /*-------------------------------------------------------------------------*/ #define DRIVER_VERSION "2 May 2005" -#define DRIVER_NAME (tps65010_driver.driver.name) +#define DRIVER_NAME (tps65010_driver.name) MODULE_DESCRIPTION("TPS6501x Power Management Driver"); MODULE_LICENSE("GPL"); static unsigned short normal_i2c[] = { 0x48, /* 0x49, */ I2C_CLIENT_END }; +static unsigned short normal_i2c_range[] = { I2C_CLIENT_END }; I2C_CLIENT_INSMOD; @@ -99,7 +100,7 @@ struct tps65010 { /* not currently tracking GPIO state */ }; -#define POWER_POLL_DELAY msecs_to_jiffies(5000) +#define POWER_POLL_DELAY msecs_to_jiffies(800) /*-------------------------------------------------------------------------*/ @@ -519,11 +520,8 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind) goto fail1; } - /* the IRQ is active low, but many gpio lines can't support that - * so this driver can use falling-edge triggers instead. - */ - irqflags = IRQF_SAMPLE_RANDOM; #ifdef CONFIG_ARM + irqflags = IRQF_SAMPLE_RANDOM | IRQF_TRIGGER_LOW; if (machine_is_omap_h2()) { tps->model = TPS65010; omap_cfg_reg(W4_GPIO58); @@ -545,6 +543,8 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind) // FIXME set up this board's IRQ ... 
} +#else + irqflags = IRQF_SAMPLE_RANDOM; #endif if (tps->irq > 0) { diff --git a/trunk/drivers/ieee1394/ohci1394.c b/trunk/drivers/ieee1394/ohci1394.c index 448df2773377..d4bad6704bbe 100644 --- a/trunk/drivers/ieee1394/ohci1394.c +++ b/trunk/drivers/ieee1394/ohci1394.c @@ -3552,8 +3552,6 @@ static int ohci1394_pci_resume (struct pci_dev *pdev) static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state) { - pci_save_state(pdev); - #ifdef CONFIG_PPC_PMAC if (machine_is(powermac)) { struct device_node *of_node; @@ -3565,6 +3563,8 @@ static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state) } #endif + pci_save_state(pdev); + return 0; } diff --git a/trunk/drivers/infiniband/core/cache.c b/trunk/drivers/infiniband/core/cache.c index 75313ade2e0d..e05ca2cdc73f 100644 --- a/trunk/drivers/infiniband/core/cache.c +++ b/trunk/drivers/infiniband/core/cache.c @@ -301,8 +301,7 @@ static void ib_cache_event(struct ib_event_handler *handler, event->event == IB_EVENT_PORT_ACTIVE || event->event == IB_EVENT_LID_CHANGE || event->event == IB_EVENT_PKEY_CHANGE || - event->event == IB_EVENT_SM_CHANGE || - event->event == IB_EVENT_CLIENT_REREGISTER) { + event->event == IB_EVENT_SM_CHANGE) { work = kmalloc(sizeof *work, GFP_ATOMIC); if (work) { INIT_WORK(&work->work, ib_cache_task, work); diff --git a/trunk/drivers/infiniband/core/sa_query.c b/trunk/drivers/infiniband/core/sa_query.c index d6b84226bba7..aeda484ffd82 100644 --- a/trunk/drivers/infiniband/core/sa_query.c +++ b/trunk/drivers/infiniband/core/sa_query.c @@ -405,8 +405,7 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event event->event == IB_EVENT_PORT_ACTIVE || event->event == IB_EVENT_LID_CHANGE || event->event == IB_EVENT_PKEY_CHANGE || - event->event == IB_EVENT_SM_CHANGE || - event->event == IB_EVENT_CLIENT_REREGISTER) { + event->event == IB_EVENT_SM_CHANGE) { struct ib_sa_device *sa_dev; sa_dev = container_of(handler, typeof(*sa_dev), event_handler); diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_main.c b/trunk/drivers/infiniband/hw/mthca/mthca_main.c index 7b82c1907f04..557cde3a4563 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_main.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_main.c @@ -967,12 +967,12 @@ static struct { } mthca_hca_table[] = { [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 4, 0), .flags = 0 }, - [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 600), + [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 400), .flags = MTHCA_FLAG_PCIE }, - [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 400), + [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 0), .flags = MTHCA_FLAG_MEMFREE | MTHCA_FLAG_PCIE }, - [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 1, 0), + [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 0, 800), .flags = MTHCA_FLAG_MEMFREE | MTHCA_FLAG_PCIE | MTHCA_FLAG_SINAI_OPT } diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_provider.c b/trunk/drivers/infiniband/hw/mthca/mthca_provider.c index 265b1d1c4a62..230ae21db8fd 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_provider.c @@ -1287,7 +1287,11 @@ int mthca_register_device(struct mthca_dev *dev) (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | - (1ull << IB_USER_VERBS_CMD_DETACH_MCAST); + (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | + (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | + (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | + (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | + (1ull << 
IB_USER_VERBS_CMD_DESTROY_SRQ); dev->ib_dev.node_type = IB_NODE_CA; dev->ib_dev.phys_port_cnt = dev->limits.num_ports; dev->ib_dev.dma_device = &dev->pdev->dev; @@ -1312,11 +1316,6 @@ int mthca_register_device(struct mthca_dev *dev) dev->ib_dev.modify_srq = mthca_modify_srq; dev->ib_dev.query_srq = mthca_query_srq; dev->ib_dev.destroy_srq = mthca_destroy_srq; - dev->ib_dev.uverbs_cmd_mask |= - (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | - (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | - (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | - (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ); if (mthca_is_memfree(dev)) dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv; diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_provider.h b/trunk/drivers/infiniband/hw/mthca/mthca_provider.h index 9a5bece3fa5c..8de2887ba15c 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_provider.h +++ b/trunk/drivers/infiniband/hw/mthca/mthca_provider.h @@ -136,8 +136,8 @@ struct mthca_ah { * We have one global lock that protects dev->cq/qp_table. Each * struct mthca_cq/qp also has its own lock. An individual qp lock * may be taken inside of an individual cq lock. Both cqs attached to - * a qp may be locked, with the cq with the lower cqn locked first. - * No other nesting should be done. + * a qp may be locked, with the send cq locked first. No other + * nesting should be done. * * Each struct mthca_cq/qp also has an ref count, protected by the * corresponding table lock. The pointer from the cq/qp_table to the diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_qp.c b/trunk/drivers/infiniband/hw/mthca/mthca_qp.c index 2e8f6f36e0a5..cd8b6721ac9c 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_qp.c @@ -99,10 +99,6 @@ enum { MTHCA_QP_BIT_RSC = 1 << 3 }; -enum { - MTHCA_SEND_DOORBELL_FENCE = 1 << 5 -}; - struct mthca_qp_path { __be32 port_pkey; u8 rnr_retry; @@ -1263,32 +1259,6 @@ int mthca_alloc_qp(struct mthca_dev *dev, return 0; } -static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) -{ - if (send_cq == recv_cq) - spin_lock_irq(&send_cq->lock); - else if (send_cq->cqn < recv_cq->cqn) { - spin_lock_irq(&send_cq->lock); - spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); - } else { - spin_lock_irq(&recv_cq->lock); - spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); - } -} - -static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) -{ - if (send_cq == recv_cq) - spin_unlock_irq(&send_cq->lock); - else if (send_cq->cqn < recv_cq->cqn) { - spin_unlock(&recv_cq->lock); - spin_unlock_irq(&send_cq->lock); - } else { - spin_unlock(&send_cq->lock); - spin_unlock_irq(&recv_cq->lock); - } -} - int mthca_alloc_sqp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, @@ -1341,13 +1311,17 @@ int mthca_alloc_sqp(struct mthca_dev *dev, * Lock CQs here, so that CQ polling code can do QP lookup * without taking a lock. */ - mthca_lock_cqs(send_cq, recv_cq); + spin_lock_irq(&send_cq->lock); + if (send_cq != recv_cq) + spin_lock(&recv_cq->lock); spin_lock(&dev->qp_table.lock); mthca_array_clear(&dev->qp_table.qp, mqpn); spin_unlock(&dev->qp_table.lock); - mthca_unlock_cqs(send_cq, recv_cq); + if (send_cq != recv_cq) + spin_unlock(&recv_cq->lock); + spin_unlock_irq(&send_cq->lock); err_out: dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size, @@ -1381,7 +1355,9 @@ void mthca_free_qp(struct mthca_dev *dev, * Lock CQs here, so that CQ polling code can do QP lookup * without taking a lock. 
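The mthca_register_device hunk above folds the SRQ verbs into the single 64-bit uverbs_cmd_mask initializer instead of OR-ing them in further down. A small sketch of building and testing such a capability mask; the command numbers are invented, not the real IB_USER_VERBS_CMD_* values:

  #include <stdint.h>
  #include <stdio.h>

  /* Illustrative command numbers only, not the real verbs enum. */
  enum { CMD_CREATE_QP, CMD_DESTROY_QP, CMD_CREATE_SRQ, CMD_DESTROY_SRQ };

  int main(void)
  {
      uint64_t cmd_mask = 0;

      /* Build the whole mask in one place, as the hunk above does. */
      cmd_mask |= (1ull << CMD_CREATE_QP)  |
                  (1ull << CMD_DESTROY_QP) |
                  (1ull << CMD_CREATE_SRQ) |
                  (1ull << CMD_DESTROY_SRQ);

      /* Later, a single bit test tells whether a command is supported. */
      printf("SRQ create supported: %s\n",
             (cmd_mask & (1ull << CMD_CREATE_SRQ)) ? "yes" : "no");
      return 0;
  }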
*/ - mthca_lock_cqs(send_cq, recv_cq); + spin_lock_irq(&send_cq->lock); + if (send_cq != recv_cq) + spin_lock(&recv_cq->lock); spin_lock(&dev->qp_table.lock); mthca_array_clear(&dev->qp_table.qp, @@ -1389,7 +1365,9 @@ void mthca_free_qp(struct mthca_dev *dev, --qp->refcount; spin_unlock(&dev->qp_table.lock); - mthca_unlock_cqs(send_cq, recv_cq); + if (send_cq != recv_cq) + spin_unlock(&recv_cq->lock); + spin_unlock_irq(&send_cq->lock); wait_event(qp->wait, !get_qp_refcount(dev, qp)); @@ -1524,7 +1502,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, int i; int size; int size0 = 0; - u32 f0; + u32 f0 = 0; int ind; u8 op0 = 0; @@ -1708,8 +1686,6 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, if (!size0) { size0 = size; op0 = mthca_opcode[wr->opcode]; - f0 = wr->send_flags & IB_SEND_FENCE ? - MTHCA_SEND_DOORBELL_FENCE : 0; } ++ind; @@ -1867,7 +1843,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, int i; int size; int size0 = 0; - u32 f0; + u32 f0 = 0; int ind; u8 op0 = 0; @@ -2075,8 +2051,6 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, if (!size0) { size0 = size; op0 = mthca_opcode[wr->opcode]; - f0 = wr->send_flags & IB_SEND_FENCE ? - MTHCA_SEND_DOORBELL_FENCE : 0; } ++ind; diff --git a/trunk/drivers/infiniband/ulp/iser/iscsi_iser.c b/trunk/drivers/infiniband/ulp/iser/iscsi_iser.c index 1437d7ee3b19..34b0da5cfa0a 100644 --- a/trunk/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/trunk/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -378,6 +378,21 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) return iser_conn_set_full_featured_mode(conn); } +static void +iscsi_iser_conn_terminate(struct iscsi_conn *conn) +{ + struct iscsi_iser_conn *iser_conn = conn->dd_data; + struct iser_conn *ib_conn = iser_conn->ib_conn; + + BUG_ON(!ib_conn); + /* starts conn teardown process, waits until all previously * + * posted buffers get flushed, deallocates all conn resources */ + iser_conn_terminate(ib_conn); + iser_conn->ib_conn = NULL; + conn->recv_lock = NULL; +} + + static struct iscsi_transport iscsi_iser_transport; static struct iscsi_cls_session * @@ -540,13 +555,13 @@ iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms) static void iscsi_iser_ep_disconnect(__u64 ep_handle) { - struct iser_conn *ib_conn; + struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle); - ib_conn = iscsi_iser_ib_conn_lookup(ep_handle); if (!ib_conn) return; iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state); + iser_conn_terminate(ib_conn); } @@ -599,6 +614,9 @@ static struct iscsi_transport iscsi_iser_transport = { .get_session_param = iscsi_session_get_param, .start_conn = iscsi_iser_conn_start, .stop_conn = iscsi_conn_stop, + /* these are called as part of conn recovery */ + .suspend_conn_recv = NULL, /* FIXME is/how this relvant to iser? 
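The mthca_qp.c hunks above replace the mthca_lock_cqs()/mthca_unlock_cqs() helpers, which ordered the two CQ locks by CQ number, with a fixed send-CQ-first order. Either rule avoids deadlock only if every path uses the same one. A user-space sketch of the pattern (take both locks in one agreed order, skip the second when both sides are the same object), using pthread mutexes rather than the kernel spinlock calls:

  #include <pthread.h>

  struct cq { pthread_mutex_t lock; };

  /* Lock two CQs in one fixed order (send CQ first). Every caller must
   * follow the same rule, or two threads locking (a, b) and (b, a)
   * can deadlock. */
  static void lock_cqs(struct cq *send_cq, struct cq *recv_cq)
  {
      pthread_mutex_lock(&send_cq->lock);
      if (send_cq != recv_cq)
          pthread_mutex_lock(&recv_cq->lock);
  }

  static void unlock_cqs(struct cq *send_cq, struct cq *recv_cq)
  {
      if (send_cq != recv_cq)
          pthread_mutex_unlock(&recv_cq->lock);
      pthread_mutex_unlock(&send_cq->lock);
  }

  int main(void)
  {
      struct cq a, b;

      pthread_mutex_init(&a.lock, NULL);
      pthread_mutex_init(&b.lock, NULL);

      lock_cqs(&a, &b);      /* both held */
      unlock_cqs(&a, &b);
      lock_cqs(&a, &a);      /* same CQ on both sides: locked only once */
      unlock_cqs(&a, &a);
      return 0;
  }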
*/ + .terminate_conn = iscsi_iser_conn_terminate, /* IO */ .send_pdu = iscsi_conn_send_pdu, .get_stats = iscsi_iser_conn_get_stats, diff --git a/trunk/drivers/input/keyboard/atkbd.c b/trunk/drivers/input/keyboard/atkbd.c index a86afd0a5ef1..6bfa0cf4b1d2 100644 --- a/trunk/drivers/input/keyboard/atkbd.c +++ b/trunk/drivers/input/keyboard/atkbd.c @@ -498,7 +498,7 @@ static int atkbd_set_repeat_rate(struct atkbd *atkbd) i++; dev->rep[REP_PERIOD] = period[i]; - while (j < ARRAY_SIZE(delay) - 1 && delay[j] < dev->rep[REP_DELAY]) + while (j < ARRAY_SIZE(period) - 1 && delay[j] < dev->rep[REP_DELAY]) j++; dev->rep[REP_DELAY] = delay[j]; diff --git a/trunk/drivers/input/misc/wistron_btns.c b/trunk/drivers/input/misc/wistron_btns.c index de0f46dd9692..a8efc1af36cb 100644 --- a/trunk/drivers/input/misc/wistron_btns.c +++ b/trunk/drivers/input/misc/wistron_btns.c @@ -259,11 +259,11 @@ static int __init dmi_matched(struct dmi_system_id *dmi) return 1; } -static struct key_entry keymap_empty[] = { +static struct key_entry keymap_empty[] __initdata = { { KE_END, 0 } }; -static struct key_entry keymap_fs_amilo_pro_v2000[] = { +static struct key_entry keymap_fs_amilo_pro_v2000[] __initdata = { { KE_KEY, 0x01, KEY_HELP }, { KE_KEY, 0x11, KEY_PROG1 }, { KE_KEY, 0x12, KEY_PROG2 }, @@ -273,7 +273,7 @@ static struct key_entry keymap_fs_amilo_pro_v2000[] = { { KE_END, 0 } }; -static struct key_entry keymap_fujitsu_n3510[] = { +static struct key_entry keymap_fujitsu_n3510[] __initdata = { { KE_KEY, 0x11, KEY_PROG1 }, { KE_KEY, 0x12, KEY_PROG2 }, { KE_KEY, 0x36, KEY_WWW }, @@ -285,7 +285,7 @@ static struct key_entry keymap_fujitsu_n3510[] = { { KE_END, 0 } }; -static struct key_entry keymap_wistron_ms2111[] = { +static struct key_entry keymap_wistron_ms2111[] __initdata = { { KE_KEY, 0x11, KEY_PROG1 }, { KE_KEY, 0x12, KEY_PROG2 }, { KE_KEY, 0x13, KEY_PROG3 }, @@ -294,7 +294,7 @@ static struct key_entry keymap_wistron_ms2111[] = { { KE_END, 0 } }; -static struct key_entry keymap_wistron_ms2141[] = { +static struct key_entry keymap_wistron_ms2141[] __initdata = { { KE_KEY, 0x11, KEY_PROG1 }, { KE_KEY, 0x12, KEY_PROG2 }, { KE_WIFI, 0x30, 0 }, @@ -307,7 +307,7 @@ static struct key_entry keymap_wistron_ms2141[] = { { KE_END, 0 } }; -static struct key_entry keymap_acer_aspire_1500[] = { +static struct key_entry keymap_acer_aspire_1500[] __initdata = { { KE_KEY, 0x11, KEY_PROG1 }, { KE_KEY, 0x12, KEY_PROG2 }, { KE_WIFI, 0x30, 0 }, @@ -317,7 +317,7 @@ static struct key_entry keymap_acer_aspire_1500[] = { { KE_END, 0 } }; -static struct key_entry keymap_acer_travelmate_240[] = { +static struct key_entry keymap_acer_travelmate_240[] __initdata = { { KE_KEY, 0x31, KEY_MAIL }, { KE_KEY, 0x36, KEY_WWW }, { KE_KEY, 0x11, KEY_PROG1 }, @@ -327,7 +327,7 @@ static struct key_entry keymap_acer_travelmate_240[] = { { KE_END, 0 } }; -static struct key_entry keymap_aopen_1559as[] = { +static struct key_entry keymap_aopen_1559as[] __initdata = { { KE_KEY, 0x01, KEY_HELP }, { KE_KEY, 0x06, KEY_PROG3 }, { KE_KEY, 0x11, KEY_PROG1 }, diff --git a/trunk/drivers/input/mouse/psmouse-base.c b/trunk/drivers/input/mouse/psmouse-base.c index 343afa38f4c2..8bc9f51ae6c2 100644 --- a/trunk/drivers/input/mouse/psmouse-base.c +++ b/trunk/drivers/input/mouse/psmouse-base.c @@ -485,6 +485,13 @@ static int im_explorer_detect(struct psmouse *psmouse, int set_properties) param[0] = 40; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); + param[0] = 200; + ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); + param[0] = 200; + ps2_command(ps2dev, param, 
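The atkbd_set_repeat_rate hunk above walks a sorted table of supported values and stops at the first entry that is not below the request, rounding up. A compilable sketch of that lookup with an invented delay table; note that the loop bound and the indexed array are the same table here:

  #include <stdio.h>

  #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

  /* Invented set of supported delays in ms, ascending. */
  static const int delay[] = { 250, 500, 750, 1000 };

  /* Round the request up to the next supported value, clamping to the
   * last entry when the request is larger than everything in the table. */
  static int pick_delay(int requested)
  {
      unsigned int i = 0;

      while (i < ARRAY_SIZE(delay) - 1 && delay[i] < requested)
          i++;
      return delay[i];
  }

  int main(void)
  {
      printf("%d %d %d\n", pick_delay(100), pick_delay(600), pick_delay(5000));
      /* prints: 250 750 1000 */
      return 0;
  }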
PSMOUSE_CMD_SETRATE); + param[0] = 60; + ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); + if (set_properties) { set_bit(BTN_MIDDLE, psmouse->dev->keybit); set_bit(REL_WHEEL, psmouse->dev->relbit); diff --git a/trunk/drivers/md/dm-raid1.c b/trunk/drivers/md/dm-raid1.c index c54de989eb00..be48cedf986b 100644 --- a/trunk/drivers/md/dm-raid1.c +++ b/trunk/drivers/md/dm-raid1.c @@ -255,9 +255,7 @@ static struct region *__rh_alloc(struct region_hash *rh, region_t region) struct region *reg, *nreg; read_unlock(&rh->hash_lock); - nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC); - if (unlikely(!nreg)) - nreg = kmalloc(sizeof(struct region), GFP_NOIO); + nreg = mempool_alloc(rh->region_pool, GFP_NOIO); nreg->state = rh->log->type->in_sync(rh->log, region, 1) ? RH_CLEAN : RH_NOSYNC; nreg->rh = rh; diff --git a/trunk/drivers/md/md.c b/trunk/drivers/md/md.c index 8dbab2ef3885..b6d16022a53e 100644 --- a/trunk/drivers/md/md.c +++ b/trunk/drivers/md/md.c @@ -1597,19 +1597,6 @@ void md_update_sb(mddev_t * mddev) repeat: spin_lock_irq(&mddev->write_lock); - - if (mddev->degraded && mddev->sb_dirty == 3) - /* If the array is degraded, then skipping spares is both - * dangerous and fairly pointless. - * Dangerous because a device that was removed from the array - * might have a event_count that still looks up-to-date, - * so it can be re-added without a resync. - * Pointless because if there are any spares to skip, - * then a recovery will happen and soon that array won't - * be degraded any more and the spare can go back to sleep then. - */ - mddev->sb_dirty = 1; - sync_req = mddev->in_sync; mddev->utime = get_seconds(); if (mddev->sb_dirty == 3) diff --git a/trunk/drivers/md/raid1.c b/trunk/drivers/md/raid1.c index 87bfe9e7d8ca..1efe22a2d041 100644 --- a/trunk/drivers/md/raid1.c +++ b/trunk/drivers/md/raid1.c @@ -1625,16 +1625,15 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i return 0; } + /* before building a request, check if we can skip these blocks.. + * This call the bitmap_start_sync doesn't actually record anything + */ if (mddev->bitmap == NULL && mddev->recovery_cp == MaxSector && - !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && conf->fullsync == 0) { *skipped = 1; return max_sector - sector_nr; } - /* before building a request, check if we can skip these blocks.. 
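The dm-raid1 hunk above goes back to a single mempool_alloc(..., GFP_NOIO) call, relying on the pool's preallocated reserve rather than an open-coded kmalloc fallback. A rough user-space analogue of the reserve-pool idea (the kernel mempool API is richer, it can sleep and refill itself; this only shows the shape):

  #include <stdio.h>
  #include <stdlib.h>

  #define POOL_MIN 4

  struct pool {
      void *reserve[POOL_MIN];
      int   nr_free;
  };

  static int pool_init(struct pool *p, size_t objsize)
  {
      for (p->nr_free = 0; p->nr_free < POOL_MIN; p->nr_free++)
          if (!(p->reserve[p->nr_free] = malloc(objsize)))
              return -1;
      return 0;
  }

  /* Try the normal allocator first; fall back to the preallocated
   * reserve so the caller still gets an object under memory pressure. */
  static void *pool_alloc(struct pool *p, size_t objsize)
  {
      void *obj = malloc(objsize);

      if (!obj && p->nr_free > 0)
          obj = p->reserve[--p->nr_free];
      return obj;
  }

  static void pool_free(struct pool *p, void *obj)
  {
      if (p->nr_free < POOL_MIN)
          p->reserve[p->nr_free++] = obj;   /* top the reserve back up first */
      else
          free(obj);
  }

  int main(void)
  {
      struct pool p;
      void *obj;

      if (pool_init(&p, 64))
          return 1;
      obj = pool_alloc(&p, 64);
      printf("got %p, reserve holds %d objects\n", obj, p.nr_free);
      pool_free(&p, obj);
      return 0;
  }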
- * This call the bitmap_start_sync doesn't actually record anything - */ if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { /* We can skip this block, and probably several more */ diff --git a/trunk/drivers/message/fusion/mptbase.h b/trunk/drivers/message/fusion/mptbase.h index c537d71c18e4..d4cb144ab402 100644 --- a/trunk/drivers/message/fusion/mptbase.h +++ b/trunk/drivers/message/fusion/mptbase.h @@ -640,6 +640,7 @@ typedef struct _MPT_ADAPTER struct work_struct fc_setup_reset_work; struct list_head fc_rports; spinlock_t fc_rescan_work_lock; + int fc_rescan_work_count; struct work_struct fc_rescan_work; char fc_rescan_work_q_name[KOBJ_NAME_LEN]; struct workqueue_struct *fc_rescan_work_q; diff --git a/trunk/drivers/message/fusion/mptfc.c b/trunk/drivers/message/fusion/mptfc.c index 85696f34c310..90da7d63b08e 100644 --- a/trunk/drivers/message/fusion/mptfc.c +++ b/trunk/drivers/message/fusion/mptfc.c @@ -669,10 +669,7 @@ mptfc_GetFcPortPage0(MPT_ADAPTER *ioc, int portnum) * if still doing discovery, * hang loose a while until finished */ - if ((pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN) || - (pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_ONLINE && - (pp0dest->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) - == MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT)) { + if (pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN) { if (count-- > 0) { msleep(100); goto try_again; @@ -898,45 +895,59 @@ mptfc_rescan_devices(void *arg) { MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; int ii; + int work_to_do; u64 pn; + unsigned long flags; struct mptfc_rport_info *ri; - /* start by tagging all ports as missing */ - list_for_each_entry(ri, &ioc->fc_rports, list) { - if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) { - ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING; + do { + /* start by tagging all ports as missing */ + list_for_each_entry(ri, &ioc->fc_rports, list) { + if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) { + ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING; + } } - } - /* - * now rescan devices known to adapter, - * will reregister existing rports - */ - for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { - (void) mptfc_GetFcPortPage0(ioc, ii); - mptfc_init_host_attr(ioc, ii); /* refresh */ - mptfc_GetFcDevPage0(ioc, ii, mptfc_register_dev); - } + /* + * now rescan devices known to adapter, + * will reregister existing rports + */ + for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { + (void) mptfc_GetFcPortPage0(ioc, ii); + mptfc_init_host_attr(ioc,ii); /* refresh */ + mptfc_GetFcDevPage0(ioc,ii,mptfc_register_dev); + } - /* delete devices still missing */ - list_for_each_entry(ri, &ioc->fc_rports, list) { - /* if newly missing, delete it */ - if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) { + /* delete devices still missing */ + list_for_each_entry(ri, &ioc->fc_rports, list) { + /* if newly missing, delete it */ + if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) { - ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED| - MPT_RPORT_INFO_FLAGS_MISSING); - fc_remote_port_delete(ri->rport); /* won't sleep */ - ri->rport = NULL; + ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED| + MPT_RPORT_INFO_FLAGS_MISSING); + fc_remote_port_delete(ri->rport); /* won't sleep */ + ri->rport = NULL; - pn = (u64)ri->pg0.WWPN.High << 32 | - (u64)ri->pg0.WWPN.Low; - dfcprintk ((MYIOC_s_INFO_FMT - "mptfc_rescan.%d: %llx deleted\n", - ioc->name, - ioc->sh->host_no, - (unsigned long long)pn)); + pn = (u64)ri->pg0.WWPN.High << 32 | + 
(u64)ri->pg0.WWPN.Low; + dfcprintk ((MYIOC_s_INFO_FMT + "mptfc_rescan.%d: %llx deleted\n", + ioc->name, + ioc->sh->host_no, + (unsigned long long)pn)); + } } - } + + /* + * allow multiple passes as target state + * might have changed during scan + */ + spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); + if (ioc->fc_rescan_work_count > 2) /* only need one more */ + ioc->fc_rescan_work_count = 2; + work_to_do = --ioc->fc_rescan_work_count; + spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); + } while (work_to_do); } static int @@ -1148,6 +1159,7 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) * by doing it via the workqueue, some locking is eliminated */ + ioc->fc_rescan_work_count = 1; queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work); flush_workqueue(ioc->fc_rescan_work_q); @@ -1190,8 +1202,10 @@ mptfc_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) case MPI_EVENT_RESCAN: spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); if (ioc->fc_rescan_work_q) { - queue_work(ioc->fc_rescan_work_q, - &ioc->fc_rescan_work); + if (ioc->fc_rescan_work_count++ == 0) { + queue_work(ioc->fc_rescan_work_q, + &ioc->fc_rescan_work); + } } spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); break; @@ -1234,8 +1248,10 @@ mptfc_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) mptfc_SetFcPortPage1_defaults(ioc); spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); if (ioc->fc_rescan_work_q) { - queue_work(ioc->fc_rescan_work_q, - &ioc->fc_rescan_work); + if (ioc->fc_rescan_work_count++ == 0) { + queue_work(ioc->fc_rescan_work_q, + &ioc->fc_rescan_work); + } } spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); } diff --git a/trunk/drivers/mtd/nand/ams-delta.c b/trunk/drivers/mtd/nand/ams-delta.c index a0ba07c36ee9..d7897dc6b3c8 100644 --- a/trunk/drivers/mtd/nand/ams-delta.c +++ b/trunk/drivers/mtd/nand/ams-delta.c @@ -130,13 +130,11 @@ static void ams_delta_hwcontrol(struct mtd_info *mtd, int cmd, if (ctrl & NAND_CTRL_CHANGE) { unsigned long bits; - bits = (~ctrl & NAND_NCE) ? AMS_DELTA_LATCH2_NAND_NCE : 0; - bits |= (ctrl & NAND_CLE) ? AMS_DELTA_LATCH2_NAND_CLE : 0; - bits |= (ctrl & NAND_ALE) ? 
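The mptfc hunks above coalesce rescan requests through fc_rescan_work_count: the event side queues the work only when the counter goes from 0 to 1, and the worker keeps looping while the counter shows more requests arrived, capped so a burst costs at most one catch-up pass. A single-threaded sketch of that counting scheme, with a hypothetical do_scan() standing in for the Fibre Channel rescan and no real workqueue or locking:

  #include <stdio.h>

  static int rescan_count;    /* the driver protects this with fc_rescan_work_lock */

  static void do_scan(void) { printf("scanning...\n"); }

  /* Event side: remember the request, start the worker only on 0 -> 1. */
  static int request_rescan(void)
  {
      return rescan_count++ == 0;    /* nonzero: caller should run rescan_worker() */
  }

  /* Work side: rescan again while requests arrived during the last pass,
   * but never pile up more than one catch-up pass. */
  static void rescan_worker(void)
  {
      int work_to_do;

      do {
          do_scan();
          if (rescan_count > 2)
              rescan_count = 2;
          work_to_do = --rescan_count;
      } while (work_to_do);
  }

  int main(void)
  {
      int kick;

      if (request_rescan())
          rescan_worker();        /* single request: one pass */

      kick = request_rescan();    /* a burst of three requests... */
      request_rescan();
      request_rescan();
      if (kick)
          rescan_worker();        /* ...is drained in two passes, not three */
      return 0;
  }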
AMS_DELTA_LATCH2_NAND_ALE : 0; + bits = (~ctrl & NAND_NCE) << 2; + bits |= (ctrl & NAND_CLE) << 7; + bits |= (ctrl & NAND_ALE) << 6; - ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_CLE | - AMS_DELTA_LATCH2_NAND_ALE | - AMS_DELTA_LATCH2_NAND_NCE, bits); + ams_delta_latch2_write(0xC2, bits); } if (cmd != NAND_CMD_NONE) diff --git a/trunk/drivers/mtd/nand/nand_base.c b/trunk/drivers/mtd/nand/nand_base.c index c8cbc00243fe..62b861304e03 100644 --- a/trunk/drivers/mtd/nand/nand_base.c +++ b/trunk/drivers/mtd/nand/nand_base.c @@ -1093,10 +1093,9 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len, ret = nand_do_read_ops(mtd, from, &chip->ops); - *retlen = chip->ops.retlen; - nand_release_device(mtd); + *retlen = chip->ops.retlen; return ret; } @@ -1692,10 +1691,9 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len, ret = nand_do_write_ops(mtd, to, &chip->ops); - *retlen = chip->ops.retlen; - nand_release_device(mtd); + *retlen = chip->ops.retlen; return ret; } diff --git a/trunk/drivers/net/e1000/e1000.h b/trunk/drivers/net/e1000/e1000.h index d304297c496c..c1107815f22c 100644 --- a/trunk/drivers/net/e1000/e1000.h +++ b/trunk/drivers/net/e1000/e1000.h @@ -242,7 +242,7 @@ struct e1000_adapter { struct timer_list watchdog_timer; struct timer_list phy_info_timer; struct vlan_group *vlgrp; - uint16_t mng_vlan_id; + uint16_t mng_vlan_id; uint32_t bd_number; uint32_t rx_buffer_len; uint32_t part_num; diff --git a/trunk/drivers/net/e1000/e1000_ethtool.c b/trunk/drivers/net/e1000/e1000_ethtool.c index 2baccf864328..ab2f153d5735 100644 --- a/trunk/drivers/net/e1000/e1000_ethtool.c +++ b/trunk/drivers/net/e1000/e1000_ethtool.c @@ -428,12 +428,12 @@ e1000_get_regs(struct net_device *netdev, regs_buff[23] = regs_buff[18]; /* mdix mode */ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); } else { - e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); regs_buff[13] = (uint32_t)phy_data; /* cable length */ regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ - e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); regs_buff[17] = (uint32_t)phy_data; /* extended 10bt distance */ regs_buff[18] = regs_buff[13]; /* cable polarity */ regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */ @@ -709,7 +709,6 @@ e1000_set_ringparam(struct net_device *netdev, } clear_bit(__E1000_RESETTING, &adapter->flags); - return 0; err_setup_tx: e1000_free_all_rx_resources(adapter); @@ -894,16 +893,17 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data) *data = 0; + /* NOTE: we don't test MSI interrupts here, yet */ /* Hook up test interrupt handler just for this test */ if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, - netdev->name, netdev)) { - shared_int = FALSE; - } else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED, - netdev->name, netdev)){ + netdev->name, netdev)) + shared_int = FALSE; + else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED, + netdev->name, netdev)) { *data = 1; return -1; } - DPRINTK(PROBE,INFO, "testing %s interrupt\n", + DPRINTK(HW, INFO, "testing %s interrupt\n", (shared_int ? 
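The ams-delta hwcontrol hunk above maps the generic NAND_NCE/NAND_CLE/NAND_ALE bits onto board latch bits with raw shifts and a bare 0xC2 mask; the replaced lines spelled the same mapping out with named masks. A small sketch in the named-mask style, with invented bit values:

  #include <stdio.h>

  /* Generic control flags (illustrative values). */
  #define CTRL_NCE 0x01
  #define CTRL_CLE 0x02
  #define CTRL_ALE 0x04

  /* Board latch bits (also illustrative). */
  #define LATCH_NCE 0x04
  #define LATCH_CLE 0x80
  #define LATCH_ALE 0x40
  #define LATCH_NAND_MASK (LATCH_NCE | LATCH_CLE | LATCH_ALE)

  /* Translate the generic flags into the latch layout. NCE is treated
   * as active low on the latch side, hence the inversion. */
  static unsigned int ctrl_to_latch(unsigned int ctrl)
  {
      unsigned int bits = 0;

      bits |= (ctrl & CTRL_NCE) ? 0 : LATCH_NCE;
      bits |= (ctrl & CTRL_CLE) ? LATCH_CLE : 0;
      bits |= (ctrl & CTRL_ALE) ? LATCH_ALE : 0;
      return bits & LATCH_NAND_MASK;
  }

  int main(void)
  {
      /* CLE asserted, chip not enabled: prints 0x84 with these values. */
      printf("latch bits 0x%02x\n", ctrl_to_latch(CTRL_CLE));
      return 0;
  }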
"shared" : "unshared")); /* Disable all the interrupts */ @@ -1269,11 +1269,10 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter) e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x9140); /* autoneg off */ e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8140); - } else if (adapter->hw.phy_type == e1000_phy_gg82563) { + } else if (adapter->hw.phy_type == e1000_phy_gg82563) e1000_write_phy_reg(&adapter->hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC); - } ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL); @@ -1301,9 +1300,9 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter) } if (adapter->hw.media_type == e1000_media_type_copper && - adapter->hw.phy_type == e1000_phy_m88) { + adapter->hw.phy_type == e1000_phy_m88) ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ - } else { + else { /* Set the ILOS bit on the fiber Nic is half * duplex link is detected. */ stat_reg = E1000_READ_REG(&adapter->hw, STATUS); @@ -1439,11 +1438,10 @@ e1000_loopback_cleanup(struct e1000_adapter *adapter) case e1000_82546_rev_3: default: hw->autoneg = TRUE; - if (hw->phy_type == e1000_phy_gg82563) { + if (hw->phy_type == e1000_phy_gg82563) e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180); - } e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); if (phy_reg & MII_CR_LOOPBACK) { phy_reg &= ~MII_CR_LOOPBACK; @@ -1915,8 +1913,8 @@ static struct ethtool_ops e1000_ethtool_ops = { .get_regs = e1000_get_regs, .get_wol = e1000_get_wol, .set_wol = e1000_set_wol, - .get_msglevel = e1000_get_msglevel, - .set_msglevel = e1000_set_msglevel, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, .nway_reset = e1000_nway_reset, .get_link = ethtool_op_get_link, .get_eeprom_len = e1000_get_eeprom_len, @@ -1924,17 +1922,17 @@ static struct ethtool_ops e1000_ethtool_ops = { .set_eeprom = e1000_set_eeprom, .get_ringparam = e1000_get_ringparam, .set_ringparam = e1000_set_ringparam, - .get_pauseparam = e1000_get_pauseparam, - .set_pauseparam = e1000_set_pauseparam, - .get_rx_csum = e1000_get_rx_csum, - .set_rx_csum = e1000_set_rx_csum, - .get_tx_csum = e1000_get_tx_csum, - .set_tx_csum = e1000_set_tx_csum, - .get_sg = ethtool_op_get_sg, - .set_sg = ethtool_op_set_sg, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .get_rx_csum = e1000_get_rx_csum, + .set_rx_csum = e1000_set_rx_csum, + .get_tx_csum = e1000_get_tx_csum, + .set_tx_csum = e1000_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, #ifdef NETIF_F_TSO - .get_tso = ethtool_op_get_tso, - .set_tso = e1000_set_tso, + .get_tso = ethtool_op_get_tso, + .set_tso = e1000_set_tso, #endif .self_test_count = e1000_diag_test_count, .self_test = e1000_diag_test, @@ -1942,7 +1940,7 @@ static struct ethtool_ops e1000_ethtool_ops = { .phys_id = e1000_phys_id, .get_stats_count = e1000_get_stats_count, .get_ethtool_stats = e1000_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, + .get_perm_addr = ethtool_op_get_perm_addr, }; void e1000_set_ethtool_ops(struct net_device *netdev) diff --git a/trunk/drivers/net/e1000/e1000_hw.c b/trunk/drivers/net/e1000/e1000_hw.c index f62d17848332..57749eb438e4 100644 --- a/trunk/drivers/net/e1000/e1000_hw.c +++ b/trunk/drivers/net/e1000/e1000_hw.c @@ -31,6 +31,7 @@ * Shared functions for accessing and configuring the MAC */ + #include "e1000_hw.h" static int32_t e1000_set_phy_type(struct e1000_hw *hw); @@ -166,10 +167,10 @@ e1000_set_phy_type(struct e1000_hw *hw) { DEBUGFUNC("e1000_set_phy_type"); - if(hw->mac_type == e1000_undefined) + if (hw->mac_type == 
e1000_undefined) return -E1000_ERR_PHY_TYPE; - switch(hw->phy_id) { + switch (hw->phy_id) { case M88E1000_E_PHY_ID: case M88E1000_I_PHY_ID: case M88E1011_I_PHY_ID: @@ -177,10 +178,10 @@ e1000_set_phy_type(struct e1000_hw *hw) hw->phy_type = e1000_phy_m88; break; case IGP01E1000_I_PHY_ID: - if(hw->mac_type == e1000_82541 || - hw->mac_type == e1000_82541_rev_2 || - hw->mac_type == e1000_82547 || - hw->mac_type == e1000_82547_rev_2) { + if (hw->mac_type == e1000_82541 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82547_rev_2) { hw->phy_type = e1000_phy_igp; break; } @@ -207,6 +208,7 @@ e1000_set_phy_type(struct e1000_hw *hw) return E1000_SUCCESS; } + /****************************************************************************** * IGP phy init script - initializes the GbE PHY * @@ -220,7 +222,7 @@ e1000_phy_init_script(struct e1000_hw *hw) DEBUGFUNC("e1000_phy_init_script"); - if(hw->phy_init_script) { + if (hw->phy_init_script) { msec_delay(20); /* Save off the current value of register 0x2F5B to be restored at @@ -236,7 +238,7 @@ e1000_phy_init_script(struct e1000_hw *hw) msec_delay(5); - switch(hw->mac_type) { + switch (hw->mac_type) { case e1000_82541: case e1000_82547: e1000_write_phy_reg(hw, 0x1F95, 0x0001); @@ -273,22 +275,22 @@ e1000_phy_init_script(struct e1000_hw *hw) /* Now enable the transmitter */ e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); - if(hw->mac_type == e1000_82547) { + if (hw->mac_type == e1000_82547) { uint16_t fused, fine, coarse; /* Move to analog registers page */ e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused); - if(!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) { + if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) { e1000_read_phy_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS, &fused); fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK; coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK; - if(coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { + if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10; fine -= IGP01E1000_ANALOG_FUSE_FINE_1; - } else if(coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH) + } else if (coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH) fine -= IGP01E1000_ANALOG_FUSE_FINE_10; fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) | @@ -418,7 +420,7 @@ e1000_set_mac_type(struct e1000_hw *hw) return -E1000_ERR_MAC_TYPE; } - switch(hw->mac_type) { + switch (hw->mac_type) { case e1000_ich8lan: hw->swfwhw_semaphore_present = TRUE; hw->asf_firmware_present = TRUE; @@ -456,7 +458,7 @@ e1000_set_media_type(struct e1000_hw *hw) DEBUGFUNC("e1000_set_media_type"); - if(hw->mac_type != e1000_82543) { + if (hw->mac_type != e1000_82543) { /* tbi_compatibility is only valid on 82543 */ hw->tbi_compatibility_en = FALSE; } @@ -516,16 +518,16 @@ e1000_reset_hw(struct e1000_hw *hw) DEBUGFUNC("e1000_reset_hw"); /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ - if(hw->mac_type == e1000_82542_rev2_0) { + if (hw->mac_type == e1000_82542_rev2_0) { DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); e1000_pci_clear_mwi(hw); } - if(hw->bus_type == e1000_bus_type_pci_express) { + if (hw->bus_type == e1000_bus_type_pci_express) { /* Prevent the PCI-E bus from sticking if there is no TLP connection * on the last TLP read/write transaction when MAC is reset. 
*/ - if(e1000_disable_pciex_master(hw) != E1000_SUCCESS) { + if (e1000_disable_pciex_master(hw) != E1000_SUCCESS) { DEBUGOUT("PCI-E Master disable polling has failed.\n"); } } @@ -553,14 +555,14 @@ e1000_reset_hw(struct e1000_hw *hw) ctrl = E1000_READ_REG(hw, CTRL); /* Must reset the PHY before resetting the MAC */ - if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_PHY_RST)); msec_delay(5); } /* Must acquire the MDIO ownership before MAC reset. * Ownership defaults to firmware after a reset. */ - if(hw->mac_type == e1000_82573) { + if (hw->mac_type == e1000_82573) { timeout = 10; extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); @@ -570,14 +572,14 @@ e1000_reset_hw(struct e1000_hw *hw) E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl); extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); - if(extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) + if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) break; else extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; msec_delay(2); timeout--; - } while(timeout); + } while (timeout); } /* Workaround for ICH8 bit corruption issue in FIFO memory */ @@ -595,7 +597,7 @@ e1000_reset_hw(struct e1000_hw *hw) */ DEBUGOUT("Issuing a global reset to MAC\n"); - switch(hw->mac_type) { + switch (hw->mac_type) { case e1000_82544: case e1000_82540: case e1000_82545: @@ -634,7 +636,7 @@ e1000_reset_hw(struct e1000_hw *hw) * device. Later controllers reload the EEPROM automatically, so just wait * for reload to complete. */ - switch(hw->mac_type) { + switch (hw->mac_type) { case e1000_82542_rev2_0: case e1000_82542_rev2_1: case e1000_82543: @@ -669,7 +671,7 @@ e1000_reset_hw(struct e1000_hw *hw) case e1000_ich8lan: case e1000_80003es2lan: ret_val = e1000_get_auto_rd_done(hw); - if(ret_val) + if (ret_val) /* We don't want to continue accessing MAC registers. */ return ret_val; break; @@ -680,13 +682,13 @@ e1000_reset_hw(struct e1000_hw *hw) } /* Disable HW ARPs on ASF enabled adapters */ - if(hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) { + if (hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) { manc = E1000_READ_REG(hw, MANC); manc &= ~(E1000_MANC_ARP_EN); E1000_WRITE_REG(hw, MANC, manc); } - if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { e1000_phy_init_script(hw); /* Configure activity LED after PHY reset */ @@ -704,8 +706,8 @@ e1000_reset_hw(struct e1000_hw *hw) icr = E1000_READ_REG(hw, ICR); /* If MWI was previously enabled, reenable it. 
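The 82573 branch above requests MDIO ownership by setting a bit in EXTCNF_CTRL and polling until the hardware reflects it, giving up after a bounded number of 2 ms waits. A generic sketch of that poll-with-timeout shape; the stub register below acknowledges immediately, where real hardware would only set the bit once ownership is actually granted:

  #include <stdio.h>
  #include <unistd.h>

  #define OWN_BIT 0x01u

  /* Stub for a memory-mapped control register. */
  static unsigned int fake_reg;
  static unsigned int read_reg(void)            { return fake_reg; }
  static void         write_reg(unsigned int v) { fake_reg = v; }

  static int acquire_ownership(void)
  {
      int timeout = 10;

      write_reg(read_reg() | OWN_BIT);         /* request ownership */
      do {
          if (read_reg() & OWN_BIT)            /* granted */
              return 0;
          write_reg(read_reg() | OWN_BIT);     /* re-assert and wait a little */
          usleep(2000);                        /* msec_delay(2) in the driver */
      } while (--timeout);

      return -1;                               /* never granted within the budget */
  }

  int main(void)
  {
      printf("ownership: %s\n", acquire_ownership() ? "failed" : "ok");
      return 0;
  }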
*/ - if(hw->mac_type == e1000_82542_rev2_0) { - if(hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE) + if (hw->mac_type == e1000_82542_rev2_0) { + if (hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE) e1000_pci_set_mwi(hw); } @@ -758,7 +760,7 @@ e1000_init_hw(struct e1000_hw *hw) /* Initialize Identification LED */ ret_val = e1000_id_led_init(hw); - if(ret_val) { + if (ret_val) { DEBUGOUT("Error Initializing Identification LED\n"); return ret_val; } @@ -776,7 +778,7 @@ e1000_init_hw(struct e1000_hw *hw) } /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ - if(hw->mac_type == e1000_82542_rev2_0) { + if (hw->mac_type == e1000_82542_rev2_0) { DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); e1000_pci_clear_mwi(hw); E1000_WRITE_REG(hw, RCTL, E1000_RCTL_RST); @@ -790,11 +792,11 @@ e1000_init_hw(struct e1000_hw *hw) e1000_init_rx_addrs(hw); /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ - if(hw->mac_type == e1000_82542_rev2_0) { + if (hw->mac_type == e1000_82542_rev2_0) { E1000_WRITE_REG(hw, RCTL, 0); E1000_WRITE_FLUSH(hw); msec_delay(1); - if(hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE) + if (hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE) e1000_pci_set_mwi(hw); } @@ -803,7 +805,7 @@ e1000_init_hw(struct e1000_hw *hw) mta_size = E1000_MC_TBL_SIZE; if (hw->mac_type == e1000_ich8lan) mta_size = E1000_MC_TBL_SIZE_ICH8LAN; - for(i = 0; i < mta_size; i++) { + for (i = 0; i < mta_size; i++) { E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); /* use write flush to prevent Memory Write Block (MWB) from * occuring when accessing our register space */ @@ -815,18 +817,18 @@ e1000_init_hw(struct e1000_hw *hw) * gives equal priority to transmits and receives. Valid only on * 82542 and 82543 silicon. */ - if(hw->dma_fairness && hw->mac_type <= e1000_82543) { + if (hw->dma_fairness && hw->mac_type <= e1000_82543) { ctrl = E1000_READ_REG(hw, CTRL); E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PRIOR); } - switch(hw->mac_type) { + switch (hw->mac_type) { case e1000_82545_rev_3: case e1000_82546_rev_3: break; default: /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. 
*/ - if(hw->bus_type == e1000_bus_type_pcix) { + if (hw->bus_type == e1000_bus_type_pcix) { e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd_word); e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word); @@ -834,9 +836,9 @@ e1000_init_hw(struct e1000_hw *hw) PCIX_COMMAND_MMRBC_SHIFT; stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >> PCIX_STATUS_HI_MMRBC_SHIFT; - if(stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K) + if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K) stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K; - if(cmd_mmrbc > stat_mmrbc) { + if (cmd_mmrbc > stat_mmrbc) { pcix_cmd_word &= ~PCIX_COMMAND_MMRBC_MASK; pcix_cmd_word |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT; e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, @@ -854,7 +856,7 @@ e1000_init_hw(struct e1000_hw *hw) ret_val = e1000_setup_link(hw); /* Set the transmit descriptor write-back policy */ - if(hw->mac_type > e1000_82544) { + if (hw->mac_type > e1000_82544) { ctrl = E1000_READ_REG(hw, TXDCTL); ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; switch (hw->mac_type) { @@ -905,14 +907,13 @@ e1000_init_hw(struct e1000_hw *hw) case e1000_ich8lan: ctrl = E1000_READ_REG(hw, TXDCTL1); ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; - if(hw->mac_type >= e1000_82571) + if (hw->mac_type >= e1000_82571) ctrl |= E1000_TXDCTL_COUNT_DESC; E1000_WRITE_REG(hw, TXDCTL1, ctrl); break; } - if (hw->mac_type == e1000_82573) { uint32_t gcr = E1000_READ_REG(hw, GCR); gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; @@ -956,10 +957,10 @@ e1000_adjust_serdes_amplitude(struct e1000_hw *hw) DEBUGFUNC("e1000_adjust_serdes_amplitude"); - if(hw->media_type != e1000_media_type_internal_serdes) + if (hw->media_type != e1000_media_type_internal_serdes) return E1000_SUCCESS; - switch(hw->mac_type) { + switch (hw->mac_type) { case e1000_82545_rev_3: case e1000_82546_rev_3: break; @@ -972,11 +973,11 @@ e1000_adjust_serdes_amplitude(struct e1000_hw *hw) return ret_val; } - if(eeprom_data != EEPROM_RESERVED_WORD) { + if (eeprom_data != EEPROM_RESERVED_WORD) { /* Adjust SERDES output amplitude only. */ eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK; ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data); - if(ret_val) + if (ret_val) return ret_val; } @@ -1044,10 +1045,10 @@ e1000_setup_link(struct e1000_hw *hw) * in case we get disconnected and then reconnected into a different * hub or switch with different Flow Control capabilities. */ - if(hw->mac_type == e1000_82542_rev2_0) + if (hw->mac_type == e1000_82542_rev2_0) hw->fc &= (~e1000_fc_tx_pause); - if((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1)) + if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1)) hw->fc &= (~e1000_fc_rx_pause); hw->original_fc = hw->fc; @@ -1062,12 +1063,12 @@ e1000_setup_link(struct e1000_hw *hw) * or e1000_phy_setup() is called. */ if (hw->mac_type == e1000_82543) { - ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, - 1, &eeprom_data); - if (ret_val) { - DEBUGOUT("EEPROM Read Error\n"); - return -E1000_ERR_EEPROM; - } + ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, + 1, &eeprom_data); + if (ret_val) { + DEBUGOUT("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << SWDPIO__EXT_SHIFT); E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); @@ -1100,14 +1101,14 @@ e1000_setup_link(struct e1000_hw *hw) * ability to transmit pause frames in not enabled, then these * registers will be set to 0. 
*/ - if(!(hw->fc & e1000_fc_tx_pause)) { + if (!(hw->fc & e1000_fc_tx_pause)) { E1000_WRITE_REG(hw, FCRTL, 0); E1000_WRITE_REG(hw, FCRTH, 0); } else { /* We need to set up the Receive Threshold high and low water marks * as well as (optionally) enabling the transmission of XON frames. */ - if(hw->fc_send_xon) { + if (hw->fc_send_xon) { E1000_WRITE_REG(hw, FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); E1000_WRITE_REG(hw, FCRTH, hw->fc_high_water); } else { @@ -1154,11 +1155,11 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw) * the EEPROM. */ ctrl = E1000_READ_REG(hw, CTRL); - if(hw->media_type == e1000_media_type_fiber) + if (hw->media_type == e1000_media_type_fiber) signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; ret_val = e1000_adjust_serdes_amplitude(hw); - if(ret_val) + if (ret_val) return ret_val; /* Take the link out of reset */ @@ -1166,7 +1167,7 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw) /* Adjust VCO speed to improve BER performance */ ret_val = e1000_set_vco_speed(hw); - if(ret_val) + if (ret_val) return ret_val; e1000_config_collision_dist(hw); @@ -1237,15 +1238,15 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw) * less than 500 milliseconds even if the other end is doing it in SW). * For internal serdes, we just assume a signal is present, then poll. */ - if(hw->media_type == e1000_media_type_internal_serdes || + if (hw->media_type == e1000_media_type_internal_serdes || (E1000_READ_REG(hw, CTRL) & E1000_CTRL_SWDPIN1) == signal) { DEBUGOUT("Looking for Link\n"); - for(i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { + for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { msec_delay(10); status = E1000_READ_REG(hw, STATUS); - if(status & E1000_STATUS_LU) break; + if (status & E1000_STATUS_LU) break; } - if(i == (LINK_UP_TIMEOUT / 10)) { + if (i == (LINK_UP_TIMEOUT / 10)) { DEBUGOUT("Never got a valid link from auto-neg!!!\n"); hw->autoneg_failed = 1; /* AutoNeg failed to achieve a link, so we'll call @@ -1254,7 +1255,7 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw) * non-autonegotiating link partners. */ ret_val = e1000_check_for_link(hw); - if(ret_val) { + if (ret_val) { DEBUGOUT("Error while checking for link\n"); return ret_val; } @@ -1288,7 +1289,7 @@ e1000_copper_link_preconfig(struct e1000_hw *hw) * the PHY speed and duplex configuration is. In addition, we need to * perform a hardware reset on the PHY to take it out of reset. 
*/ - if(hw->mac_type > e1000_82543) { + if (hw->mac_type > e1000_82543) { ctrl |= E1000_CTRL_SLU; ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); E1000_WRITE_REG(hw, CTRL, ctrl); @@ -1296,13 +1297,13 @@ e1000_copper_link_preconfig(struct e1000_hw *hw) ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); E1000_WRITE_REG(hw, CTRL, ctrl); ret_val = e1000_phy_hw_reset(hw); - if(ret_val) + if (ret_val) return ret_val; } /* Make sure we have a valid PHY */ ret_val = e1000_detect_gig_phy(hw); - if(ret_val) { + if (ret_val) { DEBUGOUT("Error, did not detect valid phy.\n"); return ret_val; } @@ -1310,19 +1311,19 @@ e1000_copper_link_preconfig(struct e1000_hw *hw) /* Set PHY to class A mode (if necessary) */ ret_val = e1000_set_phy_mode(hw); - if(ret_val) + if (ret_val) return ret_val; - if((hw->mac_type == e1000_82545_rev_3) || + if ((hw->mac_type == e1000_82545_rev_3) || (hw->mac_type == e1000_82546_rev_3)) { ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); phy_data |= 0x00000008; ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); } - if(hw->mac_type <= e1000_82543 || - hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 || - hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) + if (hw->mac_type <= e1000_82543 || + hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) hw->phy_reset_disable = FALSE; return E1000_SUCCESS; @@ -1352,7 +1353,7 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw) return ret_val; } - /* Wait 10ms for MAC to configure PHY from eeprom settings */ + /* Wait 15ms for MAC to configure PHY from eeprom settings */ msec_delay(15); if (hw->mac_type != e1000_ich8lan) { /* Configure activity LED after PHY reset */ @@ -1407,45 +1408,45 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw) } } ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); - if(ret_val) + if (ret_val) return ret_val; /* set auto-master slave resolution settings */ - if(hw->autoneg) { + if (hw->autoneg) { e1000_ms_type phy_ms_setting = hw->master_slave; - if(hw->ffe_config_state == e1000_ffe_config_active) + if (hw->ffe_config_state == e1000_ffe_config_active) hw->ffe_config_state = e1000_ffe_config_enabled; - if(hw->dsp_config_state == e1000_dsp_config_activated) + if (hw->dsp_config_state == e1000_dsp_config_activated) hw->dsp_config_state = e1000_dsp_config_enabled; /* when autonegotiation advertisment is only 1000Mbps then we * should disable SmartSpeed and enable Auto MasterSlave * resolution as hardware default. 
*/ - if(hw->autoneg_advertised == ADVERTISE_1000_FULL) { + if (hw->autoneg_advertised == ADVERTISE_1000_FULL) { /* Disable SmartSpeed */ - ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data); - if(ret_val) + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) return ret_val; phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = e1000_write_phy_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - phy_data); - if(ret_val) + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) return ret_val; /* Set auto Master/Slave resolution process */ ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_data &= ~CR_1000T_MS_ENABLE; ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); - if(ret_val) + if (ret_val) return ret_val; } ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); - if(ret_val) + if (ret_val) return ret_val; /* load defaults for future use */ @@ -1469,7 +1470,7 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw) break; } ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); - if(ret_val) + if (ret_val) return ret_val; } @@ -1490,12 +1491,12 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw) DEBUGFUNC("e1000_copper_link_ggp_setup"); - if(!hw->phy_reset_disable) { + if (!hw->phy_reset_disable) { /* Enable CRS on TX for half-duplex operation. */ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; @@ -1504,7 +1505,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw) ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data); - if(ret_val) + if (ret_val) return ret_val; /* Options: @@ -1515,7 +1516,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw) * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) */ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL, &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK; @@ -1540,11 +1541,11 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw) * 1 - Enabled */ phy_data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE; - if(hw->disable_polarity_correction == 1) + if (hw->disable_polarity_correction == 1) phy_data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE; ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data); - if(ret_val) + if (ret_val) return ret_val; /* SW Reset the PHY so all changes take effect */ @@ -1600,9 +1601,9 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw) return ret_val; phy_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; - ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, phy_data); + if (ret_val) return ret_val; } @@ -1637,12 +1638,12 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw) DEBUGFUNC("e1000_copper_link_mgp_setup"); - if(hw->phy_reset_disable) + if (hw->phy_reset_disable) return E1000_SUCCESS; /* Enable CRS on TX. This must be set for half-duplex operation. 
*/ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; @@ -1679,7 +1680,7 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw) * 1 - Enabled */ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; - if(hw->disable_polarity_correction == 1) + if (hw->disable_polarity_correction == 1) phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) @@ -1719,7 +1720,7 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw) /* SW Reset the PHY so all changes take effect */ ret_val = e1000_phy_reset(hw); - if(ret_val) { + if (ret_val) { DEBUGOUT("Error Resetting the PHY\n"); return ret_val; } @@ -1749,7 +1750,7 @@ e1000_copper_link_autoneg(struct e1000_hw *hw) /* If autoneg_advertised is zero, we assume it was not defaulted * by the calling code so we set to advertise full capability. */ - if(hw->autoneg_advertised == 0) + if (hw->autoneg_advertised == 0) hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; /* IFE phy only supports 10/100 */ @@ -1758,7 +1759,7 @@ e1000_copper_link_autoneg(struct e1000_hw *hw) DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); ret_val = e1000_phy_setup_autoneg(hw); - if(ret_val) { + if (ret_val) { DEBUGOUT("Error Setting up Auto-Negotiation\n"); return ret_val; } @@ -1768,20 +1769,20 @@ e1000_copper_link_autoneg(struct e1000_hw *hw) * the Auto Neg Restart bit in the PHY control register. */ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); - if(ret_val) + if (ret_val) return ret_val; /* Does the user want to wait for Auto-Neg to complete here, or * check at a later time (for example, callback routine). */ - if(hw->wait_autoneg_complete) { + if (hw->wait_autoneg_complete) { ret_val = e1000_wait_autoneg(hw); - if(ret_val) { + if (ret_val) { DEBUGOUT("Error while waiting for autoneg to complete\n"); return ret_val; } @@ -1792,7 +1793,6 @@ e1000_copper_link_autoneg(struct e1000_hw *hw) return E1000_SUCCESS; } - /****************************************************************************** * Config the MAC and the PHY after link is up. * 1) Set up the MAC to the current PHY speed/duplex @@ -1811,25 +1811,25 @@ e1000_copper_link_postconfig(struct e1000_hw *hw) int32_t ret_val; DEBUGFUNC("e1000_copper_link_postconfig"); - if(hw->mac_type >= e1000_82544) { + if (hw->mac_type >= e1000_82544) { e1000_config_collision_dist(hw); } else { ret_val = e1000_config_mac_to_phy(hw); - if(ret_val) { + if (ret_val) { DEBUGOUT("Error configuring MAC to PHY settings\n"); return ret_val; } } ret_val = e1000_config_fc_after_link_up(hw); - if(ret_val) { + if (ret_val) { DEBUGOUT("Error Configuring Flow Control\n"); return ret_val; } /* Config DSP to improve Giga link quality */ - if(hw->phy_type == e1000_phy_igp) { + if (hw->phy_type == e1000_phy_igp) { ret_val = e1000_config_dsp_after_link_change(hw, TRUE); - if(ret_val) { + if (ret_val) { DEBUGOUT("Error Configuring DSP after link up\n"); return ret_val; } @@ -1875,7 +1875,7 @@ e1000_setup_copper_link(struct e1000_hw *hw) /* Check if it is a valid PHY and set PHY mode if necessary. 
*/ ret_val = e1000_copper_link_preconfig(hw); - if(ret_val) + if (ret_val) return ret_val; switch (hw->mac_type) { @@ -1896,30 +1896,30 @@ e1000_setup_copper_link(struct e1000_hw *hw) hw->phy_type == e1000_phy_igp_3 || hw->phy_type == e1000_phy_igp_2) { ret_val = e1000_copper_link_igp_setup(hw); - if(ret_val) + if (ret_val) return ret_val; } else if (hw->phy_type == e1000_phy_m88) { ret_val = e1000_copper_link_mgp_setup(hw); - if(ret_val) + if (ret_val) return ret_val; } else if (hw->phy_type == e1000_phy_gg82563) { ret_val = e1000_copper_link_ggp_setup(hw); - if(ret_val) + if (ret_val) return ret_val; } - if(hw->autoneg) { + if (hw->autoneg) { /* Setup autoneg and flow control advertisement * and perform autonegotiation */ ret_val = e1000_copper_link_autoneg(hw); - if(ret_val) + if (ret_val) return ret_val; } else { /* PHY will be set to 10H, 10F, 100H,or 100F * depending on value from forced_speed_duplex. */ DEBUGOUT("Forcing speed and duplex\n"); ret_val = e1000_phy_force_speed_duplex(hw); - if(ret_val) { + if (ret_val) { DEBUGOUT("Error Forcing Speed and Duplex\n"); return ret_val; } @@ -1928,18 +1928,18 @@ e1000_setup_copper_link(struct e1000_hw *hw) /* Check link status. Wait up to 100 microseconds for link to become * valid. */ - for(i = 0; i < 10; i++) { + for (i = 0; i < 10; i++) { ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); - if(ret_val) + if (ret_val) return ret_val; ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); - if(ret_val) + if (ret_val) return ret_val; - if(phy_data & MII_SR_LINK_STATUS) { + if (phy_data & MII_SR_LINK_STATUS) { /* Config the MAC and PHY after link is up */ ret_val = e1000_copper_link_postconfig(hw); - if(ret_val) + if (ret_val) return ret_val; DEBUGOUT("Valid link established!!!\n"); @@ -2041,7 +2041,7 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw) /* Read the MII Auto-Neg Advertisement Register (Address 4). */ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); - if(ret_val) + if (ret_val) return ret_val; if (hw->phy_type != e1000_phy_ife) { @@ -2069,36 +2069,36 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw) DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised); /* Do we want to advertise 10 Mb Half Duplex? */ - if(hw->autoneg_advertised & ADVERTISE_10_HALF) { + if (hw->autoneg_advertised & ADVERTISE_10_HALF) { DEBUGOUT("Advertise 10mb Half duplex\n"); mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; } /* Do we want to advertise 10 Mb Full Duplex? */ - if(hw->autoneg_advertised & ADVERTISE_10_FULL) { + if (hw->autoneg_advertised & ADVERTISE_10_FULL) { DEBUGOUT("Advertise 10mb Full duplex\n"); mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; } /* Do we want to advertise 100 Mb Half Duplex? */ - if(hw->autoneg_advertised & ADVERTISE_100_HALF) { + if (hw->autoneg_advertised & ADVERTISE_100_HALF) { DEBUGOUT("Advertise 100mb Half duplex\n"); mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; } /* Do we want to advertise 100 Mb Full Duplex? */ - if(hw->autoneg_advertised & ADVERTISE_100_FULL) { + if (hw->autoneg_advertised & ADVERTISE_100_FULL) { DEBUGOUT("Advertise 100mb Full duplex\n"); mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; } /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ - if(hw->autoneg_advertised & ADVERTISE_1000_HALF) { + if (hw->autoneg_advertised & ADVERTISE_1000_HALF) { DEBUGOUT("Advertise 1000mb Half duplex requested, request denied!\n"); } /* Do we want to advertise 1000 Mb Full Duplex? 
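The advertisement setup above rebuilds the MII autoneg advertisement word from the driver's autoneg_advertised flags one capability at a time. A condensed sketch of the same mapping driven by a table; the flag and register values are illustrative, the real NWAY_AR_* constants come from the PHY register layout:

  #include <stdio.h>

  #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

  /* Driver-side capability flags (illustrative). */
  #define ADV_10_HALF   0x01u
  #define ADV_10_FULL   0x02u
  #define ADV_100_HALF  0x04u
  #define ADV_100_FULL  0x08u

  /* Advertisement register bits (illustrative). */
  #define AR_10T_HD     0x0020u
  #define AR_10T_FD     0x0040u
  #define AR_100TX_HD   0x0080u
  #define AR_100TX_FD   0x0100u

  static const struct { unsigned int adv, reg; } adv_map[] = {
      { ADV_10_HALF,  AR_10T_HD   },
      { ADV_10_FULL,  AR_10T_FD   },
      { ADV_100_HALF, AR_100TX_HD },
      { ADV_100_FULL, AR_100TX_FD },
  };

  /* Set one register bit per advertised capability. */
  static unsigned int build_autoneg_adv(unsigned int advertised)
  {
      unsigned int reg = 0;
      unsigned int i;

      for (i = 0; i < ARRAY_SIZE(adv_map); i++)
          if (advertised & adv_map[i].adv)
              reg |= adv_map[i].reg;
      return reg;
  }

  int main(void)
  {
      /* Advertise both 100 Mb modes only: prints 0x0180 with these values. */
      printf("adv word 0x%04x\n", build_autoneg_adv(ADV_100_HALF | ADV_100_FULL));
      return 0;
  }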
*/ - if(hw->autoneg_advertised & ADVERTISE_1000_FULL) { + if (hw->autoneg_advertised & ADVERTISE_1000_FULL) { DEBUGOUT("Advertise 1000mb Full duplex\n"); mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; if (hw->phy_type == e1000_phy_ife) { @@ -2160,7 +2160,7 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw) } ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); - if(ret_val) + if (ret_val) return ret_val; DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); @@ -2208,7 +2208,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) /* Read the MII Control Register. */ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg); - if(ret_val) + if (ret_val) return ret_val; /* We need to disable autoneg in order to force link and duplex. */ @@ -2216,8 +2216,8 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN; /* Are we forcing Full or Half Duplex? */ - if(hw->forced_speed_duplex == e1000_100_full || - hw->forced_speed_duplex == e1000_10_full) { + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_10_full) { /* We want to force full duplex so we SET the full duplex bits in the * Device and MII Control Registers. */ @@ -2234,7 +2234,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) } /* Are we forcing 100Mbps??? */ - if(hw->forced_speed_duplex == e1000_100_full || + if (hw->forced_speed_duplex == e1000_100_full || hw->forced_speed_duplex == e1000_100_half) { /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */ ctrl |= E1000_CTRL_SPD_100; @@ -2257,7 +2257,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) if ((hw->phy_type == e1000_phy_m88) || (hw->phy_type == e1000_phy_gg82563)) { ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if(ret_val) + if (ret_val) return ret_val; /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI @@ -2265,7 +2265,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) */ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); - if(ret_val) + if (ret_val) return ret_val; DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data); @@ -2289,20 +2289,20 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) * forced whenever speed or duplex are forced. */ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); - if(ret_val) + if (ret_val) return ret_val; } /* Write back the modified PHY MII control register. */ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg); - if(ret_val) + if (ret_val) return ret_val; udelay(1); @@ -2314,50 +2314,50 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) * only if the user has set wait_autoneg_complete to 1, which is * the default. */ - if(hw->wait_autoneg_complete) { + if (hw->wait_autoneg_complete) { /* We will wait for autoneg to complete. */ DEBUGOUT("Waiting for forced speed/duplex link.\n"); mii_status_reg = 0; /* We will wait for autoneg to complete or 4.5 seconds to expire. */ - for(i = PHY_FORCE_TIME; i > 0; i--) { + for (i = PHY_FORCE_TIME; i > 0; i--) { /* Read the MII Status Register and wait for Auto-Neg Complete bit * to be set. 
*/ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); - if(ret_val) + if (ret_val) return ret_val; ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); - if(ret_val) + if (ret_val) return ret_val; - if(mii_status_reg & MII_SR_LINK_STATUS) break; + if (mii_status_reg & MII_SR_LINK_STATUS) break; msec_delay(100); } - if((i == 0) && + if ((i == 0) && ((hw->phy_type == e1000_phy_m88) || (hw->phy_type == e1000_phy_gg82563))) { /* We didn't get link. Reset the DSP and wait again for link. */ ret_val = e1000_phy_reset_dsp(hw); - if(ret_val) { + if (ret_val) { DEBUGOUT("Error Resetting PHY DSP\n"); return ret_val; } } /* This loop will early-out if the link condition has been met. */ - for(i = PHY_FORCE_TIME; i > 0; i--) { - if(mii_status_reg & MII_SR_LINK_STATUS) break; + for (i = PHY_FORCE_TIME; i > 0; i--) { + if (mii_status_reg & MII_SR_LINK_STATUS) break; msec_delay(100); /* Read the MII Status Register and wait for Auto-Neg Complete bit * to be set. */ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); - if(ret_val) + if (ret_val) return ret_val; ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); - if(ret_val) + if (ret_val) return ret_val; } } @@ -2368,32 +2368,31 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) * defaults back to a 2.5MHz clock when the PHY is reset. */ ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_data |= M88E1000_EPSCR_TX_CLK_25; ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); - if(ret_val) + if (ret_val) return ret_val; /* In addition, because of the s/w reset above, we need to enable CRS on * TX. This must be set for both full and half duplex operation. */ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); - if(ret_val) + if (ret_val) return ret_val; - if((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) && - (!hw->autoneg) && - (hw->forced_speed_duplex == e1000_10_full || - hw->forced_speed_duplex == e1000_10_half)) { + if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) && + (!hw->autoneg) && (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { ret_val = e1000_polarity_reversal_workaround(hw); - if(ret_val) + if (ret_val) return ret_val; } } else if (hw->phy_type == e1000_phy_gg82563) { @@ -2484,10 +2483,10 @@ e1000_config_mac_to_phy(struct e1000_hw *hw) * registers depending on negotiated values. */ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); - if(ret_val) + if (ret_val) return ret_val; - if(phy_data & M88E1000_PSSR_DPLX) + if (phy_data & M88E1000_PSSR_DPLX) ctrl |= E1000_CTRL_FD; else ctrl &= ~E1000_CTRL_FD; @@ -2497,9 +2496,9 @@ e1000_config_mac_to_phy(struct e1000_hw *hw) /* Set up speed in the Device Control register depending on * negotiated values. */ - if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) ctrl |= E1000_CTRL_SPD_1000; - else if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS) + else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS) ctrl |= E1000_CTRL_SPD_100; /* Write the configured values back to the Device Control Reg. 
*/ @@ -2567,7 +2566,7 @@ e1000_force_mac_fc(struct e1000_hw *hw) } /* Disable TX Flow Control for 82542 (rev 2.0) */ - if(hw->mac_type == e1000_82542_rev2_0) + if (hw->mac_type == e1000_82542_rev2_0) ctrl &= (~E1000_CTRL_TFCE); E1000_WRITE_REG(hw, CTRL, ctrl); @@ -2601,11 +2600,12 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw) * so we had to force link. In this case, we need to force the * configuration of the MAC to match the "fc" parameter. */ - if(((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) || - ((hw->media_type == e1000_media_type_internal_serdes) && (hw->autoneg_failed)) || - ((hw->media_type == e1000_media_type_copper) && (!hw->autoneg))) { + if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) || + ((hw->media_type == e1000_media_type_internal_serdes) && + (hw->autoneg_failed)) || + ((hw->media_type == e1000_media_type_copper) && (!hw->autoneg))) { ret_val = e1000_force_mac_fc(hw); - if(ret_val) { + if (ret_val) { DEBUGOUT("Error forcing flow control settings\n"); return ret_val; } @@ -2616,19 +2616,19 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw) * has completed, and if so, how the PHY and link partner has * flow control configured. */ - if((hw->media_type == e1000_media_type_copper) && hw->autoneg) { + if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) { /* Read the MII Status Register and check to see if AutoNeg * has completed. We read this twice because this reg has * some "sticky" (latched) bits. */ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); - if(ret_val) + if (ret_val) return ret_val; ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); - if(ret_val) + if (ret_val) return ret_val; - if(mii_status_reg & MII_SR_AUTONEG_COMPLETE) { + if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) { /* The AutoNeg process has completed, so we now need to * read both the Auto Negotiation Advertisement Register * (Address 4) and the Auto_Negotiation Base Page Ability @@ -2637,11 +2637,11 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw) */ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg); - if(ret_val) + if (ret_val) return ret_val; ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg); - if(ret_val) + if (ret_val) return ret_val; /* Two bits in the Auto Negotiation Advertisement Register @@ -2678,15 +2678,15 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw) * 1 | DC | 1 | DC | e1000_fc_full * */ - if((mii_nway_adv_reg & NWAY_AR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { /* Now we need to check if the user selected RX ONLY * of pause frames. In this case, we had to advertise * FULL flow control because we could not advertise RX * ONLY. Hence, we must now check to see if we need to * turn OFF the TRANSMISSION of PAUSE frames. 
*/ - if(hw->original_fc == e1000_fc_full) { + if (hw->original_fc == e1000_fc_full) { hw->fc = e1000_fc_full; DEBUGOUT("Flow Control = FULL.\n"); } else { @@ -2702,10 +2702,10 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw) * 0 | 1 | 1 | 1 | e1000_fc_tx_pause * */ - else if(!(mii_nway_adv_reg & NWAY_AR_PAUSE) && - (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && - (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc = e1000_fc_tx_pause; DEBUGOUT("Flow Control = TX PAUSE frames only.\n"); } @@ -2717,10 +2717,10 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw) * 1 | 1 | 0 | 1 | e1000_fc_rx_pause * */ - else if((mii_nway_adv_reg & NWAY_AR_PAUSE) && - (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && - !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc = e1000_fc_rx_pause; DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); } @@ -2744,9 +2744,9 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw) * be asked to delay transmission of packets than asking * our link partner to pause transmission of frames. */ - else if((hw->original_fc == e1000_fc_none || - hw->original_fc == e1000_fc_tx_pause) || - hw->fc_strict_ieee) { + else if ((hw->original_fc == e1000_fc_none || + hw->original_fc == e1000_fc_tx_pause) || + hw->fc_strict_ieee) { hw->fc = e1000_fc_none; DEBUGOUT("Flow Control = NONE.\n"); } else { @@ -2759,19 +2759,19 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw) * enabled per IEEE 802.3 spec. */ ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); - if(ret_val) { + if (ret_val) { DEBUGOUT("Error getting link speed and duplex\n"); return ret_val; } - if(duplex == HALF_DUPLEX) + if (duplex == HALF_DUPLEX) hw->fc = e1000_fc_none; /* Now we call a subroutine to actually force the MAC * controller to use the correct flow control settings. */ ret_val = e1000_force_mac_fc(hw); - if(ret_val) { + if (ret_val) { DEBUGOUT("Error forcing flow control settings\n"); return ret_val; } @@ -2810,13 +2810,13 @@ e1000_check_for_link(struct e1000_hw *hw) * set when the optics detect a signal. On older adapters, it will be * cleared when there is a signal. This applies to fiber media only. */ - if((hw->media_type == e1000_media_type_fiber) || - (hw->media_type == e1000_media_type_internal_serdes)) { + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) { rxcw = E1000_READ_REG(hw, RXCW); - if(hw->media_type == e1000_media_type_fiber) { + if (hw->media_type == e1000_media_type_fiber) { signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; - if(status & E1000_STATUS_LU) + if (status & E1000_STATUS_LU) hw->get_link_status = FALSE; } } @@ -2827,20 +2827,20 @@ e1000_check_for_link(struct e1000_hw *hw) * receive a Link Status Change interrupt or we have Rx Sequence * Errors. */ - if((hw->media_type == e1000_media_type_copper) && hw->get_link_status) { + if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) { /* First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex * of the PHY. 
* Read the register twice since the link bit is sticky. */ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); - if(ret_val) + if (ret_val) return ret_val; ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); - if(ret_val) + if (ret_val) return ret_val; - if(phy_data & MII_SR_LINK_STATUS) { + if (phy_data & MII_SR_LINK_STATUS) { hw->get_link_status = FALSE; /* Check if there was DownShift, must be checked immediately after * link-up */ @@ -2854,10 +2854,10 @@ e1000_check_for_link(struct e1000_hw *hw) * happen due to the execution of this workaround. */ - if((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) && - (!hw->autoneg) && - (hw->forced_speed_duplex == e1000_10_full || - hw->forced_speed_duplex == e1000_10_half)) { + if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { E1000_WRITE_REG(hw, IMC, 0xffffffff); ret_val = e1000_polarity_reversal_workaround(hw); icr = E1000_READ_REG(hw, ICR); @@ -2874,7 +2874,7 @@ e1000_check_for_link(struct e1000_hw *hw) /* If we are forcing speed/duplex, then we simply return since * we have already determined whether we have link or not. */ - if(!hw->autoneg) return -E1000_ERR_CONFIG; + if (!hw->autoneg) return -E1000_ERR_CONFIG; /* optimize the dsp settings for the igp phy */ e1000_config_dsp_after_link_change(hw, TRUE); @@ -2887,11 +2887,11 @@ e1000_check_for_link(struct e1000_hw *hw) * speed/duplex on the MAC to the current PHY speed/duplex * settings. */ - if(hw->mac_type >= e1000_82544) + if (hw->mac_type >= e1000_82544) e1000_config_collision_dist(hw); else { ret_val = e1000_config_mac_to_phy(hw); - if(ret_val) { + if (ret_val) { DEBUGOUT("Error configuring MAC to PHY settings\n"); return ret_val; } @@ -2902,7 +2902,7 @@ e1000_check_for_link(struct e1000_hw *hw) * have had to re-autoneg with a different link partner. */ ret_val = e1000_config_fc_after_link_up(hw); - if(ret_val) { + if (ret_val) { DEBUGOUT("Error configuring flow control\n"); return ret_val; } @@ -2914,7 +2914,7 @@ e1000_check_for_link(struct e1000_hw *hw) * at gigabit speed, then TBI compatibility is not needed. If we are * at gigabit speed, we turn on TBI compatibility. */ - if(hw->tbi_compatibility_en) { + if (hw->tbi_compatibility_en) { uint16_t speed, duplex; ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); if (ret_val) { @@ -2925,7 +2925,7 @@ e1000_check_for_link(struct e1000_hw *hw) /* If link speed is not set to gigabit speed, we do not need * to enable TBI compatibility. */ - if(hw->tbi_compatibility_on) { + if (hw->tbi_compatibility_on) { /* If we previously were in the mode, turn it off. */ rctl = E1000_READ_REG(hw, RCTL); rctl &= ~E1000_RCTL_SBP; @@ -2938,7 +2938,7 @@ e1000_check_for_link(struct e1000_hw *hw) * packets. Some frames have an additional byte on the end and * will look like CRC errors to to the hardware. */ - if(!hw->tbi_compatibility_on) { + if (!hw->tbi_compatibility_on) { hw->tbi_compatibility_on = TRUE; rctl = E1000_READ_REG(hw, RCTL); rctl |= E1000_RCTL_SBP; @@ -2954,12 +2954,12 @@ e1000_check_for_link(struct e1000_hw *hw) * auto-negotiation time to complete, in case the cable was just plugged * in. The autoneg_failed flag does this. 
*/ - else if((((hw->media_type == e1000_media_type_fiber) && + else if ((((hw->media_type == e1000_media_type_fiber) && ((ctrl & E1000_CTRL_SWDPIN1) == signal)) || - (hw->media_type == e1000_media_type_internal_serdes)) && - (!(status & E1000_STATUS_LU)) && - (!(rxcw & E1000_RXCW_C))) { - if(hw->autoneg_failed == 0) { + (hw->media_type == e1000_media_type_internal_serdes)) && + (!(status & E1000_STATUS_LU)) && + (!(rxcw & E1000_RXCW_C))) { + if (hw->autoneg_failed == 0) { hw->autoneg_failed = 1; return 0; } @@ -2975,7 +2975,7 @@ e1000_check_for_link(struct e1000_hw *hw) /* Configure Flow Control after forcing link up. */ ret_val = e1000_config_fc_after_link_up(hw); - if(ret_val) { + if (ret_val) { DEBUGOUT("Error configuring flow control\n"); return ret_val; } @@ -2985,9 +2985,9 @@ e1000_check_for_link(struct e1000_hw *hw) * Device Control register in an attempt to auto-negotiate with our link * partner. */ - else if(((hw->media_type == e1000_media_type_fiber) || - (hw->media_type == e1000_media_type_internal_serdes)) && - (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + else if (((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) && + (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n"); E1000_WRITE_REG(hw, TXCW, hw->txcw); E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU)); @@ -2997,12 +2997,12 @@ e1000_check_for_link(struct e1000_hw *hw) /* If we force link for non-auto-negotiation switch, check link status * based on MAC synchronization for internal serdes media type. */ - else if((hw->media_type == e1000_media_type_internal_serdes) && - !(E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) { + else if ((hw->media_type == e1000_media_type_internal_serdes) && + !(E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) { /* SYNCH bit and IV bit are sticky. */ udelay(10); - if(E1000_RXCW_SYNCH & E1000_READ_REG(hw, RXCW)) { - if(!(rxcw & E1000_RXCW_IV)) { + if (E1000_RXCW_SYNCH & E1000_READ_REG(hw, RXCW)) { + if (!(rxcw & E1000_RXCW_IV)) { hw->serdes_link_down = FALSE; DEBUGOUT("SERDES: Link is up.\n"); } @@ -3011,8 +3011,8 @@ e1000_check_for_link(struct e1000_hw *hw) DEBUGOUT("SERDES: Link is down.\n"); } } - if((hw->media_type == e1000_media_type_internal_serdes) && - (E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) { + if ((hw->media_type == e1000_media_type_internal_serdes) && + (E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) { hw->serdes_link_down = !(E1000_STATUS_LU & E1000_READ_REG(hw, STATUS)); } return E1000_SUCCESS; @@ -3036,12 +3036,12 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw, DEBUGFUNC("e1000_get_speed_and_duplex"); - if(hw->mac_type >= e1000_82543) { + if (hw->mac_type >= e1000_82543) { status = E1000_READ_REG(hw, STATUS); - if(status & E1000_STATUS_SPEED_1000) { + if (status & E1000_STATUS_SPEED_1000) { *speed = SPEED_1000; DEBUGOUT("1000 Mbs, "); - } else if(status & E1000_STATUS_SPEED_100) { + } else if (status & E1000_STATUS_SPEED_100) { *speed = SPEED_100; DEBUGOUT("100 Mbs, "); } else { @@ -3049,7 +3049,7 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw, DEBUGOUT("10 Mbs, "); } - if(status & E1000_STATUS_FD) { + if (status & E1000_STATUS_FD) { *duplex = FULL_DUPLEX; DEBUGOUT("Full Duplex\n"); } else { @@ -3066,18 +3066,18 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw, * if it is operating at half duplex. Here we set the duplex settings to * match the duplex in the link partner's capabilities. 
*/ - if(hw->phy_type == e1000_phy_igp && hw->speed_downgraded) { + if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) { ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data); - if(ret_val) + if (ret_val) return ret_val; - if(!(phy_data & NWAY_ER_LP_NWAY_CAPS)) + if (!(phy_data & NWAY_ER_LP_NWAY_CAPS)) *duplex = HALF_DUPLEX; else { ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data); - if(ret_val) + if (ret_val) return ret_val; - if((*speed == SPEED_100 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) || + if ((*speed == SPEED_100 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) || (*speed == SPEED_10 && !(phy_data & NWAY_LPAR_10T_FD_CAPS))) *duplex = HALF_DUPLEX; } @@ -3118,17 +3118,17 @@ e1000_wait_autoneg(struct e1000_hw *hw) DEBUGOUT("Waiting for Auto-Neg to complete.\n"); /* We will wait for autoneg to complete or 4.5 seconds to expire. */ - for(i = PHY_AUTO_NEG_TIME; i > 0; i--) { + for (i = PHY_AUTO_NEG_TIME; i > 0; i--) { /* Read the MII Status Register and wait for Auto-Neg * Complete bit to be set. */ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); - if(ret_val) + if (ret_val) return ret_val; ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); - if(ret_val) + if (ret_val) return ret_val; - if(phy_data & MII_SR_AUTONEG_COMPLETE) { + if (phy_data & MII_SR_AUTONEG_COMPLETE) { return E1000_SUCCESS; } msec_delay(100); @@ -3201,14 +3201,16 @@ e1000_shift_out_mdi_bits(struct e1000_hw *hw, /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */ ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); - while(mask) { + while (mask) { /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and * then raising and lowering the Management Data Clock. A "0" is * shifted out to the PHY by setting the MDIO bit to "0" and then * raising and lowering the clock. */ - if(data & mask) ctrl |= E1000_CTRL_MDIO; - else ctrl &= ~E1000_CTRL_MDIO; + if (data & mask) + ctrl |= E1000_CTRL_MDIO; + else + ctrl &= ~E1000_CTRL_MDIO; E1000_WRITE_REG(hw, CTRL, ctrl); E1000_WRITE_FLUSH(hw); @@ -3259,12 +3261,13 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw) e1000_raise_mdi_clk(hw, &ctrl); e1000_lower_mdi_clk(hw, &ctrl); - for(data = 0, i = 0; i < 16; i++) { + for (data = 0, i = 0; i < 16; i++) { data = data << 1; e1000_raise_mdi_clk(hw, &ctrl); ctrl = E1000_READ_REG(hw, CTRL); /* Check to see if we shifted in a "1". */ - if(ctrl & E1000_CTRL_MDIO) data |= 1; + if (ctrl & E1000_CTRL_MDIO) + data |= 1; e1000_lower_mdi_clk(hw, &ctrl); } @@ -3290,7 +3293,7 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask) if (!hw->swfw_sync_present) return e1000_get_hw_eeprom_semaphore(hw); - while(timeout) { + while (timeout) { if (e1000_get_hw_eeprom_semaphore(hw)) return -E1000_ERR_SWFW_SYNC; @@ -3379,7 +3382,7 @@ e1000_read_phy_reg(struct e1000_hw *hw, (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, (uint16_t)reg_addr); - if(ret_val) { + if (ret_val) { e1000_swfw_sync_release(hw, swfw); return ret_val; } @@ -3424,12 +3427,12 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw, DEBUGFUNC("e1000_read_phy_reg_ex"); - if(reg_addr > MAX_PHY_REG_ADDRESS) { + if (reg_addr > MAX_PHY_REG_ADDRESS) { DEBUGOUT1("PHY Address %d is out of range\n", reg_addr); return -E1000_ERR_PARAM; } - if(hw->mac_type > e1000_82543) { + if (hw->mac_type > e1000_82543) { /* Set up Op-code, Phy Address, and register address in the MDI * Control register. The MAC will take care of interfacing with the * PHY to retrieve the desired data. 
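/*
 * For reference, a minimal sketch of the MDIC access pattern that the
 * e1000_read_phy_reg_ex hunks around here reformat: compose a command in
 * the MDI Control register, write it, poll E1000_MDIC_READY, then check
 * E1000_MDIC_ERROR before using the result.  Only the register and flag
 * names visible in the diff are taken from the driver; the EXAMPLE_*
 * field offsets below are illustrative assumptions, not e1000_hw.h
 * definitions, and this is not a hunk of the patch itself.
 */
#define EXAMPLE_MDIC_REG_SHIFT  16          /* assumed position of the register address */
#define EXAMPLE_MDIC_PHY_SHIFT  21          /* assumed position of the PHY address */
#define EXAMPLE_MDIC_OP_READ    (2u << 26)  /* assumed encoding of the read op-code */

static int32_t example_mdic_read(struct e1000_hw *hw, uint32_t phy_addr,
                                 uint32_t reg_addr, uint16_t *data)
{
    uint32_t mdic;
    int i;

    /* Pack op-code, PHY address and register address into one command word. */
    mdic = (reg_addr << EXAMPLE_MDIC_REG_SHIFT) |
           (phy_addr << EXAMPLE_MDIC_PHY_SHIFT) |
           EXAMPLE_MDIC_OP_READ;
    E1000_WRITE_REG(hw, MDIC, mdic);

    /* Poll the ready bit, mirroring the 64 x 50us loop in the next hunk. */
    for (i = 0; i < 64; i++) {
        udelay(50);
        mdic = E1000_READ_REG(hw, MDIC);
        if (mdic & E1000_MDIC_READY)
            break;
    }
    if (!(mdic & E1000_MDIC_READY) || (mdic & E1000_MDIC_ERROR))
        return -E1000_ERR_PHY;

    *data = (uint16_t)mdic;    /* the read value comes back in the low bits of MDIC */
    return E1000_SUCCESS;
}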
@@ -3441,16 +3444,16 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw, E1000_WRITE_REG(hw, MDIC, mdic); /* Poll the ready bit to see if the MDI read completed */ - for(i = 0; i < 64; i++) { + for (i = 0; i < 64; i++) { udelay(50); mdic = E1000_READ_REG(hw, MDIC); - if(mdic & E1000_MDIC_READY) break; + if (mdic & E1000_MDIC_READY) break; } - if(!(mdic & E1000_MDIC_READY)) { + if (!(mdic & E1000_MDIC_READY)) { DEBUGOUT("MDI Read did not complete\n"); return -E1000_ERR_PHY; } - if(mdic & E1000_MDIC_ERROR) { + if (mdic & E1000_MDIC_ERROR) { DEBUGOUT("MDI Error\n"); return -E1000_ERR_PHY; } @@ -3519,7 +3522,7 @@ e1000_write_phy_reg(struct e1000_hw *hw, (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, (uint16_t)reg_addr); - if(ret_val) { + if (ret_val) { e1000_swfw_sync_release(hw, swfw); return ret_val; } @@ -3564,12 +3567,12 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, DEBUGFUNC("e1000_write_phy_reg_ex"); - if(reg_addr > MAX_PHY_REG_ADDRESS) { + if (reg_addr > MAX_PHY_REG_ADDRESS) { DEBUGOUT1("PHY Address %d is out of range\n", reg_addr); return -E1000_ERR_PARAM; } - if(hw->mac_type > e1000_82543) { + if (hw->mac_type > e1000_82543) { /* Set up Op-code, Phy Address, register address, and data intended * for the PHY register in the MDI Control register. The MAC will take * care of interfacing with the PHY to send the desired data. @@ -3582,12 +3585,12 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, E1000_WRITE_REG(hw, MDIC, mdic); /* Poll the ready bit to see if the MDI read completed */ - for(i = 0; i < 640; i++) { + for (i = 0; i < 641; i++) { udelay(5); mdic = E1000_READ_REG(hw, MDIC); - if(mdic & E1000_MDIC_READY) break; + if (mdic & E1000_MDIC_READY) break; } - if(!(mdic & E1000_MDIC_READY)) { + if (!(mdic & E1000_MDIC_READY)) { DEBUGOUT("MDI Write did not complete\n"); return -E1000_ERR_PHY; } @@ -3699,7 +3702,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw) DEBUGOUT("Resetting Phy...\n"); - if(hw->mac_type > e1000_82543) { + if (hw->mac_type > e1000_82543) { if ((hw->mac_type == e1000_80003es2lan) && (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { swfw = E1000_SWFW_PHY1_SM; @@ -3747,7 +3750,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw) } udelay(150); - if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { /* Configure activity LED after PHY reset */ led_ctrl = E1000_READ_REG(hw, LEDCTL); led_ctrl &= IGP_ACTIVITY_LED_MASK; @@ -3757,14 +3760,13 @@ e1000_phy_hw_reset(struct e1000_hw *hw) /* Wait for FW to finish PHY configuration. 
*/ ret_val = e1000_get_phy_cfg_done(hw); + if (ret_val != E1000_SUCCESS) + return ret_val; e1000_release_software_semaphore(hw); - if ((hw->mac_type == e1000_ich8lan) && - (hw->phy_type == e1000_phy_igp_3)) { - ret_val = e1000_init_lcd_from_nvm(hw); - if (ret_val) - return ret_val; - } + if ((hw->mac_type == e1000_ich8lan) && (hw->phy_type == e1000_phy_igp_3)) + ret_val = e1000_init_lcd_from_nvm(hw); + return ret_val; } @@ -3795,25 +3797,25 @@ e1000_phy_reset(struct e1000_hw *hw) case e1000_82572: case e1000_ich8lan: ret_val = e1000_phy_hw_reset(hw); - if(ret_val) + if (ret_val) return ret_val; break; default: ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_data |= MII_CR_RESET; ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); - if(ret_val) + if (ret_val) return ret_val; udelay(1); break; } - if(hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2) + if (hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2) e1000_phy_init_script(hw); return E1000_SUCCESS; @@ -3891,8 +3893,8 @@ e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw) if (hw->kmrn_lock_loss_workaround_disabled) return E1000_SUCCESS; - /* Make sure link is up before proceeding. If not just return. - * Attempting this while link is negotiating fouls up link + /* Make sure link is up before proceeding. If not just return. + * Attempting this while link is negotiating fouled up link * stability */ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); @@ -3969,34 +3971,34 @@ e1000_detect_gig_phy(struct e1000_hw *hw) hw->phy_id = (uint32_t) (phy_id_high << 16); udelay(20); ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); - if(ret_val) + if (ret_val) return ret_val; hw->phy_id |= (uint32_t) (phy_id_low & PHY_REVISION_MASK); hw->phy_revision = (uint32_t) phy_id_low & ~PHY_REVISION_MASK; - switch(hw->mac_type) { + switch (hw->mac_type) { case e1000_82543: - if(hw->phy_id == M88E1000_E_PHY_ID) match = TRUE; + if (hw->phy_id == M88E1000_E_PHY_ID) match = TRUE; break; case e1000_82544: - if(hw->phy_id == M88E1000_I_PHY_ID) match = TRUE; + if (hw->phy_id == M88E1000_I_PHY_ID) match = TRUE; break; case e1000_82540: case e1000_82545: case e1000_82545_rev_3: case e1000_82546: case e1000_82546_rev_3: - if(hw->phy_id == M88E1011_I_PHY_ID) match = TRUE; + if (hw->phy_id == M88E1011_I_PHY_ID) match = TRUE; break; case e1000_82541: case e1000_82541_rev_2: case e1000_82547: case e1000_82547_rev_2: - if(hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE; + if (hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE; break; case e1000_82573: - if(hw->phy_id == M88E1111_I_PHY_ID) match = TRUE; + if (hw->phy_id == M88E1111_I_PHY_ID) match = TRUE; break; case e1000_80003es2lan: if (hw->phy_id == GG82563_E_PHY_ID) match = TRUE; @@ -4035,14 +4037,14 @@ e1000_phy_reset_dsp(struct e1000_hw *hw) do { if (hw->phy_type != e1000_phy_gg82563) { ret_val = e1000_write_phy_reg(hw, 29, 0x001d); - if(ret_val) break; + if (ret_val) break; } ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); - if(ret_val) break; + if (ret_val) break; ret_val = e1000_write_phy_reg(hw, 30, 0x0000); - if(ret_val) break; + if (ret_val) break; ret_val = E1000_SUCCESS; - } while(0); + } while (0); return ret_val; } @@ -4053,7 +4055,7 @@ e1000_phy_reset_dsp(struct e1000_hw *hw) * hw - Struct containing variables accessed by shared code * phy_info - PHY information structure ******************************************************************************/ 
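/*
 * For reference, a standalone sketch of the PHY identification step that
 * e1000_detect_gig_phy performs above: splice the two 16-bit MII ID
 * registers into one 32-bit identifier, strip the revision bits, and
 * compare against the expected ID for the MAC type.  This is an
 * illustrative aside, not part of the patch; EXAMPLE_PHY_REVISION_MASK
 * assumes the low four bits of ID register 2 carry the revision, which
 * is how the masking in the diff is used.
 */
#include <stdint.h>

#define EXAMPLE_PHY_REVISION_MASK 0xFFFFFFF0u   /* assumed layout: low 4 bits = revision */

struct example_phy_id {
    uint32_t id;        /* OUI + model, revision masked off */
    uint32_t revision;  /* revision bits from ID register 2 */
};

static struct example_phy_id example_compose_phy_id(uint16_t id_high, uint16_t id_low)
{
    struct example_phy_id out;

    out.id  = (uint32_t)id_high << 16;                       /* upper word from PHY_ID1 */
    out.id |= (uint32_t)id_low & EXAMPLE_PHY_REVISION_MASK;  /* model bits from PHY_ID2 */
    out.revision = (uint32_t)id_low & ~EXAMPLE_PHY_REVISION_MASK;
    return out;
}

/* Usage: a match test in the spirit of the switch on hw->mac_type above. */
static int example_is_expected_phy(uint16_t id_high, uint16_t id_low,
                                   uint32_t expected_id)
{
    return example_compose_phy_id(id_high, id_low).id == expected_id;
}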
-static int32_t +int32_t e1000_phy_igp_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) { @@ -4074,23 +4076,23 @@ e1000_phy_igp_get_info(struct e1000_hw *hw, /* Check polarity status */ ret_val = e1000_check_polarity(hw, &polarity); - if(ret_val) + if (ret_val) return ret_val; phy_info->cable_polarity = polarity; ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_info->mdix_mode = (phy_data & IGP01E1000_PSSR_MDIX) >> IGP01E1000_PSSR_MDIX_SHIFT; - if((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == IGP01E1000_PSSR_SPEED_1000MBPS) { /* Local/Remote Receiver Information are only valid at 1000 Mbps */ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_info->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) >> @@ -4100,19 +4102,19 @@ e1000_phy_igp_get_info(struct e1000_hw *hw, /* Get cable length */ ret_val = e1000_get_cable_length(hw, &min_length, &max_length); - if(ret_val) + if (ret_val) return ret_val; /* Translate to old method */ average = (max_length + min_length) / 2; - if(average <= e1000_igp_cable_length_50) + if (average <= e1000_igp_cable_length_50) phy_info->cable_length = e1000_cable_length_50; - else if(average <= e1000_igp_cable_length_80) + else if (average <= e1000_igp_cable_length_80) phy_info->cable_length = e1000_cable_length_50_80; - else if(average <= e1000_igp_cable_length_110) + else if (average <= e1000_igp_cable_length_110) phy_info->cable_length = e1000_cable_length_80_110; - else if(average <= e1000_igp_cable_length_140) + else if (average <= e1000_igp_cable_length_140) phy_info->cable_length = e1000_cable_length_110_140; else phy_info->cable_length = e1000_cable_length_140; @@ -4188,7 +4190,7 @@ e1000_phy_m88_get_info(struct e1000_hw *hw, phy_info->downshift = (e1000_downshift)hw->speed_downgraded; ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_info->extended_10bt_distance = @@ -4200,12 +4202,12 @@ e1000_phy_m88_get_info(struct e1000_hw *hw, /* Check polarity status */ ret_val = e1000_check_polarity(hw, &polarity); - if(ret_val) + if (ret_val) return ret_val; phy_info->cable_polarity = polarity; ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_info->mdix_mode = (phy_data & M88E1000_PSSR_MDIX) >> @@ -4228,7 +4230,7 @@ e1000_phy_m88_get_info(struct e1000_hw *hw, } ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_info->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) >> @@ -4265,20 +4267,20 @@ e1000_phy_get_info(struct e1000_hw *hw, phy_info->local_rx = e1000_1000t_rx_status_undefined; phy_info->remote_rx = e1000_1000t_rx_status_undefined; - if(hw->media_type != e1000_media_type_copper) { + if (hw->media_type != e1000_media_type_copper) { DEBUGOUT("PHY info is only valid for copper media\n"); return -E1000_ERR_CONFIG; } ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); - if(ret_val) + if (ret_val) return ret_val; ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); - if(ret_val) + if (ret_val) return ret_val; - if((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) { + if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) { DEBUGOUT("PHY info is only valid if link is up\n"); return -E1000_ERR_CONFIG; } @@ -4298,7 +4300,7 @@ e1000_validate_mdi_setting(struct 
e1000_hw *hw) { DEBUGFUNC("e1000_validate_mdi_settings"); - if(!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { + if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { DEBUGOUT("Invalid MDI setting detected\n"); hw->mdix = 1; return -E1000_ERR_CONFIG; @@ -4345,7 +4347,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw) eeprom->type = e1000_eeprom_microwire; eeprom->opcode_bits = 3; eeprom->delay_usec = 50; - if(eecd & E1000_EECD_SIZE) { + if (eecd & E1000_EECD_SIZE) { eeprom->word_size = 256; eeprom->address_bits = 8; } else { @@ -4413,7 +4415,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw) } eeprom->use_eerd = TRUE; eeprom->use_eewr = TRUE; - if(e1000_is_onboard_nvm_eeprom(hw) == FALSE) { + if (e1000_is_onboard_nvm_eeprom(hw) == FALSE) { eeprom->type = e1000_eeprom_flash; eeprom->word_size = 2048; @@ -4474,17 +4476,17 @@ e1000_init_eeprom_params(struct e1000_hw *hw) /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to * 32KB (incremented by powers of 2). */ - if(hw->mac_type <= e1000_82547_rev_2) { + if (hw->mac_type <= e1000_82547_rev_2) { /* Set to default value for initial eeprom read. */ eeprom->word_size = 64; ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size); - if(ret_val) + if (ret_val) return ret_val; eeprom_size = (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; /* 256B eeprom size was not supported in earlier hardware, so we * bump eeprom_size up one to ensure that "1" (which maps to 256B) * is never the result used in the shifting logic below. */ - if(eeprom_size) + if (eeprom_size) eeprom_size++; } else { eeprom_size = (uint16_t)((eecd & E1000_EECD_SIZE_EX_MASK) >> @@ -4569,7 +4571,7 @@ e1000_shift_out_ee_bits(struct e1000_hw *hw, */ eecd &= ~E1000_EECD_DI; - if(data & mask) + if (data & mask) eecd |= E1000_EECD_DI; E1000_WRITE_REG(hw, EECD, eecd); @@ -4582,7 +4584,7 @@ e1000_shift_out_ee_bits(struct e1000_hw *hw, mask = mask >> 1; - } while(mask); + } while (mask); /* We leave the "DI" bit set to "0" when we leave this routine. 
*/ eecd &= ~E1000_EECD_DI; @@ -4614,14 +4616,14 @@ e1000_shift_in_ee_bits(struct e1000_hw *hw, eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); data = 0; - for(i = 0; i < count; i++) { + for (i = 0; i < count; i++) { data = data << 1; e1000_raise_ee_clk(hw, &eecd); eecd = E1000_READ_REG(hw, EECD); eecd &= ~(E1000_EECD_DI); - if(eecd & E1000_EECD_DO) + if (eecd & E1000_EECD_DO) data |= 1; e1000_lower_ee_clk(hw, &eecd); @@ -4652,17 +4654,17 @@ e1000_acquire_eeprom(struct e1000_hw *hw) if (hw->mac_type != e1000_82573) { /* Request EEPROM Access */ - if(hw->mac_type > e1000_82544) { + if (hw->mac_type > e1000_82544) { eecd |= E1000_EECD_REQ; E1000_WRITE_REG(hw, EECD, eecd); eecd = E1000_READ_REG(hw, EECD); - while((!(eecd & E1000_EECD_GNT)) && + while ((!(eecd & E1000_EECD_GNT)) && (i < E1000_EEPROM_GRANT_ATTEMPTS)) { i++; udelay(5); eecd = E1000_READ_REG(hw, EECD); } - if(!(eecd & E1000_EECD_GNT)) { + if (!(eecd & E1000_EECD_GNT)) { eecd &= ~E1000_EECD_REQ; E1000_WRITE_REG(hw, EECD, eecd); DEBUGOUT("Could not acquire EEPROM grant\n"); @@ -4705,7 +4707,7 @@ e1000_standby_eeprom(struct e1000_hw *hw) eecd = E1000_READ_REG(hw, EECD); - if(eeprom->type == e1000_eeprom_microwire) { + if (eeprom->type == e1000_eeprom_microwire) { eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); E1000_WRITE_REG(hw, EECD, eecd); E1000_WRITE_FLUSH(hw); @@ -4728,7 +4730,7 @@ e1000_standby_eeprom(struct e1000_hw *hw) E1000_WRITE_REG(hw, EECD, eecd); E1000_WRITE_FLUSH(hw); udelay(eeprom->delay_usec); - } else if(eeprom->type == e1000_eeprom_spi) { + } else if (eeprom->type == e1000_eeprom_spi) { /* Toggle CS to flush commands */ eecd |= E1000_EECD_CS; E1000_WRITE_REG(hw, EECD, eecd); @@ -4762,7 +4764,7 @@ e1000_release_eeprom(struct e1000_hw *hw) E1000_WRITE_REG(hw, EECD, eecd); udelay(hw->eeprom.delay_usec); - } else if(hw->eeprom.type == e1000_eeprom_microwire) { + } else if (hw->eeprom.type == e1000_eeprom_microwire) { /* cleanup eeprom */ /* CS on Microwire is active-high */ @@ -4784,7 +4786,7 @@ e1000_release_eeprom(struct e1000_hw *hw) } /* Stop requesting EEPROM access */ - if(hw->mac_type > e1000_82544) { + if (hw->mac_type > e1000_82544) { eecd &= ~E1000_EECD_REQ; E1000_WRITE_REG(hw, EECD, eecd); } @@ -4822,12 +4824,12 @@ e1000_spi_eeprom_ready(struct e1000_hw *hw) retry_count += 5; e1000_standby_eeprom(hw); - } while(retry_count < EEPROM_MAX_RETRY_SPI); + } while (retry_count < EEPROM_MAX_RETRY_SPI); /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and * only 0-5mSec on 5V devices) */ - if(retry_count >= EEPROM_MAX_RETRY_SPI) { + if (retry_count >= EEPROM_MAX_RETRY_SPI) { DEBUGOUT("SPI EEPROM Status error\n"); return -E1000_ERR_EEPROM; } @@ -4858,7 +4860,7 @@ e1000_read_eeprom(struct e1000_hw *hw, /* A check for invalid values: offset too large, too many words, and not * enough words. 
*/ - if((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) || + if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) || (words == 0)) { DEBUGOUT("\"words\" parameter out of bounds\n"); return -E1000_ERR_EEPROM; @@ -4866,7 +4868,7 @@ e1000_read_eeprom(struct e1000_hw *hw, /* FLASH reads without acquiring the semaphore are safe */ if (e1000_is_onboard_nvm_eeprom(hw) == TRUE && - hw->eeprom.use_eerd == FALSE) { + hw->eeprom.use_eerd == FALSE) { switch (hw->mac_type) { case e1000_80003es2lan: break; @@ -4893,7 +4895,7 @@ e1000_read_eeprom(struct e1000_hw *hw, uint16_t word_in; uint8_t read_opcode = EEPROM_READ_OPCODE_SPI; - if(e1000_spi_eeprom_ready(hw)) { + if (e1000_spi_eeprom_ready(hw)) { e1000_release_eeprom(hw); return -E1000_ERR_EEPROM; } @@ -4901,7 +4903,7 @@ e1000_read_eeprom(struct e1000_hw *hw, e1000_standby_eeprom(hw); /* Some SPI eeproms use the 8th address bit embedded in the opcode */ - if((eeprom->address_bits == 8) && (offset >= 128)) + if ((eeprom->address_bits == 8) && (offset >= 128)) read_opcode |= EEPROM_A8_OPCODE_SPI; /* Send the READ command (opcode + addr) */ @@ -4917,7 +4919,7 @@ e1000_read_eeprom(struct e1000_hw *hw, word_in = e1000_shift_in_ee_bits(hw, 16); data[i] = (word_in >> 8) | (word_in << 8); } - } else if(eeprom->type == e1000_eeprom_microwire) { + } else if (eeprom->type == e1000_eeprom_microwire) { for (i = 0; i < words; i++) { /* Send the READ command (opcode + addr) */ e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE, @@ -4962,7 +4964,7 @@ e1000_read_eeprom_eerd(struct e1000_hw *hw, E1000_WRITE_REG(hw, EERD, eerd); error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ); - if(error) { + if (error) { break; } data[i] = (E1000_READ_REG(hw, EERD) >> E1000_EEPROM_RW_REG_DATA); @@ -4999,7 +5001,7 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw, E1000_EEPROM_RW_REG_START; error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE); - if(error) { + if (error) { break; } @@ -5007,7 +5009,7 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw, error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE); - if(error) { + if (error) { break; } } @@ -5028,13 +5030,13 @@ e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd) uint32_t i, reg = 0; int32_t done = E1000_ERR_EEPROM; - for(i = 0; i < attempts; i++) { - if(eerd == E1000_EEPROM_POLL_READ) + for (i = 0; i < attempts; i++) { + if (eerd == E1000_EEPROM_POLL_READ) reg = E1000_READ_REG(hw, EERD); else reg = E1000_READ_REG(hw, EEWR); - if(reg & E1000_EEPROM_RW_REG_DONE) { + if (reg & E1000_EEPROM_RW_REG_DONE) { done = E1000_SUCCESS; break; } @@ -5066,7 +5068,7 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw) eecd = ((eecd >> 15) & 0x03); /* If both bits are set, device is Flash type */ - if(eecd == 0x03) { + if (eecd == 0x03) { return FALSE; } } @@ -5131,7 +5133,7 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw) checksum += eeprom_data; } - if(checksum == (uint16_t) EEPROM_SUM) + if (checksum == (uint16_t) EEPROM_SUM) return E1000_SUCCESS; else { DEBUGOUT("EEPROM Checksum Invalid\n"); @@ -5156,15 +5158,15 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw) DEBUGFUNC("e1000_update_eeprom_checksum"); - for(i = 0; i < EEPROM_CHECKSUM_REG; i++) { - if(e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { DEBUGOUT("EEPROM Read Error\n"); return -E1000_ERR_EEPROM; } checksum += eeprom_data; } checksum = (uint16_t) EEPROM_SUM - checksum; - if(e1000_write_eeprom(hw, 
EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { + if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { DEBUGOUT("EEPROM Write Error\n"); return -E1000_ERR_EEPROM; } else if (hw->eeprom.type == e1000_eeprom_flash) { @@ -5206,14 +5208,14 @@ e1000_write_eeprom(struct e1000_hw *hw, /* A check for invalid values: offset too large, too many words, and not * enough words. */ - if((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) || + if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) || (words == 0)) { DEBUGOUT("\"words\" parameter out of bounds\n"); return -E1000_ERR_EEPROM; } /* 82573 writes only through eewr */ - if(eeprom->use_eewr == TRUE) + if (eeprom->use_eewr == TRUE) return e1000_write_eeprom_eewr(hw, offset, words, data); if (eeprom->type == e1000_eeprom_ich8) @@ -5223,7 +5225,7 @@ e1000_write_eeprom(struct e1000_hw *hw, if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) return -E1000_ERR_EEPROM; - if(eeprom->type == e1000_eeprom_microwire) { + if (eeprom->type == e1000_eeprom_microwire) { status = e1000_write_eeprom_microwire(hw, offset, words, data); } else { status = e1000_write_eeprom_spi(hw, offset, words, data); @@ -5259,7 +5261,7 @@ e1000_write_eeprom_spi(struct e1000_hw *hw, while (widx < words) { uint8_t write_opcode = EEPROM_WRITE_OPCODE_SPI; - if(e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM; + if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM; e1000_standby_eeprom(hw); @@ -5270,7 +5272,7 @@ e1000_write_eeprom_spi(struct e1000_hw *hw, e1000_standby_eeprom(hw); /* Some SPI eeproms use the 8th address bit embedded in the opcode */ - if((eeprom->address_bits == 8) && (offset >= 128)) + if ((eeprom->address_bits == 8) && (offset >= 128)) write_opcode |= EEPROM_A8_OPCODE_SPI; /* Send the Write command (8-bit opcode + addr) */ @@ -5292,7 +5294,7 @@ e1000_write_eeprom_spi(struct e1000_hw *hw, * operation, while the smaller eeproms are capable of an 8-byte * PAGE WRITE operation. Break the inner loop to pass new address */ - if((((offset + widx)*2) % eeprom->page_size) == 0) { + if ((((offset + widx)*2) % eeprom->page_size) == 0) { e1000_standby_eeprom(hw); break; } @@ -5358,12 +5360,12 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw, * signal that the command has been completed by raising the DO signal. * If DO does not go high in 10 milliseconds, then error out. 
*/ - for(i = 0; i < 200; i++) { + for (i = 0; i < 200; i++) { eecd = E1000_READ_REG(hw, EECD); - if(eecd & E1000_EECD_DO) break; + if (eecd & E1000_EECD_DO) break; udelay(50); } - if(i == 200) { + if (i == 200) { DEBUGOUT("EEPROM Write did not complete\n"); return -E1000_ERR_EEPROM; } @@ -5569,7 +5571,7 @@ e1000_read_part_num(struct e1000_hw *hw, DEBUGFUNC("e1000_read_part_num"); /* Get word 0 from EEPROM */ - if(e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { + if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { DEBUGOUT("EEPROM Read Error\n"); return -E1000_ERR_EEPROM; } @@ -5577,7 +5579,7 @@ e1000_read_part_num(struct e1000_hw *hw, *part_num = (uint32_t) (eeprom_data << 16); /* Get word 1 from EEPROM */ - if(e1000_read_eeprom(hw, ++offset, 1, &eeprom_data) < 0) { + if (e1000_read_eeprom(hw, ++offset, 1, &eeprom_data) < 0) { DEBUGOUT("EEPROM Read Error\n"); return -E1000_ERR_EEPROM; } @@ -5601,9 +5603,9 @@ e1000_read_mac_addr(struct e1000_hw * hw) DEBUGFUNC("e1000_read_mac_addr"); - for(i = 0; i < NODE_ADDRESS_SIZE; i += 2) { + for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { offset = i >> 1; - if(e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { + if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { DEBUGOUT("EEPROM Read Error\n"); return -E1000_ERR_EEPROM; } @@ -5618,12 +5620,12 @@ e1000_read_mac_addr(struct e1000_hw * hw) case e1000_82546_rev_3: case e1000_82571: case e1000_80003es2lan: - if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) + if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) hw->perm_mac_addr[5] ^= 0x01; break; } - for(i = 0; i < NODE_ADDRESS_SIZE; i++) + for (i = 0; i < NODE_ADDRESS_SIZE; i++) hw->mac_addr[i] = hw->perm_mac_addr[i]; return E1000_SUCCESS; } @@ -5662,7 +5664,7 @@ e1000_init_rx_addrs(struct e1000_hw *hw) /* Zero out the other 15 receive addresses. */ DEBUGOUT("Clearing RAR[1-15]\n"); - for(i = 1; i < rar_num; i++) { + for (i = 1; i < rar_num; i++) { E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); E1000_WRITE_FLUSH(hw); E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); @@ -5713,7 +5715,7 @@ e1000_mc_addr_list_update(struct e1000_hw *hw, if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE)) num_rar_entry -= 1; - for(i = rar_used_count; i < num_rar_entry; i++) { + for (i = rar_used_count; i < num_rar_entry; i++) { E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); E1000_WRITE_FLUSH(hw); E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); @@ -5725,13 +5727,13 @@ e1000_mc_addr_list_update(struct e1000_hw *hw, num_mta_entry = E1000_NUM_MTA_REGISTERS; if (hw->mac_type == e1000_ich8lan) num_mta_entry = E1000_NUM_MTA_REGISTERS_ICH8LAN; - for(i = 0; i < num_mta_entry; i++) { + for (i = 0; i < num_mta_entry; i++) { E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); E1000_WRITE_FLUSH(hw); } /* Add the new addresses */ - for(i = 0; i < mc_addr_count; i++) { + for (i = 0; i < mc_addr_count; i++) { DEBUGOUT(" Adding the multicast addresses:\n"); DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i, mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad)], @@ -5863,7 +5865,7 @@ e1000_mta_set(struct e1000_hw *hw, * in the MTA, save off the previous entry before writing and * restore the old value after writing. 
*/ - if((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) { + if ((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) { temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1)); E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta); E1000_WRITE_FLUSH(hw); @@ -6013,7 +6015,7 @@ e1000_id_led_init(struct e1000_hw * hw) DEBUGFUNC("e1000_id_led_init"); - if(hw->mac_type < e1000_82540) { + if (hw->mac_type < e1000_82540) { /* Nothing to do */ return E1000_SUCCESS; } @@ -6023,7 +6025,7 @@ e1000_id_led_init(struct e1000_hw * hw) hw->ledctl_mode1 = hw->ledctl_default; hw->ledctl_mode2 = hw->ledctl_default; - if(e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) { + if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) { DEBUGOUT("EEPROM Read Error\n"); return -E1000_ERR_EEPROM; } @@ -6040,7 +6042,7 @@ e1000_id_led_init(struct e1000_hw * hw) } for (i = 0; i < 4; i++) { temp = (eeprom_data >> (i << 2)) & led_mask; - switch(temp) { + switch (temp) { case ID_LED_ON1_DEF2: case ID_LED_ON1_ON2: case ID_LED_ON1_OFF2: @@ -6057,7 +6059,7 @@ e1000_id_led_init(struct e1000_hw * hw) /* Do nothing */ break; } - switch(temp) { + switch (temp) { case ID_LED_DEF1_ON2: case ID_LED_ON1_ON2: case ID_LED_OFF1_ON2: @@ -6091,7 +6093,7 @@ e1000_setup_led(struct e1000_hw *hw) DEBUGFUNC("e1000_setup_led"); - switch(hw->mac_type) { + switch (hw->mac_type) { case e1000_82542_rev2_0: case e1000_82542_rev2_1: case e1000_82543: @@ -6105,16 +6107,16 @@ e1000_setup_led(struct e1000_hw *hw) /* Turn off PHY Smart Power Down (if enabled) */ ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &hw->phy_spd_default); - if(ret_val) + if (ret_val) return ret_val; ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, (uint16_t)(hw->phy_spd_default & ~IGP01E1000_GMII_SPD)); - if(ret_val) + if (ret_val) return ret_val; /* Fall Through */ default: - if(hw->media_type == e1000_media_type_fiber) { + if (hw->media_type == e1000_media_type_fiber) { ledctl = E1000_READ_REG(hw, LEDCTL); /* Save current LEDCTL settings */ hw->ledctl_default = ledctl; @@ -6125,7 +6127,7 @@ e1000_setup_led(struct e1000_hw *hw) ledctl |= (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT); E1000_WRITE_REG(hw, LEDCTL, ledctl); - } else if(hw->media_type == e1000_media_type_copper) + } else if (hw->media_type == e1000_media_type_copper) E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1); break; } @@ -6133,6 +6135,7 @@ e1000_setup_led(struct e1000_hw *hw) return E1000_SUCCESS; } + /****************************************************************************** * Used on 82571 and later Si that has LED blink bits. 
* Callers must use their own timer and should have already called @@ -6183,7 +6186,7 @@ e1000_cleanup_led(struct e1000_hw *hw) DEBUGFUNC("e1000_cleanup_led"); - switch(hw->mac_type) { + switch (hw->mac_type) { case e1000_82542_rev2_0: case e1000_82542_rev2_1: case e1000_82543: @@ -6197,7 +6200,7 @@ e1000_cleanup_led(struct e1000_hw *hw) /* Turn on PHY Smart Power Down (if previously enabled) */ ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, hw->phy_spd_default); - if(ret_val) + if (ret_val) return ret_val; /* Fall Through */ default: @@ -6225,7 +6228,7 @@ e1000_led_on(struct e1000_hw *hw) DEBUGFUNC("e1000_led_on"); - switch(hw->mac_type) { + switch (hw->mac_type) { case e1000_82542_rev2_0: case e1000_82542_rev2_1: case e1000_82543: @@ -6234,7 +6237,7 @@ e1000_led_on(struct e1000_hw *hw) ctrl |= E1000_CTRL_SWDPIO0; break; case e1000_82544: - if(hw->media_type == e1000_media_type_fiber) { + if (hw->media_type == e1000_media_type_fiber) { /* Set SW Defineable Pin 0 to turn on the LED */ ctrl |= E1000_CTRL_SWDPIN0; ctrl |= E1000_CTRL_SWDPIO0; @@ -6245,7 +6248,7 @@ e1000_led_on(struct e1000_hw *hw) } break; default: - if(hw->media_type == e1000_media_type_fiber) { + if (hw->media_type == e1000_media_type_fiber) { /* Clear SW Defineable Pin 0 to turn on the LED */ ctrl &= ~E1000_CTRL_SWDPIN0; ctrl |= E1000_CTRL_SWDPIO0; @@ -6276,7 +6279,7 @@ e1000_led_off(struct e1000_hw *hw) DEBUGFUNC("e1000_led_off"); - switch(hw->mac_type) { + switch (hw->mac_type) { case e1000_82542_rev2_0: case e1000_82542_rev2_1: case e1000_82543: @@ -6285,7 +6288,7 @@ e1000_led_off(struct e1000_hw *hw) ctrl |= E1000_CTRL_SWDPIO0; break; case e1000_82544: - if(hw->media_type == e1000_media_type_fiber) { + if (hw->media_type == e1000_media_type_fiber) { /* Clear SW Defineable Pin 0 to turn off the LED */ ctrl &= ~E1000_CTRL_SWDPIN0; ctrl |= E1000_CTRL_SWDPIO0; @@ -6296,7 +6299,7 @@ e1000_led_off(struct e1000_hw *hw) } break; default: - if(hw->media_type == e1000_media_type_fiber) { + if (hw->media_type == e1000_media_type_fiber) { /* Set SW Defineable Pin 0 to turn off the LED */ ctrl |= E1000_CTRL_SWDPIN0; ctrl |= E1000_CTRL_SWDPIO0; @@ -6320,7 +6323,7 @@ e1000_led_off(struct e1000_hw *hw) * * hw - Struct containing variables accessed by shared code *****************************************************************************/ -static void +void e1000_clear_hw_cntrs(struct e1000_hw *hw) { volatile uint32_t temp; @@ -6383,7 +6386,7 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw) temp = E1000_READ_REG(hw, MPTC); temp = E1000_READ_REG(hw, BPTC); - if(hw->mac_type < e1000_82543) return; + if (hw->mac_type < e1000_82543) return; temp = E1000_READ_REG(hw, ALGNERRC); temp = E1000_READ_REG(hw, RXERRC); @@ -6392,13 +6395,13 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw) temp = E1000_READ_REG(hw, TSCTC); temp = E1000_READ_REG(hw, TSCTFC); - if(hw->mac_type <= e1000_82544) return; + if (hw->mac_type <= e1000_82544) return; temp = E1000_READ_REG(hw, MGTPRC); temp = E1000_READ_REG(hw, MGTPDC); temp = E1000_READ_REG(hw, MGTPTC); - if(hw->mac_type <= e1000_82547_rev_2) return; + if (hw->mac_type <= e1000_82547_rev_2) return; temp = E1000_READ_REG(hw, IAC); temp = E1000_READ_REG(hw, ICRXOC); @@ -6429,8 +6432,8 @@ e1000_reset_adaptive(struct e1000_hw *hw) { DEBUGFUNC("e1000_reset_adaptive"); - if(hw->adaptive_ifs) { - if(!hw->ifs_params_forced) { + if (hw->adaptive_ifs) { + if (!hw->ifs_params_forced) { hw->current_ifs_val = 0; hw->ifs_min_val = IFS_MIN; hw->ifs_max_val = IFS_MAX; @@ -6457,12 +6460,12 @@ e1000_update_adaptive(struct 
e1000_hw *hw) { DEBUGFUNC("e1000_update_adaptive"); - if(hw->adaptive_ifs) { - if((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) { - if(hw->tx_packet_delta > MIN_NUM_XMITS) { + if (hw->adaptive_ifs) { + if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) { + if (hw->tx_packet_delta > MIN_NUM_XMITS) { hw->in_ifs_mode = TRUE; - if(hw->current_ifs_val < hw->ifs_max_val) { - if(hw->current_ifs_val == 0) + if (hw->current_ifs_val < hw->ifs_max_val) { + if (hw->current_ifs_val == 0) hw->current_ifs_val = hw->ifs_min_val; else hw->current_ifs_val += hw->ifs_step_size; @@ -6470,7 +6473,7 @@ e1000_update_adaptive(struct e1000_hw *hw) } } } else { - if(hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) { + if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) { hw->current_ifs_val = 0; hw->in_ifs_mode = FALSE; E1000_WRITE_REG(hw, AIT, 0); @@ -6517,46 +6520,46 @@ e1000_tbi_adjust_stats(struct e1000_hw *hw, * This could be simplified if all environments supported * 64-bit integers. */ - if(carry_bit && ((stats->gorcl & 0x80000000) == 0)) + if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) stats->gorch++; /* Is this a broadcast or multicast? Check broadcast first, * since the test for a multicast frame will test positive on * a broadcast frame. */ - if((mac_addr[0] == (uint8_t) 0xff) && (mac_addr[1] == (uint8_t) 0xff)) + if ((mac_addr[0] == (uint8_t) 0xff) && (mac_addr[1] == (uint8_t) 0xff)) /* Broadcast packet */ stats->bprc++; - else if(*mac_addr & 0x01) + else if (*mac_addr & 0x01) /* Multicast packet */ stats->mprc++; - if(frame_len == hw->max_frame_size) { + if (frame_len == hw->max_frame_size) { /* In this case, the hardware has overcounted the number of * oversize frames. */ - if(stats->roc > 0) + if (stats->roc > 0) stats->roc--; } /* Adjust the bin counters when the extra byte put the frame in the * wrong bin. Remember that the frame_len was adjusted above. */ - if(frame_len == 64) { + if (frame_len == 64) { stats->prc64++; stats->prc127--; - } else if(frame_len == 127) { + } else if (frame_len == 127) { stats->prc127++; stats->prc255--; - } else if(frame_len == 255) { + } else if (frame_len == 255) { stats->prc255++; stats->prc511--; - } else if(frame_len == 511) { + } else if (frame_len == 511) { stats->prc511++; stats->prc1023--; - } else if(frame_len == 1023) { + } else if (frame_len == 1023) { stats->prc1023++; stats->prc1522--; - } else if(frame_len == 1522) { + } else if (frame_len == 1522) { stats->prc1522++; } } @@ -6596,10 +6599,10 @@ e1000_get_bus_info(struct e1000_hw *hw) hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? e1000_bus_type_pcix : e1000_bus_type_pci; - if(hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) { + if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) { hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ? e1000_bus_speed_66 : e1000_bus_speed_120; - } else if(hw->bus_type == e1000_bus_type_pci) { + } else if (hw->bus_type == e1000_bus_type_pci) { hw->bus_speed = (status & E1000_STATUS_PCI66) ? 
e1000_bus_speed_66 : e1000_bus_speed_33; } else { @@ -6694,11 +6697,11 @@ e1000_get_cable_length(struct e1000_hw *hw, *min_length = *max_length = 0; /* Use old method for Phy older than IGP */ - if(hw->phy_type == e1000_phy_m88) { + if (hw->phy_type == e1000_phy_m88) { ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); - if(ret_val) + if (ret_val) return ret_val; cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> M88E1000_PSSR_CABLE_LENGTH_SHIFT; @@ -6757,7 +6760,7 @@ e1000_get_cable_length(struct e1000_hw *hw, return -E1000_ERR_PHY; break; } - } else if(hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ + } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ uint16_t cur_agc_value; uint16_t min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = @@ -6766,10 +6769,10 @@ e1000_get_cable_length(struct e1000_hw *hw, IGP01E1000_PHY_AGC_C, IGP01E1000_PHY_AGC_D}; /* Read the AGC registers for all channels */ - for(i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); - if(ret_val) + if (ret_val) return ret_val; cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT; @@ -6819,7 +6822,7 @@ e1000_get_cable_length(struct e1000_hw *hw, if (ret_val) return ret_val; - /* Getting bits 15:9, which represent the combination of course and + /* Getting bits 15:9, which represent the combination of course and * fine gain values. The result is a number that can be put into * the lookup table to obtain the approximate cable length. */ cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & @@ -6884,7 +6887,7 @@ e1000_check_polarity(struct e1000_hw *hw, /* return the Polarity bit in the Status register. */ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); - if(ret_val) + if (ret_val) return ret_val; *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >> M88E1000_PSSR_REV_POLARITY_SHIFT; @@ -6894,18 +6897,18 @@ e1000_check_polarity(struct e1000_hw *hw, /* Read the Status register to check the speed */ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data); - if(ret_val) + if (ret_val) return ret_val; /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to * find the polarity status */ - if((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == IGP01E1000_PSSR_SPEED_1000MBPS) { /* Read the GIG initialization PCS register (0x00B4) */ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG, &phy_data); - if(ret_val) + if (ret_val) return ret_val; /* Check the polarity bits */ @@ -6954,7 +6957,7 @@ e1000_check_downshift(struct e1000_hw *hw) hw->phy_type == e1000_phy_igp_2) { ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, &phy_data); - if(ret_val) + if (ret_val) return ret_val; hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 
1 : 0; @@ -6962,7 +6965,7 @@ e1000_check_downshift(struct e1000_hw *hw) (hw->phy_type == e1000_phy_gg82563)) { ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); - if(ret_val) + if (ret_val) return ret_val; hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> @@ -7002,42 +7005,42 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw, DEBUGFUNC("e1000_config_dsp_after_link_change"); - if(hw->phy_type != e1000_phy_igp) + if (hw->phy_type != e1000_phy_igp) return E1000_SUCCESS; - if(link_up) { + if (link_up) { ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); - if(ret_val) { + if (ret_val) { DEBUGOUT("Error getting link speed and duplex\n"); return ret_val; } - if(speed == SPEED_1000) { + if (speed == SPEED_1000) { ret_val = e1000_get_cable_length(hw, &min_length, &max_length); if (ret_val) return ret_val; - if((hw->dsp_config_state == e1000_dsp_config_enabled) && + if ((hw->dsp_config_state == e1000_dsp_config_enabled) && min_length >= e1000_igp_cable_length_50) { - for(i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i], phy_data); - if(ret_val) + if (ret_val) return ret_val; } hw->dsp_config_state = e1000_dsp_config_activated; } - if((hw->ffe_config_state == e1000_ffe_config_enabled) && + if ((hw->ffe_config_state == e1000_ffe_config_enabled) && (min_length < e1000_igp_cable_length_50)) { uint16_t ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; @@ -7046,70 +7049,70 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw, /* clear previous idle error counts */ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); - if(ret_val) + if (ret_val) return ret_val; - for(i = 0; i < ffe_idle_err_timeout; i++) { + for (i = 0; i < ffe_idle_err_timeout; i++) { udelay(1000); ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); - if(ret_val) + if (ret_val) return ret_val; idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT); - if(idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) { + if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) { hw->ffe_config_state = e1000_ffe_config_active; ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE, IGP01E1000_PHY_DSP_FFE_CM_CP); - if(ret_val) + if (ret_val) return ret_val; break; } - if(idle_errs) + if (idle_errs) ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_100; } } } } else { - if(hw->dsp_config_state == e1000_dsp_config_activated) { + if (hw->dsp_config_state == e1000_dsp_config_activated) { /* Save off the current value of register 0x2F5B to be restored at * the end of the routines. 
*/ ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); - if(ret_val) + if (ret_val) return ret_val; /* Disable the PHY transmitter */ ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); - if(ret_val) + if (ret_val) return ret_val; msec_delay_irq(20); ret_val = e1000_write_phy_reg(hw, 0x0000, IGP01E1000_IEEE_FORCE_GIGA); - if(ret_val) + if (ret_val) return ret_val; - for(i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS; ret_val = e1000_write_phy_reg(hw,dsp_reg_array[i], phy_data); - if(ret_val) + if (ret_val) return ret_val; } ret_val = e1000_write_phy_reg(hw, 0x0000, IGP01E1000_IEEE_RESTART_AUTONEG); - if(ret_val) + if (ret_val) return ret_val; msec_delay_irq(20); @@ -7117,40 +7120,40 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw, /* Now enable the transmitter */ ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); - if(ret_val) + if (ret_val) return ret_val; hw->dsp_config_state = e1000_dsp_config_enabled; } - if(hw->ffe_config_state == e1000_ffe_config_active) { + if (hw->ffe_config_state == e1000_ffe_config_active) { /* Save off the current value of register 0x2F5B to be restored at * the end of the routines. */ ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); - if(ret_val) + if (ret_val) return ret_val; /* Disable the PHY transmitter */ ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); - if(ret_val) + if (ret_val) return ret_val; msec_delay_irq(20); ret_val = e1000_write_phy_reg(hw, 0x0000, IGP01E1000_IEEE_FORCE_GIGA); - if(ret_val) + if (ret_val) return ret_val; ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE, IGP01E1000_PHY_DSP_FFE_DEFAULT); - if(ret_val) + if (ret_val) return ret_val; ret_val = e1000_write_phy_reg(hw, 0x0000, IGP01E1000_IEEE_RESTART_AUTONEG); - if(ret_val) + if (ret_val) return ret_val; msec_delay_irq(20); @@ -7158,7 +7161,7 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw, /* Now enable the transmitter */ ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); - if(ret_val) + if (ret_val) return ret_val; hw->ffe_config_state = e1000_ffe_config_enabled; @@ -7183,20 +7186,20 @@ e1000_set_phy_mode(struct e1000_hw *hw) DEBUGFUNC("e1000_set_phy_mode"); - if((hw->mac_type == e1000_82545_rev_3) && - (hw->media_type == e1000_media_type_copper)) { + if ((hw->mac_type == e1000_82545_rev_3) && + (hw->media_type == e1000_media_type_copper)) { ret_val = e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, &eeprom_data); - if(ret_val) { + if (ret_val) { return ret_val; } - if((eeprom_data != EEPROM_RESERVED_WORD) && - (eeprom_data & EEPROM_PHY_CLASS_A)) { + if ((eeprom_data != EEPROM_RESERVED_WORD) && + (eeprom_data & EEPROM_PHY_CLASS_A)) { ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x000B); - if(ret_val) + if (ret_val) return ret_val; ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x8104); - if(ret_val) + if (ret_val) return ret_val; hw->phy_reset_disable = FALSE; @@ -7247,16 +7250,16 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw, phy_ctrl = E1000_READ_REG(hw, PHY_CTRL); } else { ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); - if(ret_val) + if (ret_val) return ret_val; } - if(!active) { - if(hw->mac_type == e1000_82541_rev_2 || - hw->mac_type == e1000_82547_rev_2) { + if (!active) { + if (hw->mac_type == e1000_82541_rev_2 || + 
hw->mac_type == e1000_82547_rev_2) { phy_data &= ~IGP01E1000_GMII_FLEX_SPD; ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data); - if(ret_val) + if (ret_val) return ret_val; } else { if (hw->mac_type == e1000_ich8lan) { @@ -7278,13 +7281,13 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw, if (hw->smart_speed == e1000_smart_speed_on) { ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_data |= IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data); - if(ret_val) + if (ret_val) return ret_val; } else if (hw->smart_speed == e1000_smart_speed_off) { ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, @@ -7295,19 +7298,19 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw, phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data); - if(ret_val) + if (ret_val) return ret_val; } - } else if((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) || - (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) || - (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { + } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { - if(hw->mac_type == e1000_82541_rev_2 || + if (hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) { phy_data |= IGP01E1000_GMII_FLEX_SPD; ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data); - if(ret_val) + if (ret_val) return ret_val; } else { if (hw->mac_type == e1000_ich8lan) { @@ -7324,12 +7327,12 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw, /* When LPLU is enabled we should disable SmartSpeed */ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data); - if(ret_val) + if (ret_val) return ret_val; } @@ -7359,14 +7362,14 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw, uint16_t phy_data; DEBUGFUNC("e1000_set_d0_lplu_state"); - if(hw->mac_type <= e1000_82547_rev_2) + if (hw->mac_type <= e1000_82547_rev_2) return E1000_SUCCESS; if (hw->mac_type == e1000_ich8lan) { phy_ctrl = E1000_READ_REG(hw, PHY_CTRL); } else { ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); - if(ret_val) + if (ret_val) return ret_val; } @@ -7388,13 +7391,13 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw, if (hw->smart_speed == e1000_smart_speed_on) { ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_data |= IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data); - if(ret_val) + if (ret_val) return ret_val; } else if (hw->smart_speed == e1000_smart_speed_off) { ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, @@ -7405,7 +7408,7 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw, phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data); - if(ret_val) + if (ret_val) return ret_val; } @@ -7424,12 +7427,12 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw, /* When LPLU is enabled we should disable SmartSpeed */ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; 
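
The LPLU/SmartSpeed hunks above all follow one idiom: read a PHY register, flip the SmartSpeed bit, write the value back, and bail out on the first access error. A minimal standalone sketch of that read-modify-write pattern follows; phy_read(), phy_write(), the register number and the bit value are placeholders for illustration, not the driver's real accessors.

/* Sketch only: PHY read-modify-write with early error return. */
#include <stdint.h>

#define PHY_PORT_CONFIG   0x10      /* illustrative register number */
#define PSCFR_SMART_SPEED 0x0080    /* illustrative bit */

extern int phy_read(uint8_t reg, uint16_t *val);    /* 0 on success */
extern int phy_write(uint8_t reg, uint16_t val);    /* 0 on success */

/* Enable or disable SmartSpeed; returns 0 or the first access error. */
int set_smart_speed(int enable)
{
        uint16_t data;
        int ret;

        ret = phy_read(PHY_PORT_CONFIG, &data);
        if (ret)
                return ret;

        if (enable)
                data |= PSCFR_SMART_SPEED;
        else
                data &= ~PSCFR_SMART_SPEED;

        return phy_write(PHY_PORT_CONFIG, data);
}
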
ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data); - if(ret_val) + if (ret_val) return ret_val; } @@ -7450,7 +7453,7 @@ e1000_set_vco_speed(struct e1000_hw *hw) DEBUGFUNC("e1000_set_vco_speed"); - switch(hw->mac_type) { + switch (hw->mac_type) { case e1000_82545_rev_3: case e1000_82546_rev_3: break; @@ -7461,39 +7464,39 @@ e1000_set_vco_speed(struct e1000_hw *hw) /* Set PHY register 30, page 5, bit 8 to 0 */ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page); - if(ret_val) + if (ret_val) return ret_val; ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005); - if(ret_val) + if (ret_val) return ret_val; ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_data &= ~M88E1000_PHY_VCO_REG_BIT8; ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); - if(ret_val) + if (ret_val) return ret_val; /* Set PHY register 30, page 4, bit 11 to 1 */ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004); - if(ret_val) + if (ret_val) return ret_val; ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); - if(ret_val) + if (ret_val) return ret_val; phy_data |= M88E1000_PHY_VCO_REG_BIT11; ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); - if(ret_val) + if (ret_val) return ret_val; ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page); - if(ret_val) + if (ret_val) return ret_val; return E1000_SUCCESS; @@ -7572,7 +7575,7 @@ e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer, { uint8_t *tmp; uint8_t *bufptr = buffer; - uint32_t data; + uint32_t data = 0; uint16_t remaining, i, j, prev_bytes; /* sum = only sum of the data and it is not checksum */ @@ -7652,7 +7655,7 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw, buffer = (uint8_t *) hdr; i = length; - while(i--) + while (i--) sum += buffer[i]; hdr->checksum = 0 - sum; @@ -7675,8 +7678,7 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw, * returns - E1000_SUCCESS for success. ****************************************************************************/ static int32_t -e1000_mng_write_commit( - struct e1000_hw * hw) +e1000_mng_write_commit(struct e1000_hw * hw) { uint32_t hicr; @@ -7848,31 +7850,31 @@ e1000_polarity_reversal_workaround(struct e1000_hw *hw) /* Disable the transmitter on the PHY */ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); - if(ret_val) + if (ret_val) return ret_val; ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF); - if(ret_val) + if (ret_val) return ret_val; ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); - if(ret_val) + if (ret_val) return ret_val; /* This loop will early-out if the NO link condition has been met. */ - for(i = PHY_FORCE_TIME; i > 0; i--) { + for (i = PHY_FORCE_TIME; i > 0; i--) { /* Read the MII Status Register and wait for Link Status bit * to be clear. 
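
The polarity-reversal workaround that begins above waits for the link bit with a bounded retry loop, reading the MII status register twice per pass (the bit is latched, so the first read only clears the stale latch) and early-outs once the condition is met. A hedged standalone sketch of that bounded-poll pattern, with mii_read(), sleep_ms() and the retry/delay constants as stand-ins:

/* Sketch only: bounded poll of a latched MII status bit. */
#include <stdint.h>

#define MII_SR_LINK_STATUS 0x0004
#define POLL_TRIES         20
#define POLL_DELAY_MS      100

extern int mii_read(uint8_t reg, uint16_t *val);    /* 0 on success */
extern void sleep_ms(unsigned int ms);

/* Wait for link-up; returns 0 on link, -1 on timeout or access error. */
int wait_for_link(void)
{
        uint16_t status;
        int i;

        for (i = POLL_TRIES; i > 0; i--) {
                /* Read twice: the first read clears the latched value,
                 * the second reflects the current link state. */
                if (mii_read(0x01, &status) || mii_read(0x01, &status))
                        return -1;
                if (status & MII_SR_LINK_STATUS)
                        return 0;
                sleep_ms(POLL_DELAY_MS);
        }
        return -1;
}
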
*/ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); - if(ret_val) + if (ret_val) return ret_val; ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); - if(ret_val) + if (ret_val) return ret_val; - if((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) break; + if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) break; msec_delay_irq(100); } @@ -7882,40 +7884,40 @@ e1000_polarity_reversal_workaround(struct e1000_hw *hw) /* Now we will re-enable th transmitter on the PHY */ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); - if(ret_val) + if (ret_val) return ret_val; msec_delay_irq(50); ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0); - if(ret_val) + if (ret_val) return ret_val; msec_delay_irq(50); ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00); - if(ret_val) + if (ret_val) return ret_val; msec_delay_irq(50); ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000); - if(ret_val) + if (ret_val) return ret_val; ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); - if(ret_val) + if (ret_val) return ret_val; /* This loop will early-out if the link condition has been met. */ - for(i = PHY_FORCE_TIME; i > 0; i--) { + for (i = PHY_FORCE_TIME; i > 0; i--) { /* Read the MII Status Register and wait for Link Status bit * to be set. */ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); - if(ret_val) + if (ret_val) return ret_val; ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); - if(ret_val) + if (ret_val) return ret_val; - if(mii_status_reg & MII_SR_LINK_STATUS) break; + if (mii_status_reg & MII_SR_LINK_STATUS) break; msec_delay_irq(100); } return E1000_SUCCESS; @@ -7994,15 +7996,15 @@ e1000_disable_pciex_master(struct e1000_hw *hw) e1000_set_pci_express_master_disable(hw); - while(timeout) { - if(!(E1000_READ_REG(hw, STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) + while (timeout) { + if (!(E1000_READ_REG(hw, STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) break; else udelay(100); timeout--; } - if(!timeout) { + if (!timeout) { DEBUGOUT("Master requests are pending.\n"); return -E1000_ERR_MASTER_REQUESTS_PENDING; } @@ -8043,7 +8045,7 @@ e1000_get_auto_rd_done(struct e1000_hw *hw) timeout--; } - if(!timeout) { + if (!timeout) { DEBUGOUT("Auto read by HW from EEPROM has not completed.\n"); return -E1000_ERR_RESET; } @@ -8124,7 +8126,7 @@ e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw) DEBUGFUNC("e1000_get_hw_eeprom_semaphore"); - if(!hw->eeprom_semaphore_present) + if (!hw->eeprom_semaphore_present) return E1000_SUCCESS; if (hw->mac_type == e1000_80003es2lan) { @@ -8135,20 +8137,20 @@ e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw) /* Get the FW semaphore. */ timeout = hw->eeprom.word_size + 1; - while(timeout) { + while (timeout) { swsm = E1000_READ_REG(hw, SWSM); swsm |= E1000_SWSM_SWESMBI; E1000_WRITE_REG(hw, SWSM, swsm); /* if we managed to set the bit we got the semaphore. 
*/ swsm = E1000_READ_REG(hw, SWSM); - if(swsm & E1000_SWSM_SWESMBI) + if (swsm & E1000_SWSM_SWESMBI) break; udelay(50); timeout--; } - if(!timeout) { + if (!timeout) { /* Release semaphores */ e1000_put_hw_eeprom_semaphore(hw); DEBUGOUT("Driver can't access the Eeprom - SWESMBI bit is set.\n"); @@ -8173,7 +8175,7 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw) DEBUGFUNC("e1000_put_hw_eeprom_semaphore"); - if(!hw->eeprom_semaphore_present) + if (!hw->eeprom_semaphore_present) return; swsm = E1000_READ_REG(hw, SWSM); @@ -8206,16 +8208,16 @@ e1000_get_software_semaphore(struct e1000_hw *hw) if (hw->mac_type != e1000_80003es2lan) return E1000_SUCCESS; - while(timeout) { + while (timeout) { swsm = E1000_READ_REG(hw, SWSM); /* If SMBI bit cleared, it is now set and we hold the semaphore */ - if(!(swsm & E1000_SWSM_SMBI)) + if (!(swsm & E1000_SWSM_SMBI)) break; msec_delay_irq(1); timeout--; } - if(!timeout) { + if (!timeout) { DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); return -E1000_ERR_RESET; } @@ -8291,7 +8293,7 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw) case e1000_82573: case e1000_80003es2lan: fwsm = E1000_READ_REG(hw, FWSM); - if((fwsm & E1000_FWSM_MODE_MASK) != 0) + if ((fwsm & E1000_FWSM_MODE_MASK) != 0) return TRUE; break; case e1000_ich8lan: diff --git a/trunk/drivers/net/e1000/e1000_hw.h b/trunk/drivers/net/e1000/e1000_hw.h index 375b95518c31..4f74242746fa 100644 --- a/trunk/drivers/net/e1000/e1000_hw.h +++ b/trunk/drivers/net/e1000/e1000_hw.h @@ -336,9 +336,9 @@ uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw); #define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ #define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */ -#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ -#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ -#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ +#define E1000_MNG_IAMT_MODE 0x3 #define E1000_MNG_ICH_IAMT_MODE 0x2 #define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ @@ -385,7 +385,7 @@ struct e1000_host_mng_dhcp_cookie{ #endif int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer, - uint16_t length); + uint16_t length); boolean_t e1000_check_mng_mode(struct e1000_hw *hw); boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); @@ -523,7 +523,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); /* 802.1q VLAN Packet Sizes */ -#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */ /* Ethertype field values */ #define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ @@ -697,6 +697,7 @@ union e1000_rx_desc_packet_split { E1000_RXDEXT_STATERR_CXE | \ E1000_RXDEXT_STATERR_RXE) + /* Transmit Descriptor */ struct e1000_tx_desc { uint64_t buffer_addr; /* Address of the descriptor's data buffer */ @@ -2086,7 +2087,7 @@ struct e1000_hw { #define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address * filtering */ #define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ -#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ +#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ #define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ #define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ #define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ @@ -2172,7 +2173,7 @@ 
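
The EEPROM-semaphore hunks a little above use a set-and-verify handshake: write the software semaphore bit, read the register back, and only claim ownership if the bit stuck; otherwise delay, retry, and give up (releasing the semaphore) after a bounded number of attempts. A rough standalone model of that handshake, with reg_read(), reg_write(), delay_us() and the bit value as illustrative stand-ins:

/* Sketch only: set-and-verify acquisition of a hardware semaphore bit. */
#include <stdint.h>

#define SWSM_SWESMBI 0x00000002u    /* illustrative bit position */

extern uint32_t reg_read(void);
extern void reg_write(uint32_t val);
extern void delay_us(unsigned int us);

/* Try to take the semaphore; returns 0 on success, -1 on timeout. */
int get_fw_semaphore(unsigned int timeout)
{
        uint32_t swsm;

        while (timeout--) {
                swsm = reg_read();
                reg_write(swsm | SWSM_SWESMBI);

                /* If the bit reads back set, our write won and we own it. */
                if (reg_read() & SWSM_SWESMBI)
                        return 0;

                delay_us(50);
        }
        return -1;
}
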
struct e1000_host_command_info { #define E1000_MDALIGN 4096 -/* PCI-Ex registers */ +/* PCI-Ex registers*/ /* PCI-Ex Control Register */ #define E1000_GCR_RXD_NO_SNOOP 0x00000001 @@ -2224,7 +2225,7 @@ struct e1000_host_command_info { #define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erast/write disable */ /* EEPROM Commands - SPI */ -#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ #define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ #define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ #define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ @@ -3082,10 +3083,10 @@ struct e1000_host_command_info { /* DSP Distance Register (Page 5, Register 26) */ #define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M; - 1 = 50-80M; - 2 = 80-110M; - 3 = 110-140M; - 4 = >140M */ + 1 = 50-80M; + 2 = 80-110M; + 3 = 110-140M; + 4 = >140M */ /* Kumeran Mode Control Register (Page 193, Register 16) */ #define GG82563_KMCR_PHY_LEDS_EN 0x0020 /* 1=PHY LEDs, 0=Kumeran Inband LEDs */ diff --git a/trunk/drivers/net/e1000/e1000_main.c b/trunk/drivers/net/e1000/e1000_main.c index 0cf9ff2462ba..610a0cdbb68b 100644 --- a/trunk/drivers/net/e1000/e1000_main.c +++ b/trunk/drivers/net/e1000/e1000_main.c @@ -2439,10 +2439,9 @@ e1000_watchdog(unsigned long data) * disable receives in the ISR and * reset device here in the watchdog */ - if (adapter->hw.mac_type == e1000_80003es2lan) { + if (adapter->hw.mac_type == e1000_80003es2lan) /* reset device */ schedule_work(&adapter->reset_task); - } } e1000_smartspeed(adapter); @@ -3677,7 +3676,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, E1000_DBG("%s: Receive packet consumed multiple" " buffers\n", netdev->name); /* recycle */ - buffer_info-> skb = skb; + buffer_info->skb = skb; goto next_desc; } diff --git a/trunk/drivers/net/skge.c b/trunk/drivers/net/skge.c index a1fc04365e07..1b752141a4f3 100644 --- a/trunk/drivers/net/skge.c +++ b/trunk/drivers/net/skge.c @@ -43,7 +43,7 @@ #include "skge.h" #define DRV_NAME "skge" -#define DRV_VERSION "1.7" +#define DRV_VERSION "1.6" #define PFX DRV_NAME " " #define DEFAULT_TX_RING_SIZE 128 @@ -827,8 +827,7 @@ static int skge_rx_fill(struct skge_port *skge) do { struct sk_buff *skb; - skb = __dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, - GFP_KERNEL); + skb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_KERNEL); if (!skb) return -ENOMEM; @@ -2610,7 +2609,7 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge, goto error; if (len < RX_COPY_THRESHOLD) { - skb = dev_alloc_skb(len + 2); + skb = alloc_skb(len + 2, GFP_ATOMIC); if (!skb) goto resubmit; @@ -2625,7 +2624,7 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge, skge_rx_reuse(e, skge->rx_buf_size); } else { struct sk_buff *nskb; - nskb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN); + nskb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_ATOMIC); if (!nskb) goto resubmit; @@ -2748,7 +2747,7 @@ static int skge_poll(struct net_device *dev, int *budget) spin_lock_irq(&hw->hw_lock); hw->intr_mask |= rxirqmask[skge->port]; skge_write32(hw, B0_IMSK, hw->intr_mask); - skge_read32(hw, B0_IMSK); + mmiowb(); spin_unlock_irq(&hw->hw_lock); return 0; @@ -2882,7 +2881,6 @@ static void skge_extirq(void *arg) spin_lock_irq(&hw->hw_lock); hw->intr_mask |= IS_EXT_REG; skge_write32(hw, B0_IMSK, hw->intr_mask); - skge_read32(hw, B0_IMSK); spin_unlock_irq(&hw->hw_lock); } @@ -2957,7 +2955,6 @@ static irqreturn_t skge_intr(int irq, void 
*dev_id, struct pt_regs *regs) skge_error_irq(hw); skge_write32(hw, B0_IMSK, hw->intr_mask); - skge_read32(hw, B0_IMSK); spin_unlock(&hw->hw_lock); return IRQ_HANDLED; @@ -3109,6 +3106,7 @@ static int skge_reset(struct skge_hw *hw) else hw->ram_size = t8 * 4096; + spin_lock_init(&hw->hw_lock); hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1; if (hw->ports > 1) hw->intr_mask |= IS_PORT_2; @@ -3334,7 +3332,6 @@ static int __devinit skge_probe(struct pci_dev *pdev, hw->pdev = pdev; mutex_init(&hw->phy_mutex); INIT_WORK(&hw->phy_work, skge_extirq, hw); - spin_lock_init(&hw->hw_lock); hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); if (!hw->regs) { @@ -3343,16 +3340,23 @@ static int __devinit skge_probe(struct pci_dev *pdev, goto err_out_free_hw; } + err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, DRV_NAME, hw); + if (err) { + printk(KERN_ERR PFX "%s: cannot assign irq %d\n", + pci_name(pdev), pdev->irq); + goto err_out_iounmap; + } + pci_set_drvdata(pdev, hw); + err = skge_reset(hw); if (err) - goto err_out_iounmap; + goto err_out_free_irq; printk(KERN_INFO PFX DRV_VERSION " addr 0x%llx irq %d chip %s rev %d\n", (unsigned long long)pci_resource_start(pdev, 0), pdev->irq, skge_board_name(hw), hw->chip_rev); - dev = skge_devinit(hw, 0, using_dac); - if (!dev) + if ((dev = skge_devinit(hw, 0, using_dac)) == NULL) goto err_out_led_off; if (!is_valid_ether_addr(dev->dev_addr)) { @@ -3362,6 +3366,7 @@ static int __devinit skge_probe(struct pci_dev *pdev, goto err_out_free_netdev; } + err = register_netdev(dev); if (err) { printk(KERN_ERR PFX "%s: cannot register net device\n", @@ -3369,12 +3374,6 @@ static int __devinit skge_probe(struct pci_dev *pdev, goto err_out_free_netdev; } - err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw); - if (err) { - printk(KERN_ERR PFX "%s: cannot assign irq %d\n", - dev->name, pdev->irq); - goto err_out_unregister; - } skge_show_addr(dev); if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) { @@ -3387,16 +3386,15 @@ static int __devinit skge_probe(struct pci_dev *pdev, free_netdev(dev1); } } - pci_set_drvdata(pdev, hw); return 0; -err_out_unregister: - unregister_netdev(dev); err_out_free_netdev: free_netdev(dev); err_out_led_off: skge_write16(hw, B0_LED, LED_STAT_OFF); +err_out_free_irq: + free_irq(pdev->irq, hw); err_out_iounmap: iounmap(hw->regs); err_out_free_hw: @@ -3426,7 +3424,6 @@ static void __devexit skge_remove(struct pci_dev *pdev) spin_lock_irq(&hw->hw_lock); hw->intr_mask = 0; skge_write32(hw, B0_IMSK, 0); - skge_read32(hw, B0_IMSK); spin_unlock_irq(&hw->hw_lock); skge_write16(hw, B0_LED, LED_STAT_OFF); @@ -3452,25 +3449,26 @@ static int skge_suspend(struct pci_dev *pdev, pm_message_t state) struct skge_hw *hw = pci_get_drvdata(pdev); int i, wol = 0; - pci_save_state(pdev); - for (i = 0; i < hw->ports; i++) { + for (i = 0; i < 2; i++) { struct net_device *dev = hw->dev[i]; - if (netif_running(dev)) { + if (dev) { struct skge_port *skge = netdev_priv(dev); - - netif_carrier_off(dev); - if (skge->wol) - netif_stop_queue(dev); - else - skge_down(dev); + if (netif_running(dev)) { + netif_carrier_off(dev); + if (skge->wol) + netif_stop_queue(dev); + else + skge_down(dev); + } + netif_device_detach(dev); wol |= skge->wol; } - netif_device_detach(dev); } - skge_write32(hw, B0_IMSK, 0); + pci_save_state(pdev); pci_enable_wake(pdev, pci_choose_state(pdev, state), wol); + pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; @@ -3479,33 +3477,23 @@ static int 
skge_suspend(struct pci_dev *pdev, pm_message_t state) static int skge_resume(struct pci_dev *pdev) { struct skge_hw *hw = pci_get_drvdata(pdev); - int i, err; + int i; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); pci_enable_wake(pdev, PCI_D0, 0); - err = skge_reset(hw); - if (err) - goto out; + skge_reset(hw); - for (i = 0; i < hw->ports; i++) { + for (i = 0; i < 2; i++) { struct net_device *dev = hw->dev[i]; - - netif_device_attach(dev); - if (netif_running(dev)) { - err = skge_up(dev); - - if (err) { - printk(KERN_ERR PFX "%s: could not up: %d\n", - dev->name, err); + if (dev) { + netif_device_attach(dev); + if (netif_running(dev) && skge_up(dev)) dev_close(dev); - goto out; - } } } -out: - return err; + return 0; } #endif diff --git a/trunk/drivers/net/sky2.c b/trunk/drivers/net/sky2.c index f37fe8fabe7b..805a7dcf5508 100644 --- a/trunk/drivers/net/sky2.c +++ b/trunk/drivers/net/sky2.c @@ -50,7 +50,7 @@ #include "sky2.h" #define DRV_NAME "sky2" -#define DRV_VERSION "1.7" +#define DRV_VERSION "1.6" #define PFX DRV_NAME " " /* @@ -195,6 +195,7 @@ static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg) static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) { u16 power_control; + u32 reg1; int vaux; pr_debug("sky2_set_power_state %d\n", state); @@ -227,9 +228,20 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) else sky2_write8(hw, B2_Y2_CLK_GATE, 0); - if (hw->chip_id == CHIP_ID_YUKON_EC_U) { - u32 reg1; + /* Turn off phy power saving */ + reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); + reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); + + /* looks like this XL is back asswards .. */ + if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) { + reg1 |= PCI_Y2_PHY1_COMA; + if (hw->ports > 1) + reg1 |= PCI_Y2_PHY2_COMA; + } + sky2_pci_write32(hw, PCI_DEV_REG1, reg1); + udelay(100); + if (hw->chip_id == CHIP_ID_YUKON_EC_U) { sky2_pci_write32(hw, PCI_DEV_REG3, 0); reg1 = sky2_pci_read32(hw, PCI_DEV_REG4); reg1 &= P_ASPM_CONTROL_MSK; @@ -241,6 +253,15 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) case PCI_D3hot: case PCI_D3cold: + /* Turn on phy power saving */ + reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); + if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) + reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); + else + reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); + sky2_pci_write32(hw, PCI_DEV_REG1, reg1); + udelay(100); + if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) sky2_write8(hw, B2_Y2_CLK_GATE, 0); else @@ -264,7 +285,7 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); } -static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port) +static void sky2_phy_reset(struct sky2_hw *hw, unsigned port) { u16 reg; @@ -512,29 +533,6 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); } -static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff) -{ - u32 reg1; - static const u32 phy_power[] - = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD }; - - /* looks like this XL is back asswards .. 
*/ - if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) - onoff = !onoff; - - reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); - - if (onoff) - /* Turn off phy power saving */ - reg1 &= ~phy_power[port]; - else - reg1 |= phy_power[port]; - - sky2_pci_write32(hw, PCI_DEV_REG1, reg1); - sky2_pci_read32(hw, PCI_DEV_REG1); - udelay(100); -} - /* Force a renegotiation */ static void sky2_phy_reinit(struct sky2_port *sky2) { @@ -767,10 +765,9 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2) /* Update chip's next pointer */ static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx) { - q = Y2_QADDR(q, PREF_UNIT_PUT_IDX); wmb(); - sky2_write16(hw, q, idx); - sky2_read16(hw, q); + sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx); + mmiowb(); } @@ -957,16 +954,14 @@ static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) /* * It appears the hardware has a bug in the FIFO logic that * cause it to hang if the FIFO gets overrun and the receive buffer - * is not 64 byte aligned. The buffer returned from netdev_alloc_skb is - * aligned except if slab debugging is enabled. + * is not aligned. ALso alloc_skb() won't align properly if slab + * debugging is enabled. */ -static inline struct sk_buff *sky2_alloc_skb(struct net_device *dev, - unsigned int length, - gfp_t gfp_mask) +static inline struct sk_buff *sky2_alloc_skb(unsigned int size, gfp_t gfp_mask) { struct sk_buff *skb; - skb = __netdev_alloc_skb(dev, length + RX_SKB_ALIGN, gfp_mask); + skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask); if (likely(skb)) { unsigned long p = (unsigned long) skb->data; skb_reserve(skb, ALIGN(p, RX_SKB_ALIGN) - p); @@ -1002,8 +997,7 @@ static int sky2_rx_start(struct sky2_port *sky2) for (i = 0; i < sky2->rx_pending; i++) { struct ring_info *re = sky2->rx_ring + i; - re->skb = sky2_alloc_skb(sky2->netdev, sky2->rx_bufsize, - GFP_KERNEL); + re->skb = sky2_alloc_skb(sky2->rx_bufsize, GFP_KERNEL); if (!re->skb) goto nomem; @@ -1091,8 +1085,6 @@ static int sky2_up(struct net_device *dev) if (!sky2->rx_ring) goto err_out; - sky2_phy_power(hw, port, 1); - sky2_mac_init(hw, port); /* Determine available ram buffer space (in 4K blocks). 
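
sky2_alloc_skb() above over-allocates the receive buffer and then uses skb_reserve() to round the data pointer up to the boundary the hardware needs, which is the standard align-up arithmetic. A small self-contained illustration of that arithmetic follows; the 64-byte boundary and the ALIGN_UP() macro are assumptions for the example, not values taken from sky2.h.

/* Sketch only: align a buffer's data pointer by over-allocating and
 * skipping the first few bytes, as the skb_reserve() call above does. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define RX_ALIGN 64u
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
        size_t len = 1500;
        unsigned char *raw = malloc(len + RX_ALIGN);    /* over-allocate */
        uintptr_t p, skip;

        if (!raw)
                return 1;

        p = (uintptr_t)raw;
        skip = ALIGN_UP(p, RX_ALIGN) - p;   /* 0..RX_ALIGN-1 bytes skipped */
        printf("raw=%p data=%p (skipped %zu bytes)\n",
               (void *)raw, (void *)(raw + skip), (size_t)skip);

        free(raw);
        return 0;
}
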
@@ -1197,6 +1189,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) struct sky2_tx_le *le = NULL; struct tx_ring_info *re; unsigned i, len; + int avail; dma_addr_t mapping; u32 addr64; u16 mss; @@ -1246,18 +1239,25 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) /* Check for TCP Segmentation Offload */ mss = skb_shinfo(skb)->gso_size; if (mss != 0) { + /* just drop the packet if non-linear expansion fails */ + if (skb_header_cloned(skb) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { + dev_kfree_skb(skb); + goto out_unlock; + } + mss += ((skb->h.th->doff - 5) * 4); /* TCP options */ mss += (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr); mss += ETH_HLEN; + } - if (mss != sky2->tx_last_mss) { - le = get_tx_le(sky2); - le->tx.tso.size = cpu_to_le16(mss); - le->tx.tso.rsvd = 0; - le->opcode = OP_LRGLEN | HW_OWNER; - le->ctrl = 0; - sky2->tx_last_mss = mss; - } + if (mss != sky2->tx_last_mss) { + le = get_tx_le(sky2); + le->tx.tso.size = cpu_to_le16(mss); + le->tx.tso.rsvd = 0; + le->opcode = OP_LRGLEN | HW_OWNER; + le->ctrl = 0; + sky2->tx_last_mss = mss; } ctrl = 0; @@ -1285,17 +1285,12 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) if (skb->nh.iph->protocol == IPPROTO_UDP) ctrl |= UDPTCP; - if (hdr != sky2->tx_csum_start || offset != sky2->tx_csum_offset) { - sky2->tx_csum_start = hdr; - sky2->tx_csum_offset = offset; - - le = get_tx_le(sky2); - le->tx.csum.start = cpu_to_le16(hdr); - le->tx.csum.offset = cpu_to_le16(offset); - le->length = 0; /* initial checksum value */ - le->ctrl = 1; /* one packet */ - le->opcode = OP_TCPLISW | HW_OWNER; - } + le = get_tx_le(sky2); + le->tx.csum.start = cpu_to_le16(hdr); + le->tx.csum.offset = cpu_to_le16(offset); + le->length = 0; /* initial checksum value */ + le->ctrl = 1; /* one packet */ + le->opcode = OP_TCPLISW | HW_OWNER; } le = get_tx_le(sky2); @@ -1330,18 +1325,23 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) le->opcode = OP_BUFFER | HW_OWNER; fre = sky2->tx_ring - + RING_NEXT((re - sky2->tx_ring) + i, TX_RING_SIZE); + + RING_NEXT((re - sky2->tx_ring) + i, TX_RING_SIZE); pci_unmap_addr_set(fre, mapaddr, mapping); } re->idx = sky2->tx_prod; le->ctrl |= EOP; - if (tx_avail(sky2) <= MAX_SKB_TX_LE) - netif_stop_queue(dev); + avail = tx_avail(sky2); + if (mss != 0 || avail < TX_MIN_PENDING) { + le->ctrl |= FRC_STAT; + if (avail <= MAX_SKB_TX_LE) + netif_stop_queue(dev); + } sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod); +out_unlock: spin_unlock(&sky2->tx_lock); dev->trans_start = jiffies; @@ -1426,7 +1426,7 @@ static int sky2_down(struct net_device *dev) /* Stop more packets from being queued */ netif_stop_queue(dev); - sky2_gmac_reset(hw, port); + sky2_phy_reset(hw, port); /* Stop transmitter */ sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP); @@ -1474,8 +1474,6 @@ static int sky2_down(struct net_device *dev) imask &= ~portirq_msk[port]; sky2_write32(hw, B0_IMSK, imask); - sky2_phy_power(hw, port, 0); - /* turn off LED's */ sky2_write16(hw, B0_Y2LED, LED_STAT_OFF); @@ -1839,16 +1837,15 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) * For small packets or errors, just reuse existing skb. * For larger packets, get new buffer. 
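
The comment above describes the classic copybreak receive strategy: below a size threshold it is cheaper to copy the frame into a small freshly allocated buffer and recycle the DMA buffer in place, while larger frames are handed up whole and replaced in the ring. A rough sketch of that decision, with the threshold value and the alloc() callback as illustrative placeholders:

/* Sketch only: the copybreak decision used in the receive path below. */
#include <stddef.h>
#include <string.h>

#define COPYBREAK 256   /* illustrative threshold; tunable in the driver */

struct buf {
        unsigned char *data;
        size_t len;
};

/* Returns the buffer to pass up the stack, or NULL on allocation failure.
 * On the small-packet path *ring_buf stays in the RX ring untouched. */
struct buf *rx_copybreak(struct buf *ring_buf, struct buf *(*alloc)(size_t))
{
        struct buf *skb;

        if (ring_buf->len < COPYBREAK) {
                /* Small frame: copy out, recycle ring_buf for the next one. */
                skb = alloc(ring_buf->len);
                if (!skb)
                        return NULL;
                memcpy(skb->data, ring_buf->data, ring_buf->len);
                skb->len = ring_buf->len;
                return skb;
        }

        /* Large frame: the caller must map a replacement buffer into the
         * ring before handing this one up; that swap is not shown here. */
        return ring_buf;
}
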
*/ -static struct sk_buff *sky2_receive(struct net_device *dev, +static struct sk_buff *sky2_receive(struct sky2_port *sky2, u16 length, u32 status) { - struct sky2_port *sky2 = netdev_priv(dev); struct ring_info *re = sky2->rx_ring + sky2->rx_next; struct sk_buff *skb = NULL; if (unlikely(netif_msg_rx_status(sky2))) printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n", - dev->name, sky2->rx_next, status, length); + sky2->netdev->name, sky2->rx_next, status, length); sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; prefetch(sky2->rx_ring + sky2->rx_next); @@ -1859,11 +1856,11 @@ static struct sk_buff *sky2_receive(struct net_device *dev, if (!(status & GMR_FS_RX_OK)) goto resubmit; - if (length > dev->mtu + ETH_HLEN) + if (length > sky2->netdev->mtu + ETH_HLEN) goto oversize; if (length < copybreak) { - skb = netdev_alloc_skb(dev, length + 2); + skb = alloc_skb(length + 2, GFP_ATOMIC); if (!skb) goto resubmit; @@ -1878,7 +1875,7 @@ static struct sk_buff *sky2_receive(struct net_device *dev, } else { struct sk_buff *nskb; - nskb = sky2_alloc_skb(dev, sky2->rx_bufsize, GFP_ATOMIC); + nskb = sky2_alloc_skb(sky2->rx_bufsize, GFP_ATOMIC); if (!nskb) goto resubmit; @@ -1908,7 +1905,7 @@ static struct sk_buff *sky2_receive(struct net_device *dev, if (netif_msg_rx_err(sky2) && net_ratelimit()) printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n", - dev->name, status, length); + sky2->netdev->name, status, length); if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE)) sky2->net_stats.rx_length_errors++; @@ -1962,10 +1959,11 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do) switch (le->opcode & ~HW_OWNER) { case OP_RXSTAT: - skb = sky2_receive(dev, length, status); + skb = sky2_receive(sky2, length, status); if (!skb) break; + skb->dev = dev; skb->protocol = eth_type_trans(skb, dev); dev->last_rx = jiffies; @@ -2410,7 +2408,7 @@ static int sky2_reset(struct sky2_hw *hw) sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK); for (i = 0; i < hw->ports; i++) - sky2_gmac_reset(hw, i); + sky2_phy_reset(hw, i); memset(hw->st_le, 0, STATUS_LE_BYTES); hw->st_idx = 0; @@ -3201,8 +3199,6 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw) struct pci_dev *pdev = hw->pdev; int err; - init_waitqueue_head (&hw->msi_wait); - sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW); err = request_irq(pdev->irq, sky2_test_intr, IRQF_SHARED, DRV_NAME, hw); @@ -3212,8 +3208,10 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw) return err; } + init_waitqueue_head (&hw->msi_wait); + sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ); - sky2_read8(hw, B0_CTST); + wmb(); wait_event_timeout(hw->msi_wait, hw->msi_detected, HZ/10); diff --git a/trunk/drivers/net/sky2.h b/trunk/drivers/net/sky2.h index fa8af9f503e4..2db8d19b22d1 100644 --- a/trunk/drivers/net/sky2.h +++ b/trunk/drivers/net/sky2.h @@ -1748,6 +1748,7 @@ enum { INIT_SUM= 1<<3, LOCK_SUM= 1<<4, INS_VLAN= 1<<5, + FRC_STAT= 1<<6, EOP = 1<<7, }; @@ -1843,8 +1844,6 @@ struct sky2_port { u32 tx_addr64; u16 tx_pending; u16 tx_last_mss; - u16 tx_csum_start; - u16 tx_csum_offset; struct ring_info *rx_ring ____cacheline_aligned_in_smp; struct sky2_rx_le *rx_le; diff --git a/trunk/drivers/pci/hotplug/Kconfig b/trunk/drivers/pci/hotplug/Kconfig index 8a60f391ffcf..3c148eaf2f4d 100644 --- a/trunk/drivers/pci/hotplug/Kconfig +++ b/trunk/drivers/pci/hotplug/Kconfig @@ -76,7 +76,7 @@ config HOTPLUG_PCI_IBM config HOTPLUG_PCI_ACPI tristate "ACPI PCI Hotplug driver" - depends on (!ACPI_DOCK && ACPI && HOTPLUG_PCI) || (ACPI_DOCK && HOTPLUG_PCI) + depends on ACPI_DOCK && 
HOTPLUG_PCI help Say Y here if you have a system that supports PCI Hotplug using ACPI. diff --git a/trunk/drivers/pci/hotplug/cpci_hotplug_pci.c b/trunk/drivers/pci/hotplug/cpci_hotplug_pci.c index 4afcaffd031c..02be74caa89f 100644 --- a/trunk/drivers/pci/hotplug/cpci_hotplug_pci.c +++ b/trunk/drivers/pci/hotplug/cpci_hotplug_pci.c @@ -254,8 +254,8 @@ int cpci_led_off(struct slot* slot) int cpci_configure_slot(struct slot* slot) { - struct pci_bus *parent; - int fn; + unsigned char busnr; + struct pci_bus *child; dbg("%s - enter", __FUNCTION__); @@ -276,53 +276,23 @@ int cpci_configure_slot(struct slot* slot) */ n = pci_scan_slot(slot->bus, slot->devfn); dbg("%s: pci_scan_slot returned %d", __FUNCTION__, n); + if (n > 0) + pci_bus_add_devices(slot->bus); slot->dev = pci_get_slot(slot->bus, slot->devfn); if (slot->dev == NULL) { err("Could not find PCI device for slot %02x", slot->number); - return -ENODEV; + return 1; } } - parent = slot->dev->bus; - - for (fn = 0; fn < 8; fn++) { - struct pci_dev *dev; - - dev = pci_get_slot(parent, PCI_DEVFN(PCI_SLOT(slot->devfn), fn)); - if (!dev) - continue; - if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) || - (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) { - /* Find an unused bus number for the new bridge */ - struct pci_bus *child; - unsigned char busnr, start = parent->secondary; - unsigned char end = parent->subordinate; - - for (busnr = start; busnr <= end; busnr++) { - if (!pci_find_bus(pci_domain_nr(parent), - busnr)) - break; - } - if (busnr >= end) { - err("No free bus for hot-added bridge\n"); - pci_dev_put(dev); - continue; - } - child = pci_add_new_bus(parent, dev, busnr); - if (!child) { - err("Cannot add new bus for %s\n", - pci_name(dev)); - pci_dev_put(dev); - continue; - } - child->subordinate = pci_do_scan_bus(child); - pci_bus_size_bridges(child); - } - pci_dev_put(dev); + + if (slot->dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { + pci_read_config_byte(slot->dev, PCI_SECONDARY_BUS, &busnr); + child = pci_add_new_bus(slot->dev->bus, slot->dev, busnr); + pci_do_scan_bus(child); + pci_bus_size_bridges(child); } - pci_bus_assign_resources(parent); - pci_bus_add_devices(parent); - pci_enable_bridges(parent); + pci_bus_assign_resources(slot->dev->bus); dbg("%s - exit", __FUNCTION__); return 0; diff --git a/trunk/drivers/pci/pci-driver.c b/trunk/drivers/pci/pci-driver.c index 474e9cd0e9e4..10e1a905c144 100644 --- a/trunk/drivers/pci/pci-driver.c +++ b/trunk/drivers/pci/pci-driver.c @@ -139,8 +139,9 @@ const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, /** * pci_match_device - Tell if a PCI device structure has a matching * PCI device id structure - * @drv: the PCI driver to match against + * @ids: array of PCI device id structures to search in * @dev: the PCI device structure to match against + * @drv: the PCI driver to match against * * Used by a driver to check whether a PCI device present in the * system is in its list of supported devices. 
Returns the matching diff --git a/trunk/drivers/pci/quirks.c b/trunk/drivers/pci/quirks.c index 73177429fe74..fb08bc951ac0 100644 --- a/trunk/drivers/pci/quirks.c +++ b/trunk/drivers/pci/quirks.c @@ -438,7 +438,6 @@ static void __devinit quirk_ich6_lpc_acpi(struct pci_dev *dev) pci_read_config_dword(dev, 0x48, ®ion); quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO"); } -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc_acpi ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc_acpi ); /* @@ -1092,6 +1091,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asu DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc ); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc ); static void __init asus_hides_smbus_lpc_ich6(struct pci_dev *dev) { @@ -1518,63 +1518,6 @@ static void __devinit quirk_netmos(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos); -static void __devinit quirk_e100_interrupt(struct pci_dev *dev) -{ - u16 command; - u32 bar; - u8 __iomem *csr; - u8 cmd_hi; - - switch (dev->device) { - /* PCI IDs taken from drivers/net/e100.c */ - case 0x1029: - case 0x1030 ... 0x1034: - case 0x1038 ... 0x103E: - case 0x1050 ... 0x1057: - case 0x1059: - case 0x1064 ... 0x106B: - case 0x1091 ... 0x1095: - case 0x1209: - case 0x1229: - case 0x2449: - case 0x2459: - case 0x245D: - case 0x27DC: - break; - default: - return; - } - - /* - * Some firmware hands off the e100 with interrupts enabled, - * which can cause a flood of interrupts if packets are - * received before the driver attaches to the device. So - * disable all e100 interrupts here. The driver will - * re-enable them when it's ready. 
- */ - pci_read_config_word(dev, PCI_COMMAND, &command); - pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &bar); - - if (!(command & PCI_COMMAND_MEMORY) || !bar) - return; - - csr = ioremap(bar, 8); - if (!csr) { - printk(KERN_WARNING "PCI: Can't map %s e100 registers\n", - pci_name(dev)); - return; - } - - cmd_hi = readb(csr + 3); - if (cmd_hi == 0) { - printk(KERN_WARNING "PCI: Firmware left %s e100 interrupts " - "enabled, disabling\n", pci_name(dev)); - writeb(1, csr + 3); - } - - iounmap(csr); -} -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_e100_interrupt); static void __devinit fixup_rev1_53c810(struct pci_dev* dev) { diff --git a/trunk/drivers/rtc/rtc-s3c.c b/trunk/drivers/rtc/rtc-s3c.c index 2c7de79c83b9..d6d1bff52b8e 100644 --- a/trunk/drivers/rtc/rtc-s3c.c +++ b/trunk/drivers/rtc/rtc-s3c.c @@ -69,12 +69,12 @@ static void s3c_rtc_setaie(int to) pr_debug("%s: aie=%d\n", __FUNCTION__, to); - tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN; + tmp = readb(S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN; if (to) tmp |= S3C2410_RTCALM_ALMEN; - writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); + writeb(tmp, S3C2410_RTCALM); } static void s3c_rtc_setpie(int to) @@ -84,12 +84,12 @@ static void s3c_rtc_setpie(int to) pr_debug("%s: pie=%d\n", __FUNCTION__, to); spin_lock_irq(&s3c_rtc_pie_lock); - tmp = readb(s3c_rtc_base + S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE; + tmp = readb(S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE; if (to) tmp |= S3C2410_TICNT_ENABLE; - writeb(tmp, s3c_rtc_base + S3C2410_TICNT); + writeb(tmp, S3C2410_TICNT); spin_unlock_irq(&s3c_rtc_pie_lock); } @@ -98,13 +98,13 @@ static void s3c_rtc_setfreq(int freq) unsigned int tmp; spin_lock_irq(&s3c_rtc_pie_lock); - tmp = readb(s3c_rtc_base + S3C2410_TICNT) & S3C2410_TICNT_ENABLE; + tmp = readb(S3C2410_TICNT) & S3C2410_TICNT_ENABLE; s3c_rtc_freq = freq; tmp |= (128 / freq)-1; - writeb(tmp, s3c_rtc_base + S3C2410_TICNT); + writeb(tmp, S3C2410_TICNT); spin_unlock_irq(&s3c_rtc_pie_lock); } @@ -113,15 +113,14 @@ static void s3c_rtc_setfreq(int freq) static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) { unsigned int have_retried = 0; - void __iomem *base = s3c_rtc_base; retry_get_time: - rtc_tm->tm_min = readb(base + S3C2410_RTCMIN); - rtc_tm->tm_hour = readb(base + S3C2410_RTCHOUR); - rtc_tm->tm_mday = readb(base + S3C2410_RTCDATE); - rtc_tm->tm_mon = readb(base + S3C2410_RTCMON); - rtc_tm->tm_year = readb(base + S3C2410_RTCYEAR); - rtc_tm->tm_sec = readb(base + S3C2410_RTCSEC); + rtc_tm->tm_min = readb(S3C2410_RTCMIN); + rtc_tm->tm_hour = readb(S3C2410_RTCHOUR); + rtc_tm->tm_mday = readb(S3C2410_RTCDATE); + rtc_tm->tm_mon = readb(S3C2410_RTCMON); + rtc_tm->tm_year = readb(S3C2410_RTCYEAR); + rtc_tm->tm_sec = readb(S3C2410_RTCSEC); /* the only way to work out wether the system was mid-update * when we read it is to check the second counter, and if it @@ -152,26 +151,17 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm) { - void __iomem *base = s3c_rtc_base; - int year = tm->tm_year - 100; + /* the rtc gets round the y2k problem by just not supporting it */ - pr_debug("set time %02d.%02d.%02d %02d/%02d/%02d\n", - tm->tm_year, tm->tm_mon, tm->tm_mday, - tm->tm_hour, tm->tm_min, tm->tm_sec); - - /* we get around y2k by simply not supporting it */ - - if (year < 0 || year >= 100) { - dev_err(dev, "rtc only supports 100 years\n"); + if (tm->tm_year < 100) return -EINVAL; - } - writeb(BIN2BCD(tm->tm_sec), base + 
S3C2410_RTCSEC); - writeb(BIN2BCD(tm->tm_min), base + S3C2410_RTCMIN); - writeb(BIN2BCD(tm->tm_hour), base + S3C2410_RTCHOUR); - writeb(BIN2BCD(tm->tm_mday), base + S3C2410_RTCDATE); - writeb(BIN2BCD(tm->tm_mon + 1), base + S3C2410_RTCMON); - writeb(BIN2BCD(year), base + S3C2410_RTCYEAR); + writeb(BIN2BCD(tm->tm_sec), S3C2410_RTCSEC); + writeb(BIN2BCD(tm->tm_min), S3C2410_RTCMIN); + writeb(BIN2BCD(tm->tm_hour), S3C2410_RTCHOUR); + writeb(BIN2BCD(tm->tm_mday), S3C2410_RTCDATE); + writeb(BIN2BCD(tm->tm_mon + 1), S3C2410_RTCMON); + writeb(BIN2BCD(tm->tm_year - 100), S3C2410_RTCYEAR); return 0; } @@ -179,17 +169,16 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm) static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm) { struct rtc_time *alm_tm = &alrm->time; - void __iomem *base = s3c_rtc_base; unsigned int alm_en; - alm_tm->tm_sec = readb(base + S3C2410_ALMSEC); - alm_tm->tm_min = readb(base + S3C2410_ALMMIN); - alm_tm->tm_hour = readb(base + S3C2410_ALMHOUR); - alm_tm->tm_mon = readb(base + S3C2410_ALMMON); - alm_tm->tm_mday = readb(base + S3C2410_ALMDATE); - alm_tm->tm_year = readb(base + S3C2410_ALMYEAR); + alm_tm->tm_sec = readb(S3C2410_ALMSEC); + alm_tm->tm_min = readb(S3C2410_ALMMIN); + alm_tm->tm_hour = readb(S3C2410_ALMHOUR); + alm_tm->tm_mon = readb(S3C2410_ALMMON); + alm_tm->tm_mday = readb(S3C2410_ALMDATE); + alm_tm->tm_year = readb(S3C2410_ALMYEAR); - alm_en = readb(base + S3C2410_RTCALM); + alm_en = readb(S3C2410_RTCALM); pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n", alm_en, @@ -237,7 +226,6 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm) static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) { struct rtc_time *tm = &alrm->time; - void __iomem *base = s3c_rtc_base; unsigned int alrm_en; pr_debug("s3c_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n", @@ -246,32 +234,32 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec); - alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN; - writeb(0x00, base + S3C2410_RTCALM); + alrm_en = readb(S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN; + writeb(0x00, S3C2410_RTCALM); if (tm->tm_sec < 60 && tm->tm_sec >= 0) { alrm_en |= S3C2410_RTCALM_SECEN; - writeb(BIN2BCD(tm->tm_sec), base + S3C2410_ALMSEC); + writeb(BIN2BCD(tm->tm_sec), S3C2410_ALMSEC); } if (tm->tm_min < 60 && tm->tm_min >= 0) { alrm_en |= S3C2410_RTCALM_MINEN; - writeb(BIN2BCD(tm->tm_min), base + S3C2410_ALMMIN); + writeb(BIN2BCD(tm->tm_min), S3C2410_ALMMIN); } if (tm->tm_hour < 24 && tm->tm_hour >= 0) { alrm_en |= S3C2410_RTCALM_HOUREN; - writeb(BIN2BCD(tm->tm_hour), base + S3C2410_ALMHOUR); + writeb(BIN2BCD(tm->tm_hour), S3C2410_ALMHOUR); } pr_debug("setting S3C2410_RTCALM to %08x\n", alrm_en); - writeb(alrm_en, base + S3C2410_RTCALM); + writeb(alrm_en, S3C2410_RTCALM); if (0) { - alrm_en = readb(base + S3C2410_RTCALM); + alrm_en = readb(S3C2410_RTCALM); alrm_en &= ~S3C2410_RTCALM_ALMEN; - writeb(alrm_en, base + S3C2410_RTCALM); + writeb(alrm_en, S3C2410_RTCALM); disable_irq_wake(s3c_rtc_alarmno); } @@ -331,8 +319,8 @@ static int s3c_rtc_ioctl(struct device *dev, static int s3c_rtc_proc(struct device *dev, struct seq_file *seq) { - unsigned int rtcalm = readb(s3c_rtc_base + S3C2410_RTCALM); - unsigned int ticnt = readb(s3c_rtc_base + S3C2410_TICNT); + unsigned int rtcalm = readb(S3C2410_RTCALM); + unsigned int ticnt = readb (S3C2410_TICNT); seq_printf(seq, "alarm_IRQ\t: %s\n", (rtcalm & 
S3C2410_RTCALM_ALMEN) ? "yes" : "no" ); @@ -399,40 +387,39 @@ static struct rtc_class_ops s3c_rtcops = { static void s3c_rtc_enable(struct platform_device *pdev, int en) { - void __iomem *base = s3c_rtc_base; unsigned int tmp; if (s3c_rtc_base == NULL) return; if (!en) { - tmp = readb(base + S3C2410_RTCCON); - writeb(tmp & ~S3C2410_RTCCON_RTCEN, base + S3C2410_RTCCON); + tmp = readb(S3C2410_RTCCON); + writeb(tmp & ~S3C2410_RTCCON_RTCEN, S3C2410_RTCCON); - tmp = readb(base + S3C2410_TICNT); - writeb(tmp & ~S3C2410_TICNT_ENABLE, base + S3C2410_TICNT); + tmp = readb(S3C2410_TICNT); + writeb(tmp & ~S3C2410_TICNT_ENABLE, S3C2410_TICNT); } else { /* re-enable the device, and check it is ok */ - if ((readb(base+S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0){ + if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0){ dev_info(&pdev->dev, "rtc disabled, re-enabling\n"); - tmp = readb(base + S3C2410_RTCCON); - writeb(tmp|S3C2410_RTCCON_RTCEN, base+S3C2410_RTCCON); + tmp = readb(S3C2410_RTCCON); + writeb(tmp | S3C2410_RTCCON_RTCEN , S3C2410_RTCCON); } - if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)){ + if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)){ dev_info(&pdev->dev, "removing RTCCON_CNTSEL\n"); - tmp = readb(base + S3C2410_RTCCON); - writeb(tmp& ~S3C2410_RTCCON_CNTSEL, base+S3C2410_RTCCON); + tmp = readb(S3C2410_RTCCON); + writeb(tmp& ~S3C2410_RTCCON_CNTSEL , S3C2410_RTCCON); } - if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)){ + if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)){ dev_info(&pdev->dev, "removing RTCCON_CLKRST\n"); - tmp = readb(base + S3C2410_RTCCON); - writeb(tmp & ~S3C2410_RTCCON_CLKRST, base+S3C2410_RTCCON); + tmp = readb(S3C2410_RTCCON); + writeb(tmp & ~S3C2410_RTCCON_CLKRST, S3C2410_RTCCON); } } } @@ -488,8 +475,8 @@ static int s3c_rtc_probe(struct platform_device *pdev) } s3c_rtc_mem = request_mem_region(res->start, - res->end-res->start+1, - pdev->name); + res->end-res->start+1, + pdev->name); if (s3c_rtc_mem == NULL) { dev_err(&pdev->dev, "failed to reserve memory region\n"); @@ -508,8 +495,7 @@ static int s3c_rtc_probe(struct platform_device *pdev) s3c_rtc_enable(pdev, 1); - pr_debug("s3c2410_rtc: RTCCON=%02x\n", - readb(s3c_rtc_base + S3C2410_RTCCON)); + pr_debug("s3c2410_rtc: RTCCON=%02x\n", readb(S3C2410_RTCCON)); s3c_rtc_setfreq(s3c_rtc_freq); @@ -557,7 +543,7 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state) /* save TICNT for anyone using periodic interrupts */ - ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT); + ticnt_save = readb(S3C2410_TICNT); /* calculate time delta for suspend */ @@ -581,7 +567,7 @@ static int s3c_rtc_resume(struct platform_device *pdev) rtc_tm_to_time(&tm, &time.tv_sec); restore_time_delta(&s3c_rtc_delta, &time); - writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT); + writeb(ticnt_save, S3C2410_TICNT); return 0; } #else diff --git a/trunk/drivers/s390/block/dasd_devmap.c b/trunk/drivers/s390/block/dasd_devmap.c index 9af02c79ce8a..9d0c6e1a0e66 100644 --- a/trunk/drivers/s390/block/dasd_devmap.c +++ b/trunk/drivers/s390/block/dasd_devmap.c @@ -54,11 +54,11 @@ struct dasd_devmap { */ struct dasd_server_ssid_map { struct list_head list; - struct system_id { + struct server_id { char vendor[4]; char serial[15]; - __u16 ssid; } sid; + __u16 ssid; }; static struct list_head dasd_server_ssid_list; @@ -904,14 +904,14 @@ dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid) return -ENOMEM; strncpy(srv->sid.vendor, uid->vendor, sizeof(srv->sid.vendor) - 1); strncpy(srv->sid.serial, 
uid->serial, sizeof(srv->sid.serial) - 1); - srv->sid.ssid = uid->ssid; + srv->ssid = uid->ssid; /* server is already contained ? */ spin_lock(&dasd_devmap_lock); devmap->uid = *uid; list_for_each_entry(tmp, &dasd_server_ssid_list, list) { if (!memcmp(&srv->sid, &tmp->sid, - sizeof(struct system_id))) { + sizeof(struct dasd_server_ssid_map))) { kfree(srv); srv = NULL; break; diff --git a/trunk/drivers/s390/block/dasd_eckd.c b/trunk/drivers/s390/block/dasd_eckd.c index b7a7fac3f7c3..957ed5db98e4 100644 --- a/trunk/drivers/s390/block/dasd_eckd.c +++ b/trunk/drivers/s390/block/dasd_eckd.c @@ -607,7 +607,7 @@ dasd_eckd_psf_ssc(struct dasd_device *device) * Valide storage server of current device. */ static int -dasd_eckd_validate_server(struct dasd_device *device, struct dasd_uid *uid) +dasd_eckd_validate_server(struct dasd_device *device) { int rc; @@ -616,11 +616,11 @@ dasd_eckd_validate_server(struct dasd_device *device, struct dasd_uid *uid) return 0; rc = dasd_eckd_psf_ssc(device); - /* may be requested feature is not available on server, - * therefore just report error and go ahead */ - DEV_MESSAGE(KERN_INFO, device, - "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d", - uid->vendor, uid->serial, uid->ssid, rc); + if (rc) + /* may be requested feature is not available on server, + * therefore just report error and go ahead */ + DEV_MESSAGE(KERN_INFO, device, + "Perform Subsystem Function returned rc=%d", rc); /* RE-Read Configuration Data */ return dasd_eckd_read_conf(device); } @@ -666,7 +666,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device) return rc; rc = dasd_set_uid(device->cdev, &uid); if (rc == 1) /* new server found */ - rc = dasd_eckd_validate_server(device, &uid); + rc = dasd_eckd_validate_server(device); if (rc) return rc; diff --git a/trunk/drivers/s390/scsi/zfcp_aux.c b/trunk/drivers/s390/scsi/zfcp_aux.c index adc9d8f2c28f..9cd789b8acd4 100644 --- a/trunk/drivers/s390/scsi/zfcp_aux.c +++ b/trunk/drivers/s390/scsi/zfcp_aux.c @@ -112,105 +112,6 @@ _zfcp_hex_dump(char *addr, int count) printk("\n"); } - -/****************************************************************/ -/****** Functions to handle the request ID hash table ********/ -/****************************************************************/ - -#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF - -static int zfcp_reqlist_init(struct zfcp_adapter *adapter) -{ - int i; - - adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head), - GFP_KERNEL); - - if (!adapter->req_list) - return -ENOMEM; - - for (i=0; ireq_list[i]); - - return 0; -} - -static void zfcp_reqlist_free(struct zfcp_adapter *adapter) -{ - struct zfcp_fsf_req *request, *tmp; - unsigned int i; - - for (i=0; ireq_list[i])) - continue; - - list_for_each_entry_safe(request, tmp, - &adapter->req_list[i], list) - list_del(&request->list); - } - - kfree(adapter->req_list); -} - -void zfcp_reqlist_add(struct zfcp_adapter *adapter, - struct zfcp_fsf_req *fsf_req) -{ - unsigned int i; - - i = fsf_req->req_id % REQUEST_LIST_SIZE; - list_add_tail(&fsf_req->list, &adapter->req_list[i]); -} - -void zfcp_reqlist_remove(struct zfcp_adapter *adapter, unsigned long req_id) -{ - struct zfcp_fsf_req *request, *tmp; - unsigned int i, counter; - u64 dbg_tmp[2]; - - i = req_id % REQUEST_LIST_SIZE; - BUG_ON(list_empty(&adapter->req_list[i])); - - counter = 0; - list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) { - if (request->req_id == req_id) { - dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active); - dbg_tmp[1] = (u64) counter; - 
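
The zfcp_reqlist_* helpers being removed above keep outstanding FSF requests in a fixed-size hash keyed by request ID, one list per bucket, with the bucket chosen by req_id % REQUEST_LIST_SIZE. Stripped of the kernel list_head machinery, the scheme is roughly the following; the singly linked chaining and the bucket count are illustrative simplifications, not the driver code:

/* Sketch only: request-ID hash with per-bucket chaining. */
#include <stddef.h>

#define NBUCKETS 128    /* mirrors REQUEST_LIST_SIZE */

struct req {
        unsigned long id;
        struct req *next;       /* chain within one bucket */
};

static struct req *buckets[NBUCKETS];

void reqlist_add(struct req *r)
{
        unsigned int i = r->id % NBUCKETS;

        r->next = buckets[i];
        buckets[i] = r;
}

struct req *reqlist_find(unsigned long id)
{
        struct req *r;

        for (r = buckets[id % NBUCKETS]; r; r = r->next)
                if (r->id == id)
                        return r;
        return NULL;
}

void reqlist_remove(unsigned long id)
{
        struct req **pp = &buckets[id % NBUCKETS];

        while (*pp) {
                if ((*pp)->id == id) {
                        *pp = (*pp)->next;      /* unlink; caller owns node */
                        return;
                }
                pp = &(*pp)->next;
        }
}
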
debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16); - list_del(&request->list); - break; - } - counter++; - } -} - -struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *adapter, - unsigned long req_id) -{ - struct zfcp_fsf_req *request, *tmp; - unsigned int i; - - i = req_id % REQUEST_LIST_SIZE; - - list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) - if (request->req_id == req_id) - return request; - - return NULL; -} - -int zfcp_reqlist_isempty(struct zfcp_adapter *adapter) -{ - unsigned int i; - - for (i=0; ireq_list[i])) - return 0; - - return 1; -} - -#undef ZFCP_LOG_AREA - /****************************************************************/ /************** Uncategorised Functions *************************/ /****************************************************************/ @@ -1060,12 +961,8 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device) INIT_LIST_HEAD(&adapter->port_remove_lh); /* initialize list of fsf requests */ - spin_lock_init(&adapter->req_list_lock); - retval = zfcp_reqlist_init(adapter); - if (retval) { - ZFCP_LOG_INFO("request list initialization failed\n"); - goto failed_low_mem_buffers; - } + spin_lock_init(&adapter->fsf_req_list_lock); + INIT_LIST_HEAD(&adapter->fsf_req_list_head); /* initialize debug locks */ @@ -1144,6 +1041,8 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device) * !0 - struct zfcp_adapter data structure could not be removed * (e.g. still used) * locks: adapter list write lock is assumed to be held by caller + * adapter->fsf_req_list_lock is taken and released within this + * function and must not be held on entry */ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) @@ -1155,14 +1054,14 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter) zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev); dev_set_drvdata(&adapter->ccw_device->dev, NULL); /* sanity check: no pending FSF requests */ - spin_lock_irqsave(&adapter->req_list_lock, flags); - retval = zfcp_reqlist_isempty(adapter); - spin_unlock_irqrestore(&adapter->req_list_lock, flags); - if (!retval) { + spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); + retval = !list_empty(&adapter->fsf_req_list_head); + spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); + if (retval) { ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, " "%i requests outstanding\n", zfcp_get_busid_by_adapter(adapter), adapter, - atomic_read(&adapter->reqs_active)); + atomic_read(&adapter->fsf_reqs_active)); retval = -EBUSY; goto out; } @@ -1188,7 +1087,6 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter) zfcp_free_low_mem_buffers(adapter); /* free memory of adapter data structure and queues */ zfcp_qdio_free_queues(adapter); - zfcp_reqlist_free(adapter); kfree(adapter->fc_stats); kfree(adapter->stats_reset_data); ZFCP_LOG_TRACE("freeing adapter structure\n"); diff --git a/trunk/drivers/s390/scsi/zfcp_ccw.c b/trunk/drivers/s390/scsi/zfcp_ccw.c index fdabadeaa9ee..57d8e4bfb8d9 100644 --- a/trunk/drivers/s390/scsi/zfcp_ccw.c +++ b/trunk/drivers/s390/scsi/zfcp_ccw.c @@ -164,11 +164,6 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device) retval = zfcp_adapter_scsi_register(adapter); if (retval) goto out_scsi_register; - - /* initialize request counter */ - BUG_ON(!zfcp_reqlist_isempty(adapter)); - adapter->req_no = 0; - zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED); diff --git a/trunk/drivers/s390/scsi/zfcp_def.h b/trunk/drivers/s390/scsi/zfcp_def.h index 
94d1b74db356..2df512a18e2c 100644 --- a/trunk/drivers/s390/scsi/zfcp_def.h +++ b/trunk/drivers/s390/scsi/zfcp_def.h @@ -52,7 +52,7 @@ /********************* GENERAL DEFINES *********************************/ /* zfcp version number, it consists of major, minor, and patch-level number */ -#define ZFCP_VERSION "4.8.0" +#define ZFCP_VERSION "4.7.0" /** * zfcp_sg_to_address - determine kernel address from struct scatterlist @@ -80,7 +80,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list) #define REQUEST_LIST_SIZE 128 /********************* SCSI SPECIFIC DEFINES *********************************/ -#define ZFCP_SCSI_ER_TIMEOUT (10*HZ) +#define ZFCP_SCSI_ER_TIMEOUT (100*HZ) /********************* CIO/QDIO SPECIFIC DEFINES *****************************/ @@ -886,11 +886,11 @@ struct zfcp_adapter { struct list_head port_remove_lh; /* head of ports to be removed */ u32 ports; /* number of remote ports */ - struct timer_list scsi_er_timer; /* SCSI err recovery watch */ - atomic_t reqs_active; /* # active FSF reqs */ - unsigned long req_no; /* unique FSF req number */ - struct list_head *req_list; /* list of pending reqs */ - spinlock_t req_list_lock; /* request list lock */ + struct timer_list scsi_er_timer; /* SCSI err recovery watch */ + struct list_head fsf_req_list_head; /* head of FSF req list */ + spinlock_t fsf_req_list_lock; /* lock for ops on list of + FSF requests */ + atomic_t fsf_reqs_active; /* # active FSF reqs */ struct zfcp_qdio_queue request_queue; /* request queue */ u32 fsf_req_seq_no; /* FSF cmnd seq number */ wait_queue_head_t request_wq; /* can be used to wait for @@ -986,7 +986,6 @@ struct zfcp_unit { /* FSF request */ struct zfcp_fsf_req { struct list_head list; /* list of FSF requests */ - unsigned long req_id; /* unique request ID */ struct zfcp_adapter *adapter; /* adapter request belongs to */ u8 sbal_number; /* nr of SBALs free for use */ u8 sbal_first; /* first SBAL for this request */ diff --git a/trunk/drivers/s390/scsi/zfcp_erp.c b/trunk/drivers/s390/scsi/zfcp_erp.c index 7f60b6fdf724..8ec8da0beaa8 100644 --- a/trunk/drivers/s390/scsi/zfcp_erp.c +++ b/trunk/drivers/s390/scsi/zfcp_erp.c @@ -64,8 +64,8 @@ static int zfcp_erp_strategy_check_action(struct zfcp_erp_action *, int); static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *); static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int); static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *); -static void zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *); -static void zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *); +static int zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *); +static int zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *); static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *); static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *); static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *); @@ -93,9 +93,10 @@ static int zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *); static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *); static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *); -static void zfcp_erp_action_dismiss_port(struct zfcp_port *); -static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *); -static void zfcp_erp_action_dismiss(struct zfcp_erp_action *); +static int zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *); +static int zfcp_erp_action_dismiss_port(struct zfcp_port *); +static int 
zfcp_erp_action_dismiss_unit(struct zfcp_unit *); +static int zfcp_erp_action_dismiss(struct zfcp_erp_action *); static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *, struct zfcp_port *, struct zfcp_unit *); @@ -134,39 +135,29 @@ zfcp_fsf_request_timeout_handler(unsigned long data) zfcp_erp_adapter_reopen(adapter, 0); } -/** - * zfcp_fsf_scsi_er_timeout_handler - timeout handler for scsi eh tasks - * - * This function needs to be called whenever a SCSI error recovery - * action (abort/reset) does not return. Re-opening the adapter means - * that the abort/reset command can be returned by zfcp. It won't complete - * via the adapter anymore (because qdio queues are closed). If ERP is - * already running on this adapter it will be stopped. +/* + * function: zfcp_fsf_scsi_er_timeout_handler + * + * purpose: This function needs to be called whenever a SCSI error recovery + * action (abort/reset) does not return. + * Re-opening the adapter means that the command can be returned + * by zfcp (it is guarranteed that it does not return via the + * adapter anymore). The buffer can then be used again. + * + * returns: sod all */ -void zfcp_fsf_scsi_er_timeout_handler(unsigned long data) +void +zfcp_fsf_scsi_er_timeout_handler(unsigned long data) { struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; - unsigned long flags; ZFCP_LOG_NORMAL("warning: SCSI error recovery timed out. " "Restarting all operations on the adapter %s\n", zfcp_get_busid_by_adapter(adapter)); debug_text_event(adapter->erp_dbf, 1, "eh_lmem_tout"); - - write_lock_irqsave(&adapter->erp_lock, flags); - if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, - &adapter->status)) { - zfcp_erp_modify_adapter_status(adapter, - ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN, - ZFCP_CLEAR); - zfcp_erp_action_dismiss_adapter(adapter); - write_unlock_irqrestore(&adapter->erp_lock, flags); - /* dismiss all pending requests including requests for ERP */ - zfcp_fsf_req_dismiss_all(adapter); - adapter->fsf_req_seq_no = 0; - } else - write_unlock_irqrestore(&adapter->erp_lock, flags); zfcp_erp_adapter_reopen(adapter, 0); + + return; } /* @@ -679,10 +670,17 @@ zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask) return retval; } -/** - * zfcp_erp_adapter_block - mark adapter as blocked, block scsi requests +/* + * function: + * + * purpose: disable I/O, + * return any open requests and clean them up, + * aim: no pending and incoming I/O + * + * returns: */ -static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask) +static void +zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask) { debug_text_event(adapter->erp_dbf, 6, "a_bl"); zfcp_erp_modify_adapter_status(adapter, @@ -690,10 +688,15 @@ static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask) clear_mask, ZFCP_CLEAR); } -/** - * zfcp_erp_adapter_unblock - mark adapter as unblocked, allow scsi requests +/* + * function: + * + * purpose: enable I/O + * + * returns: */ -static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) +static void +zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) { debug_text_event(adapter->erp_dbf, 6, "a_ubl"); atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); @@ -845,16 +848,18 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action) struct zfcp_adapter *adapter = erp_action->adapter; if (erp_action->fsf_req) { - /* take lock to ensure that request is not deleted meanwhile */ - spin_lock(&adapter->req_list_lock); - if 
((!zfcp_reqlist_ismember(adapter, - erp_action->fsf_req->req_id)) && - (fsf_req->erp_action == erp_action)) { + /* take lock to ensure that request is not being deleted meanwhile */ + spin_lock(&adapter->fsf_req_list_lock); + /* check whether fsf req does still exist */ + list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list) + if (fsf_req == erp_action->fsf_req) + break; + if (fsf_req && (fsf_req->erp_action == erp_action)) { /* fsf_req still exists */ debug_text_event(adapter->erp_dbf, 3, "a_ca_req"); debug_event(adapter->erp_dbf, 3, &fsf_req, sizeof (unsigned long)); - /* dismiss fsf_req of timed out/dismissed erp_action */ + /* dismiss fsf_req of timed out or dismissed erp_action */ if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED | ZFCP_STATUS_ERP_TIMEDOUT)) { debug_text_event(adapter->erp_dbf, 3, @@ -887,22 +892,30 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action) */ erp_action->fsf_req = NULL; } - spin_unlock(&adapter->req_list_lock); + spin_unlock(&adapter->fsf_req_list_lock); } else debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq"); return retval; } -/** - * zfcp_erp_async_handler_nolock - complete erp_action +/* + * purpose: generic handler for asynchronous events related to erp_action events + * (normal completion, time-out, dismissing, retry after + * low memory condition) + * + * note: deletion of timer is not required (e.g. in case of a time-out), + * but a second try does no harm, + * we leave it in here to allow for greater simplification * - * Used for normal completion, time-out, dismissal and failure after - * low memory condition. + * returns: 0 - there was an action to handle + * !0 - otherwise */ -static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, - unsigned long set_mask) +static int +zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, + unsigned long set_mask) { + int retval; struct zfcp_adapter *adapter = erp_action->adapter; if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) { @@ -913,26 +926,43 @@ static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, del_timer(&erp_action->timer); erp_action->status |= set_mask; zfcp_erp_action_ready(erp_action); + retval = 0; } else { /* action is ready or gone - nothing to do */ debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone"); debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int)); + retval = 1; } + + return retval; } -/** - * zfcp_erp_async_handler - wrapper for erp_async_handler_nolock w/ locking +/* + * purpose: generic handler for asynchronous events related to erp_action + * events (normal completion, time-out, dismissing, retry after + * low memory condition) + * + * note: deletion of timer is not required (e.g. 
in case of a time-out), + * but a second try does no harm, + * we leave it in here to allow for greater simplification + * + * returns: 0 - there was an action to handle + * !0 - otherwise */ -void zfcp_erp_async_handler(struct zfcp_erp_action *erp_action, - unsigned long set_mask) +int +zfcp_erp_async_handler(struct zfcp_erp_action *erp_action, + unsigned long set_mask) { struct zfcp_adapter *adapter = erp_action->adapter; unsigned long flags; + int retval; write_lock_irqsave(&adapter->erp_lock, flags); - zfcp_erp_async_handler_nolock(erp_action, set_mask); + retval = zfcp_erp_async_handler_nolock(erp_action, set_mask); write_unlock_irqrestore(&adapter->erp_lock, flags); + + return retval; } /* @@ -969,15 +999,17 @@ zfcp_erp_timeout_handler(unsigned long data) zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT); } -/** - * zfcp_erp_action_dismiss - dismiss an erp_action +/* + * purpose: is called for an erp_action which needs to be ended + * though not being done, + * this is usually required if an higher is generated, + * action gets an appropriate flag and will be processed + * accordingly * - * adapter->erp_lock must be held - * - * Dismissal of an erp_action is usually required if an erp_action of - * higher priority is generated. + * locks: erp_lock held (thus we need to call another handler variant) */ -static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action) +static int +zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action) { struct zfcp_adapter *adapter = erp_action->adapter; @@ -985,6 +1017,8 @@ static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action) debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int)); zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED); + + return 0; } int @@ -2040,12 +2074,18 @@ zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action) return retval; } -/** - * zfcp_erp_adapter_strategy_close_qdio - close qdio queues for an adapter +/* + * function: zfcp_qdio_cleanup + * + * purpose: cleans up QDIO operation for the specified adapter + * + * returns: 0 - successful cleanup + * !0 - failed cleanup */ -static void +int zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action) { + int retval = ZFCP_ERP_SUCCEEDED; int first_used; int used_count; struct zfcp_adapter *adapter = erp_action->adapter; @@ -2054,13 +2094,15 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action) ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO " "queues on adapter %s\n", zfcp_get_busid_by_adapter(adapter)); - return; + retval = ZFCP_ERP_FAILED; + goto out; } /* * Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that * do_QDIO won't be called while qdio_shutdown is in progress. 
*/ + write_lock_irq(&adapter->request_queue.queue_lock); atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); write_unlock_irq(&adapter->request_queue.queue_lock); @@ -2092,6 +2134,8 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action) adapter->request_queue.free_index = 0; atomic_set(&adapter->request_queue.free_count, 0); adapter->request_queue.distance_from_int = 0; + out: + return retval; } static int @@ -2214,11 +2258,11 @@ zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action) "%s)\n", zfcp_get_busid_by_adapter(adapter)); ret = ZFCP_ERP_FAILED; } - - /* don't treat as error for the sake of compatibility */ - if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) - ZFCP_LOG_INFO("warning: exchange port data failed (adapter " + if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) { + ZFCP_LOG_INFO("error: exchange port data failed (adapter " "%s\n", zfcp_get_busid_by_adapter(adapter)); + ret = ZFCP_ERP_FAILED; + } return ret; } @@ -2248,12 +2292,18 @@ zfcp_erp_adapter_strategy_open_fsf_statusread(struct zfcp_erp_action return retval; } -/** - * zfcp_erp_adapter_strategy_close_fsf - stop FSF operations for an adapter +/* + * function: zfcp_fsf_cleanup + * + * purpose: cleanup FSF operation for specified adapter + * + * returns: 0 - FSF operation successfully cleaned up + * !0 - failed to cleanup FSF operation for this adapter */ -static void +static int zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action) { + int retval = ZFCP_ERP_SUCCEEDED; struct zfcp_adapter *adapter = erp_action->adapter; /* @@ -2267,6 +2317,8 @@ zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action) /* all ports and units are closed */ zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); + + return retval; } /* @@ -3241,8 +3293,10 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter, } -void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) +static int +zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) { + int retval = 0; struct zfcp_port *port; debug_text_event(adapter->erp_dbf, 5, "a_actab"); @@ -3251,10 +3305,14 @@ void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) else list_for_each_entry(port, &adapter->port_list_head, list) zfcp_erp_action_dismiss_port(port); + + return retval; } -static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) +static int +zfcp_erp_action_dismiss_port(struct zfcp_port *port) { + int retval = 0; struct zfcp_unit *unit; struct zfcp_adapter *adapter = port->adapter; @@ -3265,16 +3323,22 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) else list_for_each_entry(unit, &port->unit_list_head, list) zfcp_erp_action_dismiss_unit(unit); + + return retval; } -static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit) +static int +zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit) { + int retval = 0; struct zfcp_adapter *adapter = unit->port->adapter; debug_text_event(adapter->erp_dbf, 5, "u_actab"); debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t)); if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) zfcp_erp_action_dismiss(&unit->erp_action); + + return retval; } static inline void diff --git a/trunk/drivers/s390/scsi/zfcp_ext.h b/trunk/drivers/s390/scsi/zfcp_ext.h index 146d7a2b4c4a..d02366004cdd 100644 --- a/trunk/drivers/s390/scsi/zfcp_ext.h +++ b/trunk/drivers/s390/scsi/zfcp_ext.h @@ -63,6 +63,7 @@ extern int 
zfcp_qdio_allocate_queues(struct zfcp_adapter *); extern void zfcp_qdio_free_queues(struct zfcp_adapter *); extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *, struct zfcp_fsf_req *); +extern int zfcp_qdio_reqid_check(struct zfcp_adapter *, void *); extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req (struct zfcp_fsf_req *, int, int); @@ -139,7 +140,6 @@ extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u32, int); extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int); extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int); extern void zfcp_erp_adapter_failed(struct zfcp_adapter *); -extern void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *); extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int); extern int zfcp_erp_port_reopen(struct zfcp_port *, int); @@ -156,7 +156,7 @@ extern void zfcp_erp_unit_failed(struct zfcp_unit *); extern int zfcp_erp_thread_setup(struct zfcp_adapter *); extern int zfcp_erp_thread_kill(struct zfcp_adapter *); extern int zfcp_erp_wait(struct zfcp_adapter *); -extern void zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long); +extern int zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long); extern int zfcp_test_link(struct zfcp_port *); @@ -190,10 +190,5 @@ extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *, struct zfcp_fsf_req *); extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, struct scsi_cmnd *); -extern void zfcp_reqlist_add(struct zfcp_adapter *, struct zfcp_fsf_req *); -extern void zfcp_reqlist_remove(struct zfcp_adapter *, unsigned long); -extern struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *, - unsigned long); -extern int zfcp_reqlist_isempty(struct zfcp_adapter *); #endif /* ZFCP_EXT_H */ diff --git a/trunk/drivers/s390/scsi/zfcp_fsf.c b/trunk/drivers/s390/scsi/zfcp_fsf.c index ff2eacf5ec8c..31db2b06faba 100644 --- a/trunk/drivers/s390/scsi/zfcp_fsf.c +++ b/trunk/drivers/s390/scsi/zfcp_fsf.c @@ -49,6 +49,7 @@ static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *); static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *, struct fsf_link_down_info *); static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *); +static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *); /* association between FSF command and FSF QTCB type */ static u32 fsf_qtcb_type[] = { @@ -145,50 +146,49 @@ zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req) kfree(fsf_req); } -/** - * zfcp_fsf_req_dismiss - dismiss a single fsf request +/* + * function: + * + * purpose: + * + * returns: + * + * note: qdio queues shall be down (no ongoing inbound processing) */ -static void zfcp_fsf_req_dismiss(struct zfcp_adapter *adapter, - struct zfcp_fsf_req *fsf_req, - unsigned int counter) +int +zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) { - u64 dbg_tmp[2]; + struct zfcp_fsf_req *fsf_req, *tmp; + unsigned long flags; + LIST_HEAD(remove_queue); - dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active); - dbg_tmp[1] = (u64) counter; - debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16); - list_del(&fsf_req->list); - fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; - zfcp_fsf_req_complete(fsf_req); -} + spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); + list_splice_init(&adapter->fsf_req_list_head, &remove_queue); + atomic_set(&adapter->fsf_reqs_active, 0); + spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); -/** - * zfcp_fsf_req_dismiss_all - dismiss all remaining fsf requests - */ -int 
zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) -{ - struct zfcp_fsf_req *request, *tmp; - unsigned long flags; - unsigned int i, counter; - - spin_lock_irqsave(&adapter->req_list_lock, flags); - atomic_set(&adapter->reqs_active, 0); - for (i=0; i<REQUEST_LIST_SIZE; i++) { - if (list_empty(&adapter->req_list[i])) - continue; - - counter = 0; - list_for_each_entry_safe(request, tmp, - &adapter->req_list[i], list) { - zfcp_fsf_req_dismiss(adapter, request, counter); - counter++; - } + list_for_each_entry_safe(fsf_req, tmp, &remove_queue, list) { + list_del(&fsf_req->list); + zfcp_fsf_req_dismiss(fsf_req); } - spin_unlock_irqrestore(&adapter->req_list_lock, flags); return 0; } +/* + * function: + * + * purpose: + * + * returns: + */ +static void +zfcp_fsf_req_dismiss(struct zfcp_fsf_req *fsf_req) +{ + fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; + zfcp_fsf_req_complete(fsf_req); +} + /* * function: zfcp_fsf_req_complete * @@ -4592,14 +4592,12 @@ static inline void zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) { if (likely(fsf_req->qtcb != NULL)) { - fsf_req->qtcb->prefix.req_seq_no = - fsf_req->adapter->fsf_req_seq_no; - fsf_req->qtcb->prefix.req_id = fsf_req->req_id; + fsf_req->qtcb->prefix.req_seq_no = fsf_req->adapter->fsf_req_seq_no; + fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req; fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION; - fsf_req->qtcb->prefix.qtcb_type = - fsf_qtcb_type[fsf_req->fsf_command]; + fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_req->fsf_command]; fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION; - fsf_req->qtcb->header.req_handle = fsf_req->req_id; + fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req; fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command; } } @@ -4656,7 +4654,6 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, { volatile struct qdio_buffer_element *sbale; struct zfcp_fsf_req *fsf_req = NULL; - unsigned long flags; int ret = 0; struct zfcp_qdio_queue *req_queue = &adapter->request_queue; @@ -4671,12 +4668,6 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, fsf_req->adapter = adapter; fsf_req->fsf_command = fsf_cmd; - INIT_LIST_HEAD(&fsf_req->list); - - /* unique request id */ - spin_lock_irqsave(&adapter->req_list_lock, flags); - fsf_req->req_id = adapter->req_no++; - spin_unlock_irqrestore(&adapter->req_list_lock, flags); zfcp_fsf_req_qtcb_init(fsf_req); @@ -4716,7 +4707,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); /* setup common SBALE fields */ - sbale[0].addr = (void *) fsf_req->req_id; + sbale[0].addr = fsf_req; sbale[0].flags |= SBAL_FLAGS0_COMMAND; if (likely(fsf_req->qtcb != NULL)) { sbale[1].addr = (void *) fsf_req->qtcb; @@ -4756,7 +4747,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) volatile struct qdio_buffer_element *sbale; int inc_seq_no; int new_distance_from_int; - u64 dbg_tmp[2]; + unsigned long flags; int retval = 0; adapter = fsf_req->adapter; @@ -4770,10 +4761,10 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr, sbale[1].length); - /* put allocated FSF request into hash table */ - spin_lock(&adapter->req_list_lock); - zfcp_reqlist_add(adapter, fsf_req); - spin_unlock(&adapter->req_list_lock); + /* put allocated FSF request at list tail */ + spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); + list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head); 
+ spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); inc_seq_no = (fsf_req->qtcb != NULL); @@ -4812,10 +4803,6 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) QDIO_FLAG_SYNC_OUTPUT, 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL); - dbg_tmp[0] = (unsigned long) sbale[0].addr; - dbg_tmp[1] = (u64) retval; - debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16); - if (unlikely(retval)) { /* Queues are down..... */ retval = -EIO; @@ -4825,17 +4812,22 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) */ if (timer) del_timer(timer); - spin_lock(&adapter->req_list_lock); - zfcp_reqlist_remove(adapter, fsf_req->req_id); - spin_unlock(&adapter->req_list_lock); - /* undo changes in request queue made for this request */ + spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); + list_del(&fsf_req->list); + spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); + /* + * adjust the number of free SBALs in request queue as well as + * position of first one + */ zfcp_qdio_zero_sbals(req_queue->buffer, fsf_req->sbal_first, fsf_req->sbal_number); atomic_add(fsf_req->sbal_number, &req_queue->free_count); - req_queue->free_index -= fsf_req->sbal_number; + req_queue->free_index -= fsf_req->sbal_number; /* increase */ req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q; req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */ - zfcp_erp_adapter_reopen(adapter, 0); + ZFCP_LOG_DEBUG + ("error: do_QDIO failed. Buffers could not be enqueued " + "to request queue.\n"); } else { req_queue->distance_from_int = new_distance_from_int; /* @@ -4851,7 +4843,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) adapter->fsf_req_seq_no++; /* count FSF requests pending */ - atomic_inc(&adapter->reqs_active); + atomic_inc(&adapter->fsf_reqs_active); } return retval; } diff --git a/trunk/drivers/s390/scsi/zfcp_qdio.c b/trunk/drivers/s390/scsi/zfcp_qdio.c index dbd9f48e863e..49ea5add4abc 100644 --- a/trunk/drivers/s390/scsi/zfcp_qdio.c +++ b/trunk/drivers/s390/scsi/zfcp_qdio.c @@ -282,37 +282,6 @@ zfcp_qdio_request_handler(struct ccw_device *ccw_device, return; } -/** - * zfcp_qdio_reqid_check - checks for valid reqids or unsolicited status - */ -static int zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, - unsigned long req_id) -{ - struct zfcp_fsf_req *fsf_req; - unsigned long flags; - - debug_long_event(adapter->erp_dbf, 4, req_id); - - spin_lock_irqsave(&adapter->req_list_lock, flags); - fsf_req = zfcp_reqlist_ismember(adapter, req_id); - - if (!fsf_req) { - spin_unlock_irqrestore(&adapter->req_list_lock, flags); - ZFCP_LOG_NORMAL("error: unknown request id (%ld).\n", req_id); - zfcp_erp_adapter_reopen(adapter, 0); - return -EINVAL; - } - - zfcp_reqlist_remove(adapter, req_id); - atomic_dec(&adapter->reqs_active); - spin_unlock_irqrestore(&adapter->req_list_lock, flags); - - /* finish the FSF request */ - zfcp_fsf_req_complete(fsf_req); - - return 0; -} - /* * function: zfcp_qdio_response_handler * @@ -375,7 +344,7 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device, /* look for QDIO request identifiers in SB */ buffere = &buffer->element[buffere_index]; retval = zfcp_qdio_reqid_check(adapter, - (unsigned long) buffere->addr); + (void *) buffere->addr); if (retval) { ZFCP_LOG_NORMAL("bug: unexpected inbound " @@ -446,6 +415,52 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device, return; } +/* + * function: zfcp_qdio_reqid_check + * + * purpose: checks for valid reqids or unsolicited status + * + * returns: 
0 - valid request id or unsolicited status + * !0 - otherwise + */ +int +zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, void *sbale_addr) +{ + struct zfcp_fsf_req *fsf_req; + unsigned long flags; + + /* invalid (per convention used in this driver) */ + if (unlikely(!sbale_addr)) { + ZFCP_LOG_NORMAL("bug: invalid reqid\n"); + return -EINVAL; + } + + /* valid request id and thus (hopefully :) valid fsf_req address */ + fsf_req = (struct zfcp_fsf_req *) sbale_addr; + + /* serialize with zfcp_fsf_req_dismiss_all */ + spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); + if (list_empty(&adapter->fsf_req_list_head)) { + spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); + return 0; + } + list_del(&fsf_req->list); + atomic_dec(&adapter->fsf_reqs_active); + spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); + + if (unlikely(adapter != fsf_req->adapter)) { + ZFCP_LOG_NORMAL("bug: invalid reqid (fsf_req=%p, " + "fsf_req->adapter=%p, adapter=%p)\n", + fsf_req, fsf_req->adapter, adapter); + return -EINVAL; + } + + /* finish the FSF request */ + zfcp_fsf_req_complete(fsf_req); + + return 0; +} + /** * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue * @queue: queue from which SBALE should be returned diff --git a/trunk/drivers/s390/scsi/zfcp_scsi.c b/trunk/drivers/s390/scsi/zfcp_scsi.c index 1bb55086db9f..671f4a6a5d18 100644 --- a/trunk/drivers/s390/scsi/zfcp_scsi.c +++ b/trunk/drivers/s390/scsi/zfcp_scsi.c @@ -30,6 +30,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *, void (*done) (struct scsi_cmnd *)); static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *); static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *); +static int zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *); static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *); static int zfcp_task_management_function(struct zfcp_unit *, u8, struct scsi_cmnd *); @@ -45,22 +46,30 @@ struct zfcp_data zfcp_data = { .scsi_host_template = { .name = ZFCP_NAME, .proc_name = "zfcp", + .proc_info = NULL, + .detect = NULL, .slave_alloc = zfcp_scsi_slave_alloc, .slave_configure = zfcp_scsi_slave_configure, .slave_destroy = zfcp_scsi_slave_destroy, .queuecommand = zfcp_scsi_queuecommand, .eh_abort_handler = zfcp_scsi_eh_abort_handler, .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, - .eh_bus_reset_handler = zfcp_scsi_eh_host_reset_handler, + .eh_bus_reset_handler = zfcp_scsi_eh_bus_reset_handler, .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, .can_queue = 4096, .this_id = -1, + /* + * FIXME: + * one less? can zfcp_create_sbale cope with it? + */ .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ, .cmd_per_lun = 1, + .unchecked_isa_dma = 0, .use_clustering = 1, .sdev_attrs = zfcp_sysfs_sdev_attrs, }, .driver_version = ZFCP_VERSION, + /* rest initialised with zeros */ }; /* Find start of Response Information in FCP response unit*/ @@ -167,14 +176,8 @@ zfcp_scsi_slave_alloc(struct scsi_device *sdp) return retval; } -/** - * zfcp_scsi_slave_destroy - called when scsi device is removed - * - * Remove reference to associated scsi device for an zfcp_unit. - * Mark zfcp_unit as failed. The scsi device might be deleted via sysfs - * or a scan for this device might have failed. 
- */ -static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) +static void +zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) { struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; @@ -182,7 +185,6 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); sdpnt->hostdata = NULL; unit->device = NULL; - zfcp_erp_unit_failed(unit); zfcp_unit_put(unit); } else { ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at " @@ -547,38 +549,35 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags, } /** - * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset - * - * If ERP is already running it will be stopped. + * zfcp_scsi_eh_bus_reset_handler - reset bus (reopen adapter) */ -int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) +int +zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt) { - struct zfcp_unit *unit; - struct zfcp_adapter *adapter; - unsigned long flags; - - unit = (struct zfcp_unit*) scpnt->device->hostdata; - adapter = unit->port->adapter; + struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata; + struct zfcp_adapter *adapter = unit->port->adapter; - ZFCP_LOG_NORMAL("host/bus reset because of problems with " + ZFCP_LOG_NORMAL("bus reset because of problems with " "unit 0x%016Lx\n", unit->fcp_lun); + zfcp_erp_adapter_reopen(adapter, 0); + zfcp_erp_wait(adapter); - write_lock_irqsave(&adapter->erp_lock, flags); - if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, - &adapter->status)) { - zfcp_erp_modify_adapter_status(adapter, - ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN, - ZFCP_CLEAR); - zfcp_erp_action_dismiss_adapter(adapter); - write_unlock_irqrestore(&adapter->erp_lock, flags); - zfcp_fsf_req_dismiss_all(adapter); - adapter->fsf_req_seq_no = 0; - zfcp_erp_adapter_reopen(adapter, 0); - } else { - write_unlock_irqrestore(&adapter->erp_lock, flags); - zfcp_erp_adapter_reopen(adapter, 0); - zfcp_erp_wait(adapter); - } + return SUCCESS; +} + +/** + * zfcp_scsi_eh_host_reset_handler - reset host (reopen adapter) + */ +int +zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) +{ + struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata; + struct zfcp_adapter *adapter = unit->port->adapter; + + ZFCP_LOG_NORMAL("host reset because of problems with " + "unit 0x%016Lx\n", unit->fcp_lun); + zfcp_erp_adapter_reopen(adapter, 0); + zfcp_erp_wait(adapter); return SUCCESS; } diff --git a/trunk/drivers/scsi/ata_piix.c b/trunk/drivers/scsi/ata_piix.c index 2d20caf377f5..5e8afc876980 100644 --- a/trunk/drivers/scsi/ata_piix.c +++ b/trunk/drivers/scsi/ata_piix.c @@ -390,8 +390,7 @@ static struct ata_port_info piix_port_info[] = { /* ich5_sata */ { .sht = &piix_sht, - .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR | - PIIX_FLAG_IGNORE_PCS, + .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR, .pio_mask = 0x1f, /* pio0-4 */ .mwdma_mask = 0x07, /* mwdma0-2 */ .udma_mask = 0x7f, /* udma0-6 */ @@ -468,11 +467,6 @@ MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, piix_pci_tbl); MODULE_VERSION(DRV_VERSION); -static int force_pcs = 0; -module_param(force_pcs, int, 0444); -MODULE_PARM_DESC(force_pcs, "force honoring or ignoring PCS to work around " - "device mis-detection (0=default, 1=ignore PCS, 2=honor PCS)"); - /** * piix_pata_cbl_detect - Probe host controller cable detect info * @ap: Port for which cable detect info is desired @@ -537,25 +531,27 @@ static void piix_pata_error_handler(struct ata_port *ap) } /** - * 
piix_sata_present_mask - determine present mask for SATA host controller + * piix_sata_prereset - prereset for SATA host controller * @ap: Target port * - * Reads SATA PCI device's PCI config register Port Configuration - * and Status (PCS) to determine port and device availability. + * Reads and configures SATA PCI device's PCI config register + * Port Configuration and Status (PCS) to determine port and + * device availability. Return -ENODEV to skip reset if no + * device is present. * * LOCKING: * None (inherited from caller). * * RETURNS: - * determined present_mask + * 0 if device is present, -ENODEV otherwise. */ -static unsigned int piix_sata_present_mask(struct ata_port *ap) +static int piix_sata_prereset(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); struct piix_host_priv *hpriv = ap->host_set->private_data; const unsigned int *map = hpriv->map; int base = 2 * ap->hard_port_no; - unsigned int present_mask = 0; + unsigned int present = 0; int port, i; u16 pcs; @@ -568,52 +564,24 @@ static unsigned int piix_sata_present_mask(struct ata_port *ap) continue; if ((ap->flags & PIIX_FLAG_IGNORE_PCS) || (pcs & 1 << (hpriv->map_db->present_shift + port))) - present_mask |= 1 << i; + present = 1; } - DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n", - ap->id, pcs, present_mask); - - return present_mask; -} - -/** - * piix_sata_softreset - reset SATA host port via ATA SRST - * @ap: port to reset - * @classes: resulting classes of attached devices - * - * Reset SATA host port via ATA SRST. On controllers with - * reliable PCS present bits, the bits are used to determine - * device presence. - * - * LOCKING: - * Kernel thread context (may sleep) - * - * RETURNS: - * 0 on success, -errno otherwise. - */ -static int piix_sata_softreset(struct ata_port *ap, unsigned int *classes) -{ - unsigned int present_mask; - int i, rc; - - present_mask = piix_sata_present_mask(ap); - - rc = ata_std_softreset(ap, classes); - if (rc) - return rc; + DPRINTK("ata%u: LEAVE, pcs=0x%x present=0x%x\n", + ap->id, pcs, present); - for (i = 0; i < ATA_MAX_DEVICES; i++) { - if (!(present_mask & (1 << i))) - classes[i] = ATA_DEV_NONE; + if (!present) { + ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n"); + ap->eh_context.i.action &= ~ATA_EH_RESET_MASK; + return 0; } - return 0; + return ata_std_prereset(ap); } static void piix_sata_error_handler(struct ata_port *ap) { - ata_bmdma_drive_eh(ap, ata_std_prereset, piix_sata_softreset, NULL, + ata_bmdma_drive_eh(ap, piix_sata_prereset, ata_std_softreset, NULL, ata_std_postreset); } @@ -817,7 +785,6 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev) } static void __devinit piix_init_pcs(struct pci_dev *pdev, - struct ata_port_info *pinfo, const struct piix_map_db *map_db) { u16 pcs, new_pcs; @@ -831,18 +798,6 @@ static void __devinit piix_init_pcs(struct pci_dev *pdev, pci_write_config_word(pdev, ICH5_PCS, new_pcs); msleep(150); } - - if (force_pcs == 1) { - dev_printk(KERN_INFO, &pdev->dev, - "force ignoring PCS (0x%x)\n", new_pcs); - pinfo[0].host_flags |= PIIX_FLAG_IGNORE_PCS; - pinfo[1].host_flags |= PIIX_FLAG_IGNORE_PCS; - } else if (force_pcs == 2) { - dev_printk(KERN_INFO, &pdev->dev, - "force honoring PCS (0x%x)\n", new_pcs); - pinfo[0].host_flags &= ~PIIX_FLAG_IGNORE_PCS; - pinfo[1].host_flags &= ~PIIX_FLAG_IGNORE_PCS; - } } static void __devinit piix_init_sata_map(struct pci_dev *pdev, @@ -951,8 +906,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) if (host_flags & 
ATA_FLAG_SATA) { piix_init_sata_map(pdev, port_info, piix_map_db_table[ent->driver_data]); - piix_init_pcs(pdev, port_info, - piix_map_db_table[ent->driver_data]); + piix_init_pcs(pdev, piix_map_db_table[ent->driver_data]); } /* On ICH5, some BIOSen disable the interrupt using the diff --git a/trunk/drivers/scsi/esp.c b/trunk/drivers/scsi/esp.c index 5630868c1b25..98bd22714d0d 100644 --- a/trunk/drivers/scsi/esp.c +++ b/trunk/drivers/scsi/esp.c @@ -1146,7 +1146,7 @@ static struct sbus_dev sun4_esp_dev; static int __init esp_sun4_probe(struct scsi_host_template *tpnt) { if (sun4_esp_physaddr) { - memset(&sun4_esp_dev, 0, sizeof(sun4_esp_dev)); + memset(&sun4_esp_dev, 0, sizeof(esp_dev)); sun4_esp_dev.reg_addrs[0].phys_addr = sun4_esp_physaddr; sun4_esp_dev.irqs[0] = 4; sun4_esp_dev.resource[0].start = sun4_esp_physaddr; @@ -1162,7 +1162,6 @@ static int __init esp_sun4_probe(struct scsi_host_template *tpnt) static int __devexit esp_sun4_remove(void) { - struct of_device *dev = &sun4_esp_dev.ofdev; struct esp *esp = dev_get_drvdata(&dev->dev); return esp_remove_common(esp); diff --git a/trunk/drivers/scsi/hptiop.c b/trunk/drivers/scsi/hptiop.c index bcb3444f1dcf..ab2f8b267908 100644 --- a/trunk/drivers/scsi/hptiop.c +++ b/trunk/drivers/scsi/hptiop.c @@ -45,6 +45,10 @@ static char driver_name[] = "hptiop"; static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver"; static const char driver_ver[] = "v1.0 (060426)"; +static DEFINE_SPINLOCK(hptiop_hba_list_lock); +static LIST_HEAD(hptiop_hba_list); +static int hptiop_cdev_major = -1; + static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag); static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag); static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg); @@ -573,7 +577,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba) if (atomic_xchg(&hba->resetting, 1) == 0) { atomic_inc(&hba->reset_count); writel(IOPMU_INBOUND_MSG0_RESET, - &hba->iop->inbound_msgaddr0); + &hba->iop->outbound_msgaddr0); hptiop_pci_posting_flush(hba->iop); } @@ -616,11 +620,532 @@ static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev, return queue_depth; } +struct hptiop_getinfo { + char __user *buffer; + loff_t buflength; + loff_t bufoffset; + loff_t buffillen; + loff_t filpos; +}; + +static void hptiop_copy_mem_info(struct hptiop_getinfo *pinfo, + char *data, int datalen) +{ + if (pinfo->filpos < pinfo->bufoffset) { + if (pinfo->filpos + datalen <= pinfo->bufoffset) { + pinfo->filpos += datalen; + return; + } else { + data += (pinfo->bufoffset - pinfo->filpos); + datalen -= (pinfo->bufoffset - pinfo->filpos); + pinfo->filpos = pinfo->bufoffset; + } + } + + pinfo->filpos += datalen; + if (pinfo->buffillen == pinfo->buflength) + return; + + if (pinfo->buflength - pinfo->buffillen < datalen) + datalen = pinfo->buflength - pinfo->buffillen; + + if (copy_to_user(pinfo->buffer + pinfo->buffillen, data, datalen)) + return; + + pinfo->buffillen += datalen; +} + +static int hptiop_copy_info(struct hptiop_getinfo *pinfo, char *fmt, ...) 
+{ + va_list args; + char buf[128]; + int len; + + va_start(args, fmt); + len = vsnprintf(buf, sizeof(buf), fmt, args); + va_end(args); + hptiop_copy_mem_info(pinfo, buf, len); + return len; +} + +static void hptiop_ioctl_done(struct hpt_ioctl_k *arg) +{ + arg->done = NULL; + wake_up(&arg->hba->ioctl_wq); +} + +static void hptiop_do_ioctl(struct hpt_ioctl_k *arg) +{ + struct hptiop_hba *hba = arg->hba; + u32 val; + struct hpt_iop_request_ioctl_command __iomem *req; + int ioctl_retry = 0; + + dprintk("scsi%d: hptiop_do_ioctl\n", hba->host->host_no); + + /* + * check (in + out) buff size from application. + * outbuf must be dword aligned. + */ + if (((arg->inbuf_size + 3) & ~3) + arg->outbuf_size > + hba->max_request_size + - sizeof(struct hpt_iop_request_header) + - 4 * sizeof(u32)) { + dprintk("scsi%d: ioctl buf size (%d/%d) is too large\n", + hba->host->host_no, + arg->inbuf_size, arg->outbuf_size); + arg->result = HPT_IOCTL_RESULT_FAILED; + return; + } + +retry: + spin_lock_irq(hba->host->host_lock); + + val = readl(&hba->iop->inbound_queue); + if (val == IOPMU_QUEUE_EMPTY) { + spin_unlock_irq(hba->host->host_lock); + dprintk("scsi%d: no free req for ioctl\n", hba->host->host_no); + arg->result = -1; + return; + } + + req = (struct hpt_iop_request_ioctl_command __iomem *) + ((unsigned long)hba->iop + val); + + writel(HPT_CTL_CODE_LINUX_TO_IOP(arg->ioctl_code), + &req->ioctl_code); + writel(arg->inbuf_size, &req->inbuf_size); + writel(arg->outbuf_size, &req->outbuf_size); + + /* + * use the buffer on the IOP local memory first, then copy it + * back to host. + * the caller's request buffer shoudl be little-endian. + */ + if (arg->inbuf_size) + memcpy_toio(req->buf, arg->inbuf, arg->inbuf_size); + + /* correct the controller ID for IOP */ + if ((arg->ioctl_code == HPT_IOCTL_GET_CHANNEL_INFO || + arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO_V2 || + arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO) + && arg->inbuf_size >= sizeof(u32)) + writel(0, req->buf); + + writel(IOP_REQUEST_TYPE_IOCTL_COMMAND, &req->header.type); + writel(0, &req->header.flags); + writel(offsetof(struct hpt_iop_request_ioctl_command, buf) + + arg->inbuf_size, &req->header.size); + writel((u32)(unsigned long)arg, &req->header.context); + writel(BITS_PER_LONG > 32 ? 
(u32)((unsigned long)arg>>32) : 0, + &req->header.context_hi32); + writel(IOP_RESULT_PENDING, &req->header.result); + + arg->result = HPT_IOCTL_RESULT_FAILED; + arg->done = hptiop_ioctl_done; + + writel(val, &hba->iop->inbound_queue); + hptiop_pci_posting_flush(hba->iop); + + spin_unlock_irq(hba->host->host_lock); + + wait_event_timeout(hba->ioctl_wq, arg->done == NULL, 60 * HZ); + + if (arg->done != NULL) { + hptiop_reset_hba(hba); + if (ioctl_retry++ < 3) + goto retry; + } + + dprintk("hpt_iop_ioctl %x result %d\n", + arg->ioctl_code, arg->result); +} + +static int __hpt_do_ioctl(struct hptiop_hba *hba, u32 code, void *inbuf, + u32 insize, void *outbuf, u32 outsize) +{ + struct hpt_ioctl_k arg; + arg.hba = hba; + arg.ioctl_code = code; + arg.inbuf = inbuf; + arg.outbuf = outbuf; + arg.inbuf_size = insize; + arg.outbuf_size = outsize; + arg.bytes_returned = NULL; + hptiop_do_ioctl(&arg); + return arg.result; +} + +static inline int hpt_id_valid(__le32 id) +{ + return id != 0 && id != cpu_to_le32(0xffffffff); +} + +static int hptiop_get_controller_info(struct hptiop_hba *hba, + struct hpt_controller_info *pinfo) +{ + int id = 0; + + return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CONTROLLER_INFO, + &id, sizeof(int), pinfo, sizeof(*pinfo)); +} + + +static int hptiop_get_channel_info(struct hptiop_hba *hba, int bus, + struct hpt_channel_info *pinfo) +{ + u32 ids[2]; + + ids[0] = 0; + ids[1] = bus; + return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CHANNEL_INFO, + ids, sizeof(ids), pinfo, sizeof(*pinfo)); + +} + +static int hptiop_get_logical_devices(struct hptiop_hba *hba, + __le32 *pids, int maxcount) +{ + int i; + u32 count = maxcount - 1; + + if (__hpt_do_ioctl(hba, HPT_IOCTL_GET_LOGICAL_DEVICES, + &count, sizeof(u32), + pids, sizeof(u32) * maxcount)) + return -1; + + maxcount = le32_to_cpu(pids[0]); + for (i = 0; i < maxcount; i++) + pids[i] = pids[i+1]; + + return maxcount; +} + +static int hptiop_get_device_info_v3(struct hptiop_hba *hba, __le32 id, + struct hpt_logical_device_info_v3 *pinfo) +{ + return __hpt_do_ioctl(hba, HPT_IOCTL_GET_DEVICE_INFO_V3, + &id, sizeof(u32), + pinfo, sizeof(*pinfo)); +} + +static const char *get_array_status(struct hpt_logical_device_info_v3 *devinfo) +{ + static char s[64]; + u32 flags = le32_to_cpu(devinfo->u.array.flags); + u32 trans_prog = le32_to_cpu(devinfo->u.array.transforming_progress); + u32 reb_prog = le32_to_cpu(devinfo->u.array.rebuilding_progress); + + if (flags & ARRAY_FLAG_DISABLED) + return "Disabled"; + else if (flags & ARRAY_FLAG_TRANSFORMING) + sprintf(s, "Expanding/Migrating %d.%d%%%s%s", + trans_prog / 100, + trans_prog % 100, + (flags & (ARRAY_FLAG_NEEDBUILDING|ARRAY_FLAG_BROKEN))? + ", Critical" : "", + ((flags & ARRAY_FLAG_NEEDINITIALIZING) && + !(flags & ARRAY_FLAG_REBUILDING) && + !(flags & ARRAY_FLAG_INITIALIZING))? + ", Unintialized" : ""); + else if ((flags & ARRAY_FLAG_BROKEN) && + devinfo->u.array.array_type != AT_RAID6) + return "Critical"; + else if (flags & ARRAY_FLAG_REBUILDING) + sprintf(s, + (flags & ARRAY_FLAG_NEEDINITIALIZING)? + "%sBackground initializing %d.%d%%" : + "%sRebuilding %d.%d%%", + (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", + reb_prog / 100, + reb_prog % 100); + else if (flags & ARRAY_FLAG_VERIFYING) + sprintf(s, "%sVerifying %d.%d%%", + (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", + reb_prog / 100, + reb_prog % 100); + else if (flags & ARRAY_FLAG_INITIALIZING) + sprintf(s, "%sForground initializing %d.%d%%", + (flags & ARRAY_FLAG_BROKEN)? 
"Critical, " : "", + reb_prog / 100, + reb_prog % 100); + else if (flags & ARRAY_FLAG_NEEDTRANSFORM) + sprintf(s,"%s%s%s", "Need Expanding/Migrating", + (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", + ((flags & ARRAY_FLAG_NEEDINITIALIZING) && + !(flags & ARRAY_FLAG_REBUILDING) && + !(flags & ARRAY_FLAG_INITIALIZING))? + ", Unintialized" : ""); + else if (flags & ARRAY_FLAG_NEEDINITIALIZING && + !(flags & ARRAY_FLAG_REBUILDING) && + !(flags & ARRAY_FLAG_INITIALIZING)) + sprintf(s,"%sUninitialized", + (flags & ARRAY_FLAG_BROKEN)? "Critical, " : ""); + else if ((flags & ARRAY_FLAG_NEEDBUILDING) || + (flags & ARRAY_FLAG_BROKEN)) + return "Critical"; + else + return "Normal"; + return s; +} + +static void hptiop_dump_devinfo(struct hptiop_hba *hba, + struct hptiop_getinfo *pinfo, __le32 id, int indent) +{ + struct hpt_logical_device_info_v3 devinfo; + int i; + u64 capacity; + + for (i = 0; i < indent; i++) + hptiop_copy_info(pinfo, "\t"); + + if (hptiop_get_device_info_v3(hba, id, &devinfo)) { + hptiop_copy_info(pinfo, "unknown\n"); + return; + } + + switch (devinfo.type) { + + case LDT_DEVICE: { + struct hd_driveid *driveid; + u32 flags = le32_to_cpu(devinfo.u.device.flags); + + driveid = (struct hd_driveid *)devinfo.u.device.ident; + /* model[] is 40 chars long, but we just want 20 chars here */ + driveid->model[20] = 0; + + if (indent) + if (flags & DEVICE_FLAG_DISABLED) + hptiop_copy_info(pinfo,"Missing\n"); + else + hptiop_copy_info(pinfo, "CH%d %s\n", + devinfo.u.device.path_id + 1, + driveid->model); + else { + capacity = le64_to_cpu(devinfo.capacity) * 512; + do_div(capacity, 1000000); + hptiop_copy_info(pinfo, + "CH%d %s, %lluMB, %s %s%s%s%s\n", + devinfo.u.device.path_id + 1, + driveid->model, + capacity, + (flags & DEVICE_FLAG_DISABLED)? + "Disabled" : "Normal", + devinfo.u.device.read_ahead_enabled? + "[RA]" : "", + devinfo.u.device.write_cache_enabled? + "[WC]" : "", + devinfo.u.device.TCQ_enabled? + "[TCQ]" : "", + devinfo.u.device.NCQ_enabled? + "[NCQ]" : "" + ); + } + break; + } + + case LDT_ARRAY: + if (devinfo.target_id != INVALID_TARGET_ID) + hptiop_copy_info(pinfo, "[DISK %d_%d] ", + devinfo.vbus_id, devinfo.target_id); + + capacity = le64_to_cpu(devinfo.capacity) * 512; + do_div(capacity, 1000000); + hptiop_copy_info(pinfo, "%s (%s), %lluMB, %s\n", + devinfo.u.array.name, + devinfo.u.array.array_type==AT_RAID0? "RAID0" : + devinfo.u.array.array_type==AT_RAID1? "RAID1" : + devinfo.u.array.array_type==AT_RAID5? "RAID5" : + devinfo.u.array.array_type==AT_RAID6? "RAID6" : + devinfo.u.array.array_type==AT_JBOD? "JBOD" : + "unknown", + capacity, + get_array_status(&devinfo)); + for (i = 0; i < devinfo.u.array.ndisk; i++) { + if (hpt_id_valid(devinfo.u.array.members[i])) { + if (cpu_to_le16(1<private_data; + struct hptiop_getinfo info; + int i, j, ndev; + struct hpt_controller_info con_info; + struct hpt_channel_info chan_info; + __le32 ids[32]; + + info.buffer = buf; + info.buflength = count; + info.bufoffset = ppos ? 
*ppos : 0; + info.filpos = 0; + info.buffillen = 0; + + if (hptiop_get_controller_info(hba, &con_info)) + return -EIO; + + for (i = 0; i < con_info.num_buses; i++) { + if (hptiop_get_channel_info(hba, i, &chan_info) == 0) { + if (hpt_id_valid(chan_info.devices[0])) + hptiop_dump_devinfo(hba, &info, + chan_info.devices[0], 0); + if (hpt_id_valid(chan_info.devices[1])) + hptiop_dump_devinfo(hba, &info, + chan_info.devices[1], 0); + } + } + + ndev = hptiop_get_logical_devices(hba, ids, + sizeof(ids) / sizeof(ids[0])); + + /* + * if hptiop_get_logical_devices fails, ndev==-1 and it just + * output nothing here + */ + for (j = 0; j < ndev; j++) + hptiop_dump_devinfo(hba, &info, ids[j], 0); + + if (ppos) + *ppos += info.buffillen; + + return info.buffillen; +} + +static int hptiop_cdev_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct hptiop_hba *hba = file->private_data; + struct hpt_ioctl_u ioctl_u; + struct hpt_ioctl_k ioctl_k; + u32 bytes_returned; + int err = -EINVAL; + + if (copy_from_user(&ioctl_u, + (void __user *)arg, sizeof(struct hpt_ioctl_u))) + return -EINVAL; + + if (ioctl_u.magic != HPT_IOCTL_MAGIC) + return -EINVAL; + + ioctl_k.ioctl_code = ioctl_u.ioctl_code; + ioctl_k.inbuf = NULL; + ioctl_k.inbuf_size = ioctl_u.inbuf_size; + ioctl_k.outbuf = NULL; + ioctl_k.outbuf_size = ioctl_u.outbuf_size; + ioctl_k.hba = hba; + ioctl_k.bytes_returned = &bytes_returned; + + /* verify user buffer */ + if ((ioctl_k.inbuf_size && !access_ok(VERIFY_READ, + ioctl_u.inbuf, ioctl_k.inbuf_size)) || + (ioctl_k.outbuf_size && !access_ok(VERIFY_WRITE, + ioctl_u.outbuf, ioctl_k.outbuf_size)) || + (ioctl_u.bytes_returned && !access_ok(VERIFY_WRITE, + ioctl_u.bytes_returned, sizeof(u32))) || + ioctl_k.inbuf_size + ioctl_k.outbuf_size > 0x10000) { + + dprintk("scsi%d: got bad user address\n", hba->host->host_no); + return -EINVAL; + } + + /* map buffer to kernel. 
*/ + if (ioctl_k.inbuf_size) { + ioctl_k.inbuf = kmalloc(ioctl_k.inbuf_size, GFP_KERNEL); + if (!ioctl_k.inbuf) { + dprintk("scsi%d: fail to alloc inbuf\n", + hba->host->host_no); + err = -ENOMEM; + goto err_exit; + } + + if (copy_from_user(ioctl_k.inbuf, + ioctl_u.inbuf, ioctl_k.inbuf_size)) { + goto err_exit; + } + } + + if (ioctl_k.outbuf_size) { + ioctl_k.outbuf = kmalloc(ioctl_k.outbuf_size, GFP_KERNEL); + if (!ioctl_k.outbuf) { + dprintk("scsi%d: fail to alloc outbuf\n", + hba->host->host_no); + err = -ENOMEM; + goto err_exit; + } + } + + hptiop_do_ioctl(&ioctl_k); + + if (ioctl_k.result == HPT_IOCTL_RESULT_OK) { + if (ioctl_k.outbuf_size && + copy_to_user(ioctl_u.outbuf, + ioctl_k.outbuf, ioctl_k.outbuf_size)) + goto err_exit; + + if (ioctl_u.bytes_returned && + copy_to_user(ioctl_u.bytes_returned, + &bytes_returned, sizeof(u32))) + goto err_exit; + + err = 0; + } + +err_exit: + kfree(ioctl_k.inbuf); + kfree(ioctl_k.outbuf); + + return err; +} + +static int hptiop_cdev_open(struct inode *inode, struct file *file) +{ + struct hptiop_hba *hba; + unsigned i = 0, minor = iminor(inode); + int ret = -ENODEV; + + spin_lock(&hptiop_hba_list_lock); + list_for_each_entry(hba, &hptiop_hba_list, link) { + if (i == minor) { + file->private_data = hba; + ret = 0; + goto out; + } + i++; + } + +out: + spin_unlock(&hptiop_hba_list_lock); + return ret; +} + +static struct file_operations hptiop_cdev_fops = { + .owner = THIS_MODULE, + .read = hptiop_cdev_read, + .ioctl = hptiop_cdev_ioctl, + .open = hptiop_cdev_open, +}; + static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf) { struct Scsi_Host *host = class_to_shost(class_dev); @@ -771,13 +1296,19 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev, goto unmap_pci_bar; } + if (scsi_add_host(host, &pcidev->dev)) { + printk(KERN_ERR "scsi%d: scsi_add_host failed\n", + hba->host->host_no); + goto unmap_pci_bar; + } + pci_set_drvdata(pcidev, host); if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED, driver_name, hba)) { printk(KERN_ERR "scsi%d: request irq %d failed\n", hba->host->host_no, pcidev->irq); - goto unmap_pci_bar; + goto remove_scsi_host; } /* Allocate request mem */ @@ -824,12 +1355,9 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev, if (hptiop_initialize_iop(hba)) goto free_request_mem; - if (scsi_add_host(host, &pcidev->dev)) { - printk(KERN_ERR "scsi%d: scsi_add_host failed\n", - hba->host->host_no); - goto free_request_mem; - } - + spin_lock(&hptiop_hba_list_lock); + list_add_tail(&hba->link, &hptiop_hba_list); + spin_unlock(&hptiop_hba_list_lock); scsi_scan_host(host); @@ -844,6 +1372,9 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev, free_request_irq: free_irq(hba->pcidev->irq, hba); +remove_scsi_host: + scsi_remove_host(host); + unmap_pci_bar: iounmap(hba->iop); @@ -891,6 +1422,10 @@ static void hptiop_remove(struct pci_dev *pcidev) scsi_remove_host(host); + spin_lock(&hptiop_hba_list_lock); + list_del_init(&hba->link); + spin_unlock(&hptiop_hba_list_lock); + hptiop_shutdown(pcidev); free_irq(hba->pcidev->irq, hba); @@ -927,12 +1462,27 @@ static struct pci_driver hptiop_pci_driver = { static int __init hptiop_module_init(void) { + int error; + printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver); - return pci_register_driver(&hptiop_pci_driver); + + error = pci_register_driver(&hptiop_pci_driver); + if (error < 0) + return error; + + hptiop_cdev_major = register_chrdev(0, "hptiop", &hptiop_cdev_fops); + if (hptiop_cdev_major < 0) { + printk(KERN_WARNING "unable to 
register hptiop device.\n"); + return hptiop_cdev_major; + } + + return 0; } static void __exit hptiop_module_exit(void) { + dprintk("hptiop_module_exit\n"); + unregister_chrdev(hptiop_cdev_major, "hptiop"); pci_unregister_driver(&hptiop_pci_driver); } diff --git a/trunk/drivers/scsi/ide-scsi.c b/trunk/drivers/scsi/ide-scsi.c index 94d1de55607f..f7b5d7372d26 100644 --- a/trunk/drivers/scsi/ide-scsi.c +++ b/trunk/drivers/scsi/ide-scsi.c @@ -517,7 +517,7 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive) /* No more interrupts */ if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) printk (KERN_INFO "Packet command completed, %d bytes transferred\n", pc->actually_transferred); - local_irq_enable_in_hardirq(); + local_irq_enable(); if (status.b.check) rq->errors++; idescsi_end_request (drive, 1, 0); diff --git a/trunk/drivers/scsi/iscsi_tcp.c b/trunk/drivers/scsi/iscsi_tcp.c index 058f094f945a..848fb2aa4ca3 100644 --- a/trunk/drivers/scsi/iscsi_tcp.c +++ b/trunk/drivers/scsi/iscsi_tcp.c @@ -43,10 +43,13 @@ #include "iscsi_tcp.h" +#define ISCSI_TCP_VERSION "1.0-595" + MODULE_AUTHOR("Dmitry Yusupov , " "Alex Aizman "); MODULE_DESCRIPTION("iSCSI/TCP data-path"); MODULE_LICENSE("GPL"); +MODULE_VERSION(ISCSI_TCP_VERSION); /* #define DEBUG_TCP */ #define DEBUG_ASSERT @@ -182,19 +185,11 @@ iscsi_hdr_extract(struct iscsi_tcp_conn *tcp_conn) * must be called with session lock */ static void -iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +__iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) { struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; - struct iscsi_r2t_info *r2t; struct scsi_cmnd *sc; - /* flush ctask's r2t queues */ - while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) { - __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, - sizeof(void*)); - debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n"); - } - sc = ctask->sc; if (unlikely(!sc)) return; @@ -379,7 +374,6 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) spin_unlock(&session->lock); return 0; } - rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); BUG_ON(!rc); @@ -405,7 +399,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) tcp_ctask->exp_r2tsn = r2tsn + 1; tcp_ctask->xmstate |= XMSTATE_SOL_HDR; __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); - list_move_tail(&ctask->running, &conn->xmitqueue); + __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*)); scsi_queue_work(session->host, &conn->xmitwork); conn->r2t_pdus_cnt++; @@ -483,8 +477,6 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) case ISCSI_OP_SCSI_DATA_IN: tcp_conn->in.ctask = session->cmds[itt]; rc = iscsi_data_rsp(conn, tcp_conn->in.ctask); - if (rc) - return rc; /* fall through */ case ISCSI_OP_SCSI_CMD_RSP: tcp_conn->in.ctask = session->cmds[itt]; @@ -492,7 +484,7 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) goto copy_hdr; spin_lock(&session->lock); - iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask); + __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask); rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); spin_unlock(&session->lock); break; @@ -508,28 +500,13 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) break; case ISCSI_OP_LOGIN_RSP: case ISCSI_OP_TEXT_RSP: + case ISCSI_OP_LOGOUT_RSP: + case ISCSI_OP_NOOP_IN: case ISCSI_OP_REJECT: case ISCSI_OP_ASYNC_EVENT: - /* - * It is possible that we could get a PDU with a buffer larger - * than 8K, but there are no targets that currently do this. 
- * For now we fail until we find a vendor that needs it - */ - if (DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH < - tcp_conn->in.datalen) { - printk(KERN_ERR "iscsi_tcp: received buffer of len %u " - "but conn buffer is only %u (opcode %0x)\n", - tcp_conn->in.datalen, - DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, opcode); - rc = ISCSI_ERR_PROTO; - break; - } - if (tcp_conn->in.datalen) goto copy_hdr; /* fall through */ - case ISCSI_OP_LOGOUT_RSP: - case ISCSI_OP_NOOP_IN: case ISCSI_OP_SCSI_TMFUNC_RSP: rc = iscsi_complete_pdu(conn, hdr, NULL, 0); break; @@ -546,7 +523,7 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) * skbs to complete the command then we have to copy the header * for later use */ - if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <= + if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy < (tcp_conn->in.datalen + tcp_conn->in.padding + (conn->datadgst_en ? 4 : 0))) { debug_tcp("Copying header for later use. in.copy %d in.datalen" @@ -637,9 +614,9 @@ iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask, * byte counters. **/ static inline int -iscsi_tcp_copy(struct iscsi_conn *conn) +iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn) { - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + void *buf = tcp_conn->data; int buf_size = tcp_conn->in.datalen; int buf_left = buf_size - tcp_conn->data_copied; int size = min(tcp_conn->in.copy, buf_left); @@ -650,7 +627,7 @@ iscsi_tcp_copy(struct iscsi_conn *conn) BUG_ON(size <= 0); rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset, - (char*)conn->data + tcp_conn->data_copied, size); + (char*)buf + tcp_conn->data_copied, size); BUG_ON(rc); tcp_conn->in.offset += size; @@ -768,11 +745,10 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn) done: /* check for non-exceptional status */ if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) { - debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n", - (long)sc, sc->result, ctask->itt, - tcp_conn->in.hdr->flags); + debug_scsi("done [sc %lx res %d itt 0x%x]\n", + (long)sc, sc->result, ctask->itt); spin_lock(&conn->session->lock); - iscsi_tcp_cleanup_ctask(conn, ctask); + __iscsi_ctask_cleanup(conn, ctask); __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0); spin_unlock(&conn->session->lock); } @@ -793,25 +769,26 @@ iscsi_data_recv(struct iscsi_conn *conn) break; case ISCSI_OP_SCSI_CMD_RSP: spin_lock(&conn->session->lock); - iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask); + __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask); spin_unlock(&conn->session->lock); case ISCSI_OP_TEXT_RSP: case ISCSI_OP_LOGIN_RSP: + case ISCSI_OP_NOOP_IN: case ISCSI_OP_ASYNC_EVENT: case ISCSI_OP_REJECT: /* * Collect data segment to the connection's data * placeholder */ - if (iscsi_tcp_copy(conn)) { + if (iscsi_tcp_copy(tcp_conn)) { rc = -EAGAIN; goto exit; } - rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data, + rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, tcp_conn->data, tcp_conn->in.datalen); if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP) - iscsi_recv_digest_update(tcp_conn, conn->data, + iscsi_recv_digest_update(tcp_conn, tcp_conn->data, tcp_conn->in.datalen); break; default: @@ -866,7 +843,7 @@ iscsi_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, if (rc == -EAGAIN) goto nomore; else { - iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); + iscsi_conn_failure(conn, rc); return 0; } } @@ -920,7 +897,7 @@ iscsi_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, if (rc) { if (rc == -EAGAIN) goto again; - iscsi_conn_failure(conn, 
ISCSI_ERR_CONN_FAILED); + iscsi_conn_failure(conn, rc); return 0; } tcp_conn->in.copy -= tcp_conn->in.padding; @@ -1051,8 +1028,9 @@ iscsi_conn_set_callbacks(struct iscsi_conn *conn) } static void -iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn) +iscsi_conn_restore_callbacks(struct iscsi_conn *conn) { + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct sock *sk = tcp_conn->sock->sk; /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */ @@ -1330,7 +1308,7 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask) ctask->imm_count - ctask->unsol_count; - debug_scsi("cmd [itt 0x%x total %d imm %d imm_data %d " + debug_scsi("cmd [itt %x total %d imm %d imm_data %d " "r2t_data %d]\n", ctask->itt, ctask->total_length, ctask->imm_count, ctask->unsol_count, tcp_ctask->r2t_data_count); @@ -1658,7 +1636,7 @@ handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) } solicit_again: /* - * send Data-Out within this R2T sequence. + * send Data-Out whitnin this R2T sequence. */ if (!r2t->data_count) goto data_out_done; @@ -1753,7 +1731,7 @@ handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_data_task *dtask = tcp_ctask->dtask; - int sent = 0, rc; + int sent, rc; tcp_ctask->xmstate &= ~XMSTATE_W_PAD; iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad, @@ -1922,31 +1900,26 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; /* initial operational parameters */ tcp_conn->hdr_size = sizeof(struct iscsi_hdr); + tcp_conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; + + /* allocate initial PDU receive place holder */ + if (tcp_conn->data_size <= PAGE_SIZE) + tcp_conn->data = kmalloc(tcp_conn->data_size, GFP_KERNEL); + else + tcp_conn->data = (void*)__get_free_pages(GFP_KERNEL, + get_order(tcp_conn->data_size)); + if (!tcp_conn->data) + goto max_recv_dlenght_alloc_fail; return cls_conn; +max_recv_dlenght_alloc_fail: + kfree(tcp_conn); tcp_conn_alloc_fail: iscsi_conn_teardown(cls_conn); return NULL; } -static void -iscsi_tcp_release_conn(struct iscsi_conn *conn) -{ - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - - if (!tcp_conn->sock) - return; - - sock_hold(tcp_conn->sock->sk); - iscsi_conn_restore_callbacks(tcp_conn); - sock_put(tcp_conn->sock->sk); - - sock_release(tcp_conn->sock); - tcp_conn->sock = NULL; - conn->recv_lock = NULL; -} - static void iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) { @@ -1957,7 +1930,6 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) if (conn->hdrdgst_en || conn->datadgst_en) digest = 1; - iscsi_tcp_release_conn(conn); iscsi_conn_teardown(cls_conn); /* now free tcp_conn */ @@ -1972,18 +1944,15 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) crypto_free_tfm(tcp_conn->data_rx_tfm); } + /* free conn->data, size = MaxRecvDataSegmentLength */ + if (tcp_conn->data_size <= PAGE_SIZE) + kfree(tcp_conn->data); + else + free_pages((unsigned long)tcp_conn->data, + get_order(tcp_conn->data_size)); kfree(tcp_conn); } -static void -iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) -{ - struct iscsi_conn *conn = cls_conn->dd_data; - - iscsi_conn_stop(cls_conn, flag); - iscsi_tcp_release_conn(conn); -} - static int iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, @@ -2032,6 +2001,52 @@ 
iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, return 0; } +static void +iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +{ + struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; + struct iscsi_r2t_info *r2t; + + /* flush ctask's r2t queues */ + while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) + __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, + sizeof(void*)); + + __iscsi_ctask_cleanup(conn, ctask); +} + +static void +iscsi_tcp_suspend_conn_rx(struct iscsi_conn *conn) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct sock *sk; + + if (!tcp_conn->sock) + return; + + sk = tcp_conn->sock->sk; + write_lock_bh(&sk->sk_callback_lock); + set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); + write_unlock_bh(&sk->sk_callback_lock); +} + +static void +iscsi_tcp_terminate_conn(struct iscsi_conn *conn) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + + if (!tcp_conn->sock) + return; + + sock_hold(tcp_conn->sock->sk); + iscsi_conn_restore_callbacks(conn); + sock_put(tcp_conn->sock->sk); + + sock_release(tcp_conn->sock); + tcp_conn->sock = NULL; + conn->recv_lock = NULL; +} + /* called with host lock */ static void iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, @@ -2042,7 +2057,6 @@ iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr, sizeof(struct iscsi_hdr)); tcp_mtask->xmstate = XMSTATE_IMM_HDR; - tcp_mtask->sent = 0; if (mtask->data_count) iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data, @@ -2124,6 +2138,39 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, int value; switch(param) { + case ISCSI_PARAM_MAX_RECV_DLENGTH: { + char *saveptr = tcp_conn->data; + gfp_t flags = GFP_KERNEL; + + sscanf(buf, "%d", &value); + if (tcp_conn->data_size >= value) { + iscsi_set_param(cls_conn, param, buf, buflen); + break; + } + + spin_lock_bh(&session->lock); + if (conn->stop_stage == STOP_CONN_RECOVER) + flags = GFP_ATOMIC; + spin_unlock_bh(&session->lock); + + if (value <= PAGE_SIZE) + tcp_conn->data = kmalloc(value, flags); + else + tcp_conn->data = (void*)__get_free_pages(flags, + get_order(value)); + if (tcp_conn->data == NULL) { + tcp_conn->data = saveptr; + return -ENOMEM; + } + if (tcp_conn->data_size <= PAGE_SIZE) + kfree(saveptr); + else + free_pages((unsigned long)saveptr, + get_order(tcp_conn->data_size)); + iscsi_set_param(cls_conn, param, buf, buflen); + tcp_conn->data_size = value; + break; + } case ISCSI_PARAM_HDRDGST_EN: iscsi_set_param(cls_conn, param, buf, buflen); tcp_conn->hdr_size = sizeof(struct iscsi_hdr); @@ -2314,7 +2361,8 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session) } static struct scsi_host_template iscsi_sht = { - .name = "iSCSI Initiator over TCP/IP", + .name = "iSCSI Initiator over TCP/IP, v" + ISCSI_TCP_VERSION, .queuecommand = iscsi_queuecommand, .change_queue_depth = iscsi_change_queue_depth, .can_queue = ISCSI_XMIT_CMDS_MAX - 1, @@ -2366,7 +2414,10 @@ static struct iscsi_transport iscsi_tcp_transport = { .get_conn_param = iscsi_tcp_conn_get_param, .get_session_param = iscsi_session_get_param, .start_conn = iscsi_conn_start, - .stop_conn = iscsi_tcp_conn_stop, + .stop_conn = iscsi_conn_stop, + /* these are called as part of conn recovery */ + .suspend_conn_recv = iscsi_tcp_suspend_conn_rx, + .terminate_conn = iscsi_tcp_terminate_conn, /* IO */ .send_pdu = iscsi_conn_send_pdu, .get_stats = iscsi_conn_get_stats, diff 
--git a/trunk/drivers/scsi/iscsi_tcp.h b/trunk/drivers/scsi/iscsi_tcp.h index 6a4ee704e46e..808302832e68 100644 --- a/trunk/drivers/scsi/iscsi_tcp.h +++ b/trunk/drivers/scsi/iscsi_tcp.h @@ -78,6 +78,8 @@ struct iscsi_tcp_conn { char hdrext[4*sizeof(__u16) + sizeof(__u32)]; int data_copied; + char *data; /* data placeholder */ + int data_size; /* actual recv_dlength */ int stop_stage; /* conn_stop() flag: * * stop to recover, * * stop to terminate */ diff --git a/trunk/drivers/scsi/libata-core.c b/trunk/drivers/scsi/libata-core.c index 73dd6c8deede..16fc2dd8f2f7 100644 --- a/trunk/drivers/scsi/libata-core.c +++ b/trunk/drivers/scsi/libata-core.c @@ -2746,7 +2746,7 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class) if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) return rc; - scontrol = (scontrol & 0x0f0) | 0x304; + scontrol = (scontrol & 0x0f0) | 0x302; if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol))) return rc; diff --git a/trunk/drivers/scsi/libiscsi.c b/trunk/drivers/scsi/libiscsi.c index 5884cd26d53a..7e6e031cc41b 100644 --- a/trunk/drivers/scsi/libiscsi.c +++ b/trunk/drivers/scsi/libiscsi.c @@ -189,7 +189,6 @@ static void iscsi_complete_command(struct iscsi_session *session, { struct scsi_cmnd *sc = ctask->sc; - ctask->state = ISCSI_TASK_COMPLETED; ctask->sc = NULL; list_del_init(&ctask->running); __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*)); @@ -276,25 +275,6 @@ static int iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, return rc; } -static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) -{ - struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr; - - conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; - conn->tmfrsp_pdus_cnt++; - - if (conn->tmabort_state != TMABORT_INITIAL) - return; - - if (tmf->response == ISCSI_TMF_RSP_COMPLETE) - conn->tmabort_state = TMABORT_SUCCESS; - else if (tmf->response == ISCSI_TMF_RSP_NO_TASK) - conn->tmabort_state = TMABORT_NOT_FOUND; - else - conn->tmabort_state = TMABORT_FAILED; - wake_up(&conn->ehwait); -} - /** * __iscsi_complete_pdu - complete pdu * @conn: iscsi conn @@ -360,10 +340,6 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, switch(opcode) { case ISCSI_OP_LOGOUT_RSP: - if (datalen) { - rc = ISCSI_ERR_PROTO; - break; - } conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; /* fall through */ case ISCSI_OP_LOGIN_RSP: @@ -372,8 +348,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, * login related PDU's exp_statsn is handled in * userspace */ - if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) - rc = ISCSI_ERR_CONN_FAILED; + rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen); list_del(&mtask->running); if (conn->login_mtask != mtask) __kfifo_put(session->mgmtpool.queue, @@ -385,17 +360,25 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, break; } - iscsi_tmf_rsp(conn, hdr); + conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; + conn->tmfrsp_pdus_cnt++; + if (conn->tmabort_state == TMABORT_INITIAL) { + conn->tmabort_state = + ((struct iscsi_tm_rsp *)hdr)-> + response == ISCSI_TMF_RSP_COMPLETE ? 
+ TMABORT_SUCCESS:TMABORT_FAILED; + /* unblock eh_abort() */ + wake_up(&conn->ehwait); + } break; case ISCSI_OP_NOOP_IN: - if (hdr->ttt != ISCSI_RESERVED_TAG || datalen) { + if (hdr->ttt != ISCSI_RESERVED_TAG) { rc = ISCSI_ERR_PROTO; break; } conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; - if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) - rc = ISCSI_ERR_CONN_FAILED; + rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen); list_del(&mtask->running); if (conn->login_mtask != mtask) __kfifo_put(session->mgmtpool.queue, @@ -408,21 +391,14 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, } else if (itt == ISCSI_RESERVED_TAG) { switch(opcode) { case ISCSI_OP_NOOP_IN: - if (datalen) { - rc = ISCSI_ERR_PROTO; - break; - } - - rc = iscsi_check_assign_cmdsn(session, + if (!datalen) { + rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)hdr); - if (rc) - break; - - if (hdr->ttt == ISCSI_RESERVED_TAG) - break; - - if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0)) - rc = ISCSI_ERR_CONN_FAILED; + if (!rc && hdr->ttt != ISCSI_RESERVED_TAG) + rc = iscsi_recv_pdu(conn->cls_conn, + hdr, NULL, 0); + } else + rc = ISCSI_ERR_PROTO; break; case ISCSI_OP_REJECT: /* we need sth like iscsi_reject_rsp()*/ @@ -592,24 +568,20 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) } /* process command queue */ - spin_lock_bh(&conn->session->lock); - while (!list_empty(&conn->xmitqueue)) { + while (__kfifo_get(conn->xmitqueue, (void*)&conn->ctask, + sizeof(void*))) { /* * iscsi tcp may readd the task to the xmitqueue to send * write data */ - conn->ctask = list_entry(conn->xmitqueue.next, - struct iscsi_cmd_task, running); - conn->ctask->state = ISCSI_TASK_RUNNING; - list_move_tail(conn->xmitqueue.next, &conn->run_list); + spin_lock_bh(&conn->session->lock); + if (list_empty(&conn->ctask->running)) + list_add_tail(&conn->ctask->running, &conn->run_list); spin_unlock_bh(&conn->session->lock); - rc = tt->xmit_cmd_task(conn, conn->ctask); if (rc) goto again; - spin_lock_bh(&conn->session->lock); } - spin_unlock_bh(&conn->session->lock); /* done with this ctask */ conn->ctask = NULL; @@ -719,7 +691,6 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) sc->SCp.phase = session->age; sc->SCp.ptr = (char *)ctask; - ctask->state = ISCSI_TASK_PENDING; ctask->mtask = NULL; ctask->conn = conn; ctask->sc = sc; @@ -729,7 +700,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) session->tt->init_cmd_task(ctask); - list_add_tail(&ctask->running, &conn->xmitqueue); + __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*)); debug_scsi( "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n", sc->sc_data_direction == DMA_TO_DEVICE ? 
"write" : "read", @@ -1006,28 +977,32 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc, /* * xmit mutex and session lock must be held */ -static struct iscsi_mgmt_task * -iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt) -{ - int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*); - struct iscsi_mgmt_task *task; - - debug_scsi("searching %d tasks\n", nr_tasks); - - for (i = 0; i < nr_tasks; i++) { - __kfifo_get(fifo, (void*)&task, sizeof(void*)); - debug_scsi("check task %u\n", task->itt); - - if (task->itt == itt) { - debug_scsi("matched task\n"); - return task; - } - - __kfifo_put(fifo, (void*)&task, sizeof(void*)); - } - return NULL; +#define iscsi_remove_task(tasktype) \ +static struct iscsi_##tasktype * \ +iscsi_remove_##tasktype(struct kfifo *fifo, uint32_t itt) \ +{ \ + int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*); \ + struct iscsi_##tasktype *task; \ + \ + debug_scsi("searching %d tasks\n", nr_tasks); \ + \ + for (i = 0; i < nr_tasks; i++) { \ + __kfifo_get(fifo, (void*)&task, sizeof(void*)); \ + debug_scsi("check task %u\n", task->itt); \ + \ + if (task->itt == itt) { \ + debug_scsi("matched task\n"); \ + return task; \ + } \ + \ + __kfifo_put(fifo, (void*)&task, sizeof(void*)); \ + } \ + return NULL; \ } +iscsi_remove_task(mgmt_task); +iscsi_remove_task(cmd_task); + static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask) { struct iscsi_conn *conn = ctask->conn; @@ -1052,13 +1027,12 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, { struct scsi_cmnd *sc; - sc = ctask->sc; - if (!sc) - return; - conn->session->tt->cleanup_cmd_task(conn, ctask); iscsi_ctask_mtask_cleanup(ctask); + sc = ctask->sc; + if (!sc) + return; sc->result = err; sc->resid = sc->request_bufflen; iscsi_complete_command(conn->session, ctask); @@ -1069,6 +1043,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr; struct iscsi_conn *conn = ctask->conn; struct iscsi_session *session = conn->session; + struct iscsi_cmd_task *pending_ctask; int rc; conn->eh_abort_cnt++; @@ -1086,11 +1061,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) goto failed; /* ctask completed before time out */ - if (!ctask->sc) { - spin_unlock_bh(&session->lock); - debug_scsi("sc completed while abort in progress\n"); - goto success_rel_mutex; - } + if (!ctask->sc) + goto success; /* what should we do here ? 
*/ if (conn->ctask == ctask) { @@ -1099,8 +1071,17 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) goto failed; } - if (ctask->state == ISCSI_TASK_PENDING) - goto success_cleanup; + /* check for the easy pending cmd abort */ + pending_ctask = iscsi_remove_cmd_task(conn->xmitqueue, ctask->itt); + if (pending_ctask) { + /* iscsi_tcp queues write transfers on the xmitqueue */ + if (list_empty(&pending_ctask->running)) { + debug_scsi("found pending task\n"); + goto success; + } else + __kfifo_put(conn->xmitqueue, (void*)&pending_ctask, + sizeof(void*)); + } conn->tmabort_state = TMABORT_INITIAL; @@ -1108,31 +1089,25 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) rc = iscsi_exec_abort_task(sc, ctask); spin_lock_bh(&session->lock); + iscsi_ctask_mtask_cleanup(ctask); if (rc || sc->SCp.phase != session->age || session->state != ISCSI_STATE_LOGGED_IN) goto failed; - iscsi_ctask_mtask_cleanup(ctask); - switch (conn->tmabort_state) { - case TMABORT_SUCCESS: - goto success_cleanup; - case TMABORT_NOT_FOUND: - if (!ctask->sc) { - /* ctask completed before tmf abort response */ - spin_unlock_bh(&session->lock); - debug_scsi("sc completed while abort in progress\n"); - goto success_rel_mutex; - } - /* fall through */ - default: - /* timedout or failed */ + /* ctask completed before tmf abort response */ + if (!ctask->sc) { + debug_scsi("sc completed while abort in progress\n"); + goto success; + } + + if (conn->tmabort_state != TMABORT_SUCCESS) { spin_unlock_bh(&session->lock); iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); spin_lock_bh(&session->lock); goto failed; } -success_cleanup: +success: debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt); spin_unlock_bh(&session->lock); @@ -1146,7 +1121,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) spin_unlock(&session->lock); write_unlock_bh(conn->recv_lock); -success_rel_mutex: mutex_unlock(&conn->xmitmutex); return SUCCESS; @@ -1289,7 +1263,6 @@ iscsi_session_setup(struct iscsi_transport *iscsit, if (cmd_task_size) ctask->dd_data = &ctask[1]; ctask->itt = cmd_i; - INIT_LIST_HEAD(&ctask->running); } spin_lock_init(&session->lock); @@ -1309,7 +1282,6 @@ iscsi_session_setup(struct iscsi_transport *iscsit, if (mgmt_task_size) mtask->dd_data = &mtask[1]; mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i; - INIT_LIST_HEAD(&mtask->running); } if (scsi_add_host(shost, NULL)) @@ -1350,18 +1322,15 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) { struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct iscsi_session *session = iscsi_hostdata(shost->hostdata); - struct module *owner = cls_session->transport->owner; scsi_remove_host(shost); iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); iscsi_pool_free(&session->cmdpool, (void**)session->cmds); - kfree(session->targetname); - iscsi_destroy_session(cls_session); scsi_host_put(shost); - module_put(owner); + module_put(cls_session->transport->owner); } EXPORT_SYMBOL_GPL(iscsi_session_teardown); @@ -1392,7 +1361,12 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx) conn->tmabort_state = TMABORT_INITIAL; INIT_LIST_HEAD(&conn->run_list); INIT_LIST_HEAD(&conn->mgmt_run_list); - INIT_LIST_HEAD(&conn->xmitqueue); + + /* initialize general xmit PDU commands queue */ + conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*), + GFP_KERNEL, NULL); + if (conn->xmitqueue == ERR_PTR(-ENOMEM)) + goto xmitqueue_alloc_fail; /* initialize general immediate & non-immediate PDU commands queue */ conn->immqueue = kfifo_alloc(session->mgmtpool_max * 
sizeof(void*), @@ -1420,7 +1394,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx) data = kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL); if (!data) goto login_mtask_data_alloc_fail; - conn->login_mtask->data = conn->data = data; + conn->login_mtask->data = data; init_timer(&conn->tmabort_timer); mutex_init(&conn->xmitmutex); @@ -1436,6 +1410,8 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx) mgmtqueue_alloc_fail: kfifo_free(conn->immqueue); immqueue_alloc_fail: + kfifo_free(conn->xmitqueue); +xmitqueue_alloc_fail: iscsi_destroy_conn(cls_conn); return NULL; } @@ -1456,6 +1432,12 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); mutex_lock(&conn->xmitmutex); + if (conn->c_stage == ISCSI_CONN_INITIAL_STAGE) { + if (session->tt->suspend_conn_recv) + session->tt->suspend_conn_recv(conn); + + session->tt->terminate_conn(conn); + } spin_lock_bh(&session->lock); conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; @@ -1492,8 +1474,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) } spin_lock_bh(&session->lock); - kfree(conn->data); - kfree(conn->persistent_address); + kfree(conn->login_mtask->data); __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask, sizeof(void*)); list_del(&conn->item); @@ -1508,6 +1489,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1; spin_unlock_bh(&session->lock); + kfifo_free(conn->xmitqueue); kfifo_free(conn->immqueue); kfifo_free(conn->mgmtqueue); @@ -1590,7 +1572,7 @@ static void fail_all_commands(struct iscsi_conn *conn) struct iscsi_cmd_task *ctask, *tmp; /* flush pending */ - list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) { + while (__kfifo_get(conn->xmitqueue, (void*)&ctask, sizeof(void*))) { debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc, ctask->itt); fail_command(conn, ctask, DID_BUS_BUSY << 16); @@ -1633,9 +1615,8 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); spin_unlock_bh(&session->lock); - write_lock_bh(conn->recv_lock); - set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); - write_unlock_bh(conn->recv_lock); + if (session->tt->suspend_conn_recv) + session->tt->suspend_conn_recv(conn); mutex_lock(&conn->xmitmutex); /* @@ -1654,6 +1635,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, } } + session->tt->terminate_conn(conn); /* * flush queues. 
*/ diff --git a/trunk/drivers/scsi/lpfc/lpfc_attr.c b/trunk/drivers/scsi/lpfc/lpfc_attr.c index d384c16f4a87..5c68cdd8736f 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_attr.c +++ b/trunk/drivers/scsi/lpfc/lpfc_attr.c @@ -222,7 +222,7 @@ lpfc_issue_lip(struct Scsi_Host *host) pmboxq->mb.mbxCommand = MBX_DOWN_LINK; pmboxq->mb.mbxOwner = OWN_HOST; - mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); + mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) { memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); @@ -884,7 +884,7 @@ sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count) phba->sysfs_mbox.mbox == NULL ) { sysfs_mbox_idle(phba); spin_unlock_irq(host->host_lock); - return -EAGAIN; + return -EINVAL; } } @@ -1000,15 +1000,14 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count) spin_unlock_irq(phba->host->host_lock); rc = lpfc_sli_issue_mbox_wait (phba, phba->sysfs_mbox.mbox, - lpfc_mbox_tmo_val(phba, - phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ); + phba->fc_ratov * 2); spin_lock_irq(phba->host->host_lock); } if (rc != MBX_SUCCESS) { sysfs_mbox_idle(phba); spin_unlock_irq(host->host_lock); - return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV; + return -ENODEV; } phba->sysfs_mbox.state = SMBOX_READING; } @@ -1017,7 +1016,7 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count) printk(KERN_WARNING "mbox_read: Bad State\n"); sysfs_mbox_idle(phba); spin_unlock_irq(host->host_lock); - return -EAGAIN; + return -EINVAL; } memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count); @@ -1211,10 +1210,8 @@ lpfc_get_stats(struct Scsi_Host *shost) struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; struct lpfc_sli *psli = &phba->sli; struct fc_host_statistics *hs = &phba->link_stats; - struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; LPFC_MBOXQ_t *pmboxq; MAILBOX_t *pmb; - unsigned long seconds; int rc = 0; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -1275,103 +1272,22 @@ lpfc_get_stats(struct Scsi_Host *shost) hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt; hs->error_frames = pmb->un.varRdLnk.crcCnt; - hs->link_failure_count -= lso->link_failure_count; - hs->loss_of_sync_count -= lso->loss_of_sync_count; - hs->loss_of_signal_count -= lso->loss_of_signal_count; - hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count; - hs->invalid_tx_word_count -= lso->invalid_tx_word_count; - hs->invalid_crc_count -= lso->invalid_crc_count; - hs->error_frames -= lso->error_frames; - if (phba->fc_topology == TOPOLOGY_LOOP) { hs->lip_count = (phba->fc_eventTag >> 1); - hs->lip_count -= lso->link_events; hs->nos_count = -1; } else { hs->lip_count = -1; hs->nos_count = (phba->fc_eventTag >> 1); - hs->nos_count -= lso->link_events; } hs->dumped_frames = -1; - seconds = get_seconds(); - if (seconds < psli->stats_start) - hs->seconds_since_last_reset = seconds + - ((unsigned long)-1 - psli->stats_start); - else - hs->seconds_since_last_reset = seconds - psli->stats_start; +/* FIX ME */ + /*hs->SecondsSinceLastReset = (jiffies - lpfc_loadtime) / HZ;*/ return hs; } -static void -lpfc_reset_stats(struct Scsi_Host *shost) -{ - struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; - struct lpfc_sli *psli = &phba->sli; - struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; - LPFC_MBOXQ_t *pmboxq; - MAILBOX_t *pmb; - int rc = 0; - - pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!pmboxq) - return; - 
memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); - - pmb = &pmboxq->mb; - pmb->mbxCommand = MBX_READ_STATUS; - pmb->mbxOwner = OWN_HOST; - pmb->un.varWords[0] = 0x1; /* reset request */ - pmboxq->context1 = NULL; - - if ((phba->fc_flag & FC_OFFLINE_MODE) || - (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) - rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); - else - rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); - - if (rc != MBX_SUCCESS) { - if (rc == MBX_TIMEOUT) - pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - else - mempool_free(pmboxq, phba->mbox_mem_pool); - return; - } - - memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); - pmb->mbxCommand = MBX_READ_LNK_STAT; - pmb->mbxOwner = OWN_HOST; - pmboxq->context1 = NULL; - - if ((phba->fc_flag & FC_OFFLINE_MODE) || - (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) - rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); - else - rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); - - if (rc != MBX_SUCCESS) { - if (rc == MBX_TIMEOUT) - pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - else - mempool_free( pmboxq, phba->mbox_mem_pool); - return; - } - - lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt; - lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt; - lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt; - lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt; - lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; - lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; - lso->error_frames = pmb->un.varRdLnk.crcCnt; - lso->link_events = (phba->fc_eventTag >> 1); - - psli->stats_start = get_seconds(); - - return; -} /* * The LPFC driver treats linkdown handling as target loss events so there @@ -1515,7 +1431,8 @@ struct fc_function_template lpfc_transport_functions = { */ .get_fc_host_stats = lpfc_get_stats, - .reset_fc_host_stats = lpfc_reset_stats, + + /* the LPFC driver doesn't support resetting stats yet */ .dd_fcrport_size = sizeof(struct lpfc_rport_data), .show_rport_maxframe_size = 1, diff --git a/trunk/drivers/scsi/lpfc/lpfc_crtn.h b/trunk/drivers/scsi/lpfc/lpfc_crtn.h index 2a176467f71b..517e9e4dd461 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_crtn.h +++ b/trunk/drivers/scsi/lpfc/lpfc_crtn.h @@ -127,7 +127,6 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); -int lpfc_mbox_tmo_val(struct lpfc_hba *, int); int lpfc_mem_alloc(struct lpfc_hba *); void lpfc_mem_free(struct lpfc_hba *); diff --git a/trunk/drivers/scsi/lpfc/lpfc_ct.c b/trunk/drivers/scsi/lpfc/lpfc_ct.c index bbb7310210b0..b65ee57af53e 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_ct.c +++ b/trunk/drivers/scsi/lpfc/lpfc_ct.c @@ -131,7 +131,6 @@ lpfc_ct_unsol_event(struct lpfc_hba * phba, } ct_unsol_event_exit_piocbq: - list_del(&head); if (pmbuf) { list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) { lpfc_mbuf_free(phba, matp->virt, matp->phys); @@ -482,7 +481,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, if (CTrsp->CommandResponse.bits.CmdRsp == be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) { lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0208 NameServer Rsp " + "%d:0239 NameServer Rsp " "Data: x%x\n", phba->brd_no, phba->fc_flag); @@ -589,9 +588,13 @@ lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp) lpfc_decode_firmware_rev(phba, fwrev, 0); - sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName, - fwrev, 
lpfc_release_version); - return; + if (phba->Port[0]) { + sprintf(symbp, "Emulex %s Port %s FV%s DV%s", phba->ModelName, + phba->Port, fwrev, lpfc_release_version); + } else { + sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName, + fwrev, lpfc_release_version); + } } /* diff --git a/trunk/drivers/scsi/lpfc/lpfc_els.c b/trunk/drivers/scsi/lpfc/lpfc_els.c index 3567de613162..b89f6cb641e6 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_els.c +++ b/trunk/drivers/scsi/lpfc/lpfc_els.c @@ -1848,12 +1848,9 @@ static void lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb) { - IOCB_t *irsp; struct lpfc_nodelist *ndlp; LPFC_MBOXQ_t *mbox = NULL; - irsp = &rspiocb->iocb; - ndlp = (struct lpfc_nodelist *) cmdiocb->context1; if (cmdiocb->context_un.mbox) mbox = cmdiocb->context_un.mbox; @@ -1896,15 +1893,9 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, mempool_free( mbox, phba->mbox_mem_pool); } else { mempool_free( mbox, phba->mbox_mem_pool); - /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */ - if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && - ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) || - (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) || - (irsp->un.ulpWord[4] == IOERR_SLI_DOWN)))) { - if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { - lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); - ndlp = NULL; - } + if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + ndlp = NULL; } } } @@ -2848,7 +2839,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) /* Xmit ELS RPS ACC response tag */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0118 Xmit ELS RPS ACC response tag x%x " + "%d:0128 Xmit ELS RPS ACC response tag x%x " "Data: x%x x%x x%x x%x x%x\n", phba->brd_no, elsiocb->iocb.ulpIoTag, @@ -2957,7 +2948,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize, /* Xmit ELS RPL ACC response tag */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0120 Xmit ELS RPL ACC response tag x%x " + "%d:0128 Xmit ELS RPL ACC response tag x%x " "Data: x%x x%x x%x x%x x%x\n", phba->brd_no, elsiocb->iocb.ulpIoTag, @@ -3118,7 +3109,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist *ndlp, *next_ndlp; /* FAN received */ - lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:0265 FAN received\n", + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:265 FAN received\n", phba->brd_no); icmd = &cmdiocb->iocb; diff --git a/trunk/drivers/scsi/lpfc/lpfc_hbadisc.c b/trunk/drivers/scsi/lpfc/lpfc_hbadisc.c index b2f1552f1848..4d6cf990c4fc 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/trunk/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1557,8 +1557,6 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; } } - - spin_lock_irq(phba->host->host_lock); list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { @@ -1571,7 +1569,6 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) mempool_free(mb, phba->mbox_mem_pool); } } - spin_unlock_irq(phba->host->host_lock); lpfc_els_abort(phba,ndlp,0); spin_lock_irq(phba->host->host_lock); @@ -1785,7 +1782,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) /* LOG change to REGLOGIN */ /* FIND node DID reglogin */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0901 FIND node DID reglogin" + "%d:0931 FIND node DID reglogin" " Data: x%p x%x x%x x%x\n", 
phba->brd_no, ndlp, ndlp->nlp_DID, @@ -1808,7 +1805,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) /* LOG change to PRLI */ /* FIND node DID prli */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0902 FIND node DID prli " + "%d:0931 FIND node DID prli " "Data: x%p x%x x%x x%x\n", phba->brd_no, ndlp, ndlp->nlp_DID, @@ -1831,7 +1828,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) /* LOG change to NPR */ /* FIND node DID npr */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0903 FIND node DID npr " + "%d:0931 FIND node DID npr " "Data: x%p x%x x%x x%x\n", phba->brd_no, ndlp, ndlp->nlp_DID, @@ -1854,7 +1851,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) /* LOG change to UNUSED */ /* FIND node DID unused */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0905 FIND node DID unused " + "%d:0931 FIND node DID unused " "Data: x%p x%x x%x x%x\n", phba->brd_no, ndlp, ndlp->nlp_DID, @@ -2338,7 +2335,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!initlinkmbox) { lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "%d:0206 Device Discovery " + "%d:0226 Device Discovery " "completion error\n", phba->brd_no); phba->hba_state = LPFC_HBA_ERROR; @@ -2368,7 +2365,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) if (!clearlambox) { clrlaerr = 1; lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "%d:0207 Device Discovery " + "%d:0226 Device Discovery " "completion error\n", phba->brd_no); phba->hba_state = LPFC_HBA_ERROR; diff --git a/trunk/drivers/scsi/lpfc/lpfc_init.c b/trunk/drivers/scsi/lpfc/lpfc_init.c index f6948ffe689a..ef47b824cbed 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_init.c +++ b/trunk/drivers/scsi/lpfc/lpfc_init.c @@ -1379,7 +1379,6 @@ lpfc_offline(struct lpfc_hba * phba) /* stop all timers associated with this hba */ lpfc_stop_timer(phba); phba->work_hba_events = 0; - phba->work_ha = 0; lpfc_printf_log(phba, KERN_WARNING, @@ -1617,11 +1616,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_free_iocbq; } - /* - * Set initial can_queue value since 0 is no longer supported and - * scsi_add_host will fail. This will be adjusted later based on the - * max xri value determined in hba setup. - */ + /* We can rely on a queue depth attribute only after SLI HBA setup */ host->can_queue = phba->cfg_hba_queue_depth - 10; /* Tell the midlayer we support 16 byte commands */ @@ -1661,12 +1656,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_free_irq; } - /* - * hba setup may have changed the hba_queue_depth so we need to adjust - * the value of can_queue. 
- */ - host->can_queue = phba->cfg_hba_queue_depth - 10; - lpfc_discovery_wait(phba); if (phba->cfg_poll & DISABLE_FCP_RING_INT) { diff --git a/trunk/drivers/scsi/lpfc/lpfc_mbox.c b/trunk/drivers/scsi/lpfc/lpfc_mbox.c index 4d016c2a1b26..e42f22aaf71b 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_mbox.c +++ b/trunk/drivers/scsi/lpfc/lpfc_mbox.c @@ -651,19 +651,3 @@ lpfc_mbox_get(struct lpfc_hba * phba) return mbq; } - -int -lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd) -{ - switch (cmd) { - case MBX_WRITE_NV: /* 0x03 */ - case MBX_UPDATE_CFG: /* 0x1B */ - case MBX_DOWN_LOAD: /* 0x1C */ - case MBX_DEL_LD_ENTRY: /* 0x1D */ - case MBX_LOAD_AREA: /* 0x81 */ - case MBX_FLASH_WR_ULA: /* 0x98 */ - case MBX_LOAD_EXP_ROM: /* 0x9C */ - return LPFC_MBOX_TMO_FLASH_CMD; - } - return LPFC_MBOX_TMO; -} diff --git a/trunk/drivers/scsi/lpfc/lpfc_nportdisc.c b/trunk/drivers/scsi/lpfc/lpfc_nportdisc.c index 20449a8dd53d..bd0b0e293d63 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/trunk/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -179,7 +179,7 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, /* Abort outstanding I/O on NPort */ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0205 Abort outstanding I/O on NPort x%x " + "%d:0201 Abort outstanding I/O on NPort x%x " "Data: x%x x%x x%x\n", phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -393,20 +393,6 @@ lpfc_rcv_plogi(struct lpfc_hba * phba, mbox->context2 = ndlp; ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); - /* - * If there is an outstanding PLOGI issued, abort it before - * sending ACC rsp for received PLOGI. If pending plogi - * is not canceled here, the plogi will be rejected by - * remote port and will be retried. On a configuration with - * single discovery thread, this will cause a huge delay in - * discovery. Also this will cause multiple state machines - * running in parallel for this node. - */ - if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) { - /* software abort outstanding PLOGI */ - lpfc_els_abort(phba, ndlp, 1); - } - lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0); return 1; @@ -1615,13 +1601,7 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba, lpfc_rcv_padisc(phba, ndlp, cmdiocb); - /* - * Do not start discovery if discovery is about to start - * or discovery in progress for this node. Starting discovery - * here will affect the counting of discovery threads. 
- */ - if ((!(ndlp->nlp_flag & NLP_DELAY_TMO)) && - (ndlp->nlp_flag & NLP_NPR_2B_DISC)){ + if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { if (ndlp->nlp_flag & NLP_NPR_ADISC) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; ndlp->nlp_state = NLP_STE_ADISC_ISSUE; diff --git a/trunk/drivers/scsi/lpfc/lpfc_scsi.c b/trunk/drivers/scsi/lpfc/lpfc_scsi.c index a8816a8738f8..a760a44173df 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_scsi.c +++ b/trunk/drivers/scsi/lpfc/lpfc_scsi.c @@ -21,7 +21,6 @@ #include #include -#include #include #include @@ -842,21 +841,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) return 0; } -static void -lpfc_block_error_handler(struct scsi_cmnd *cmnd) -{ - struct Scsi_Host *shost = cmnd->device->host; - struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); - - spin_lock_irq(shost->host_lock); - while (rport->port_state == FC_PORTSTATE_BLOCKED) { - spin_unlock_irq(shost->host_lock); - msleep(1000); - spin_lock_irq(shost->host_lock); - } - spin_unlock_irq(shost->host_lock); - return; -} static int lpfc_abort_handler(struct scsi_cmnd *cmnd) @@ -871,7 +855,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) unsigned int loop_count = 0; int ret = SUCCESS; - lpfc_block_error_handler(cmnd); spin_lock_irq(shost->host_lock); lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; @@ -974,7 +957,6 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) int ret = FAILED; int cnt, loopcnt; - lpfc_block_error_handler(cmnd); spin_lock_irq(shost->host_lock); /* * If target is not in a MAPPED state, delay the reset until @@ -1091,7 +1073,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) int cnt, loopcnt; struct lpfc_scsi_buf * lpfc_cmd; - lpfc_block_error_handler(cmnd); spin_lock_irq(shost->host_lock); lpfc_cmd = lpfc_get_scsi_buf(phba); @@ -1123,7 +1104,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) ndlp->rport->dd_data); if (ret != SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "%d:0700 Bus Reset on target %d failed\n", + "%d:0713 Bus Reset on target %d failed\n", phba->brd_no, i); err_count++; } diff --git a/trunk/drivers/scsi/lpfc/lpfc_sli.c b/trunk/drivers/scsi/lpfc/lpfc_sli.c index 70f4d5a1348e..350a625fa224 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_sli.c +++ b/trunk/drivers/scsi/lpfc/lpfc_sli.c @@ -320,8 +320,7 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq) kfree(old_arr); return iotag; } - } else - spin_unlock_irq(phba->host->host_lock); + } lpfc_printf_log(phba, KERN_ERR,LOG_SLI, "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n", @@ -970,11 +969,9 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba) * resources need to be recovered. */ if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "%d:0314 IOCB cmd 0x%x" - " processed. Skipping" - " completion", phba->brd_no, - irsp->ulpCommand); + printk(KERN_INFO "%s: IOCB cmd 0x%x processed." + " Skipping completion\n", __FUNCTION__, + irsp->ulpCommand); break; } @@ -1107,7 +1104,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, if (unlikely(irsp->ulpStatus)) { /* Rsp ring error: IOCB */ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "%d:0336 Rsp Ring %d error: IOCB Data: " + "%d:0326 Rsp Ring %d error: IOCB Data: " "x%x x%x x%x x%x x%x x%x x%x x%x\n", phba->brd_no, pring->ringno, irsp->un.ulpWord[0], irsp->un.ulpWord[1], @@ -1125,11 +1122,9 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, * resources need to be recovered. 
*/ if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "%d:0333 IOCB cmd 0x%x" - " processed. Skipping" - " completion\n", phba->brd_no, - irsp->ulpCommand); + printk(KERN_INFO "%s: IOCB cmd 0x%x processed. " + "Skipping completion\n", __FUNCTION__, + irsp->ulpCommand); break; } @@ -1160,7 +1155,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, } else { /* Unknown IOCB command */ lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0334 Unknown IOCB command " + "%d:0321 Unknown IOCB command " "Data: x%x, x%x x%x x%x x%x\n", phba->brd_no, type, irsp->ulpCommand, irsp->ulpStatus, irsp->ulpIoTag, @@ -1243,7 +1238,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0303 Ring %d handler: portRspPut %d " + "%d:0312 Ring %d handler: portRspPut %d " "is bigger then rsp ring %d\n", phba->brd_no, pring->ringno, portRspPut, portRspMax); @@ -1388,7 +1383,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0335 Unknown IOCB command " + "%d:0321 Unknown IOCB command " "Data: x%x x%x x%x x%x\n", phba->brd_no, irsp->ulpCommand, @@ -1404,11 +1399,11 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, next_iocb, &saveq->list, list) { - list_del(&rspiocbp->list); lpfc_sli_release_iocbq(phba, rspiocbp); } } + lpfc_sli_release_iocbq(phba, saveq); } } @@ -1716,13 +1711,15 @@ lpfc_sli_brdreset(struct lpfc_hba * phba) phba->fc_myDID = 0; phba->fc_prevDID = 0; + psli->sli_flag = 0; + /* Turn off parity checking and serr during the physical reset */ pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); - psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); + psli->sli_flag &= ~LPFC_SLI2_ACTIVE; /* Now toggle INITFF bit in the Host Control Register */ writel(HC_INITFF, phba->HCregaddr); mdelay(1); @@ -1763,7 +1760,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba) /* Restart HBA */ lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no, + "%d:0328 Restart HBA Data: x%x x%x\n", phba->brd_no, phba->hba_state, psli->sli_flag); word0 = 0; @@ -1795,9 +1792,6 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba) spin_unlock_irq(phba->host->host_lock); - memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); - psli->stats_start = get_seconds(); - if (skip_post) mdelay(100); else @@ -1908,9 +1902,6 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba) } while (resetcount < 2 && !done) { - spin_lock_irq(phba->host->host_lock); - phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; - spin_unlock_irq(phba->host->host_lock); phba->hba_state = LPFC_STATE_UNKNOWN; lpfc_sli_brdrestart(phba); msleep(2500); @@ -1918,9 +1909,6 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba) if (rc) break; - spin_lock_irq(phba->host->host_lock); - phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - spin_unlock_irq(phba->host->host_lock); resetcount++; /* Call pre CONFIG_PORT mailbox command initialization. 
A value of 0 @@ -2206,8 +2194,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) return (MBX_NOT_FINISHED); } /* timeout active mbox command */ - mod_timer(&psli->mbox_tmo, (jiffies + - (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand)))); + mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO); } /* Mailbox cmd issue */ @@ -2267,6 +2254,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) break; case MBX_POLL: + i = 0; psli->mbox_active = NULL; if (psli->sli_flag & LPFC_SLI2_ACTIVE) { /* First read mbox status word */ @@ -2280,14 +2268,11 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) /* Read the HBA Host Attention Register */ ha_copy = readl(phba->HAregaddr); - i = lpfc_mbox_tmo_val(phba, mb->mbxCommand); - i *= 1000; /* Convert to ms */ - /* Wait for command to complete */ while (((word0 & OWN_CHIP) == OWN_CHIP) || (!(ha_copy & HA_MBATT) && (phba->hba_state > LPFC_WARM_START))) { - if (i-- <= 0) { + if (i++ >= 100) { psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; spin_unlock_irqrestore(phba->host->host_lock, drvr_flag); @@ -2305,7 +2290,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) /* Can be in interrupt context, do not sleep */ /* (or might be called with interrupts disabled) */ - mdelay(1); + mdelay(i); spin_lock_irqsave(phba->host->host_lock, drvr_flag); @@ -3020,7 +3005,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba, if (timeleft == 0) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0338 IOCB wait timeout error - no " + "%d:0329 IOCB wait timeout error - no " "wake response Data x%x\n", phba->brd_no, timeout); retval = IOCB_TIMEDOUT; diff --git a/trunk/drivers/scsi/lpfc/lpfc_sli.h b/trunk/drivers/scsi/lpfc/lpfc_sli.h index e26de6809358..d8ef0d2894d4 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_sli.h +++ b/trunk/drivers/scsi/lpfc/lpfc_sli.h @@ -172,18 +172,6 @@ struct lpfc_sli_stat { uint32_t mbox_busy; /* Mailbox cmd busy */ }; -/* Structure to store link status values when port stats are reset */ -struct lpfc_lnk_stat { - uint32_t link_failure_count; - uint32_t loss_of_sync_count; - uint32_t loss_of_signal_count; - uint32_t prim_seq_protocol_err_count; - uint32_t invalid_tx_word_count; - uint32_t invalid_crc_count; - uint32_t error_frames; - uint32_t link_events; -}; - /* Structure used to hold SLI information */ struct lpfc_sli { uint32_t num_rings; @@ -213,8 +201,6 @@ struct lpfc_sli { struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */ size_t iocbq_lookup_len; /* current lengs of the array */ uint16_t last_iotag; /* last allocated IOTAG */ - unsigned long stats_start; /* in seconds */ - struct lpfc_lnk_stat lnk_stat_offsets; }; /* Given a pointer to the start of the ring, and the slot number of @@ -225,9 +211,3 @@ struct lpfc_sli { #define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox command */ -#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write - * or erase cmds. This is especially - * long because of the potential of - * multiple flash erases that can be - * spawned. - */ diff --git a/trunk/drivers/scsi/lpfc/lpfc_version.h b/trunk/drivers/scsi/lpfc/lpfc_version.h index c7091ea29f3f..10e89c6ae823 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_version.h +++ b/trunk/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.1.9" +#define LPFC_DRIVER_VERSION "8.1.7" #define LPFC_DRIVER_NAME "lpfc" diff --git a/trunk/drivers/scsi/megaraid/mega_common.h b/trunk/drivers/scsi/megaraid/mega_common.h index 8cd0bd1d0f7c..4675343228ad 100644 --- a/trunk/drivers/scsi/megaraid/mega_common.h +++ b/trunk/drivers/scsi/megaraid/mega_common.h @@ -37,12 +37,6 @@ #define LSI_MAX_CHANNELS 16 #define LSI_MAX_LOGICAL_DRIVES_64LD (64+1) -#define HBA_SIGNATURE_64_BIT 0x299 -#define PCI_CONF_AMISIG64 0xa4 - -#define MEGA_SCSI_INQ_EVPD 1 -#define MEGA_INVALID_FIELD_IN_CDB 0x24 - /** * scb_t - scsi command control block diff --git a/trunk/drivers/scsi/megaraid/megaraid_ioctl.h b/trunk/drivers/scsi/megaraid/megaraid_ioctl.h index b8aa34202ec3..bdaee144a1c3 100644 --- a/trunk/drivers/scsi/megaraid/megaraid_ioctl.h +++ b/trunk/drivers/scsi/megaraid/megaraid_ioctl.h @@ -132,10 +132,6 @@ typedef struct uioc { /* Driver Data: */ void __user * user_data; uint32_t user_data_len; - - /* 64bit alignment */ - uint32_t pad_for_64bit_align; - mraid_passthru_t __user *user_pthru; mraid_passthru_t *pthru32; diff --git a/trunk/drivers/scsi/megaraid/megaraid_mbox.c b/trunk/drivers/scsi/megaraid/megaraid_mbox.c index cd982c877da0..92715130ac09 100644 --- a/trunk/drivers/scsi/megaraid/megaraid_mbox.c +++ b/trunk/drivers/scsi/megaraid/megaraid_mbox.c @@ -10,7 +10,7 @@ * 2 of the License, or (at your option) any later version. * * FILE : megaraid_mbox.c - * Version : v2.20.4.9 (Jul 16 2006) + * Version : v2.20.4.8 (Apr 11 2006) * * Authors: * Atul Mukker @@ -720,7 +720,6 @@ megaraid_init_mbox(adapter_t *adapter) struct pci_dev *pdev; mraid_device_t *raid_dev; int i; - uint32_t magic64; adapter->ito = MBOX_TIMEOUT; @@ -864,33 +863,12 @@ megaraid_init_mbox(adapter_t *adapter) // Set the DMA mask to 64-bit. 
All supported controllers as capable of // DMA in this range - pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64); - - if (((magic64 == HBA_SIGNATURE_64_BIT) && - ((adapter->pdev->subsystem_device != - PCI_SUBSYS_ID_MEGARAID_SATA_150_6) || - (adapter->pdev->subsystem_device != - PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) || - (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && - adapter->pdev->device == PCI_DEVICE_ID_VERDE) || - (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && - adapter->pdev->device == PCI_DEVICE_ID_DOBSON) || - (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && - adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) || - (adapter->pdev->vendor == PCI_VENDOR_ID_DELL && - adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) || - (adapter->pdev->vendor == PCI_VENDOR_ID_DELL && - adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) { - if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK)) { - con_log(CL_ANN, (KERN_WARNING - "megaraid: DMA mask for 64-bit failed\n")); + if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK) != 0) { - if (pci_set_dma_mask (adapter->pdev, DMA_32BIT_MASK)) { - con_log(CL_ANN, (KERN_WARNING - "megaraid: 32-bit DMA mask failed\n")); - goto out_free_sysfs_res; - } - } + con_log(CL_ANN, (KERN_WARNING + "megaraid: could not set DMA mask for 64-bit.\n")); + + goto out_free_sysfs_res; } // setup tasklet for DPC @@ -1644,14 +1622,6 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy) rdev->last_disp |= (1L << SCP2CHANNEL(scp)); } - if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) { - scp->sense_buffer[0] = 0x70; - scp->sense_buffer[2] = ILLEGAL_REQUEST; - scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB; - scp->result = CHECK_CONDITION << 1; - return NULL; - } - /* Fall through */ case READ_CAPACITY: diff --git a/trunk/drivers/scsi/megaraid/megaraid_mbox.h b/trunk/drivers/scsi/megaraid/megaraid_mbox.h index 2b5a3285f799..868fb0ec93e7 100644 --- a/trunk/drivers/scsi/megaraid/megaraid_mbox.h +++ b/trunk/drivers/scsi/megaraid/megaraid_mbox.h @@ -21,8 +21,8 @@ #include "megaraid_ioctl.h" -#define MEGARAID_VERSION "2.20.4.9" -#define MEGARAID_EXT_VERSION "(Release Date: Sun Jul 16 12:27:22 EST 2006)" +#define MEGARAID_VERSION "2.20.4.8" +#define MEGARAID_EXT_VERSION "(Release Date: Mon Apr 11 12:27:22 EST 2006)" /* diff --git a/trunk/drivers/scsi/megaraid/megaraid_mm.c b/trunk/drivers/scsi/megaraid/megaraid_mm.c index d85b9a8f1b8d..e8f534fb336b 100644 --- a/trunk/drivers/scsi/megaraid/megaraid_mm.c +++ b/trunk/drivers/scsi/megaraid/megaraid_mm.c @@ -10,7 +10,7 @@ * 2 of the License, or (at your option) any later version. 
* * FILE : megaraid_mm.c - * Version : v2.20.2.7 (Jul 16 2006) + * Version : v2.20.2.6 (Mar 7 2005) * * Common management module */ diff --git a/trunk/drivers/scsi/megaraid/megaraid_mm.h b/trunk/drivers/scsi/megaraid/megaraid_mm.h index c8762b2b8ed1..3d9e67d6849d 100644 --- a/trunk/drivers/scsi/megaraid/megaraid_mm.h +++ b/trunk/drivers/scsi/megaraid/megaraid_mm.h @@ -27,9 +27,9 @@ #include "megaraid_ioctl.h" -#define LSI_COMMON_MOD_VERSION "2.20.2.7" +#define LSI_COMMON_MOD_VERSION "2.20.2.6" #define LSI_COMMON_MOD_EXT_VERSION \ - "(Release Date: Sun Jul 16 00:01:03 EST 2006)" + "(Release Date: Mon Mar 7 00:01:03 EST 2005)" #define LSI_DBGLVL dbglevel diff --git a/trunk/drivers/scsi/pdc_adma.c b/trunk/drivers/scsi/pdc_adma.c index efc8fff1d250..d1f38c32aa15 100644 --- a/trunk/drivers/scsi/pdc_adma.c +++ b/trunk/drivers/scsi/pdc_adma.c @@ -183,8 +183,7 @@ static struct ata_port_info adma_port_info[] = { { .sht = &adma_ata_sht, .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | - ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO | - ATA_FLAG_PIO_POLLING, + ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO, .pio_mask = 0x10, /* pio4 */ .udma_mask = 0x1f, /* udma0-4 */ .port_ops = &adma_ata_ops, diff --git a/trunk/drivers/scsi/qla2xxx/qla_def.h b/trunk/drivers/scsi/qla2xxx/qla_def.h index 0930260aec2c..139ea0e27fd7 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_def.h +++ b/trunk/drivers/scsi/qla2xxx/qla_def.h @@ -487,7 +487,6 @@ typedef struct { #define MBA_IP_RCV_BUFFER_EMPTY 0x8026 /* IP receive buffer queue empty. */ #define MBA_IP_HDR_DATA_SPLIT 0x8027 /* IP header/data splitting feature */ /* used. */ -#define MBA_TRACE_NOTIFICATION 0x8028 /* Trace/Diagnostic notification. */ #define MBA_POINT_TO_POINT 0x8030 /* Point to point mode. */ #define MBA_CMPLT_1_16BIT 0x8031 /* Completion 1 16bit IOSB. */ #define MBA_CMPLT_2_16BIT 0x8032 /* Completion 2 16bit IOSB. 
diff --git a/trunk/drivers/scsi/qla2xxx/qla_init.c b/trunk/drivers/scsi/qla2xxx/qla_init.c
index 859649160caa..9758dba95542 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_init.c
+++ b/trunk/drivers/scsi/qla2xxx/qla_init.c
@@ -3063,7 +3063,6 @@ qla2x00_update_fcports(scsi_qla_host_t *ha)
 int
 qla2x00_abort_isp(scsi_qla_host_t *ha)
 {
-	int rval;
 	unsigned long flags = 0;
 	uint16_t cnt;
 	srb_t *sp;
@@ -3120,16 +3119,6 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)

 			ha->isp_abort_cnt = 0;
 			clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
-
-			if (ha->eft) {
-				rval = qla2x00_trace_control(ha, TC_ENABLE,
-				    ha->eft_dma, EFT_NUM_BUFFERS);
-				if (rval) {
-					qla_printk(KERN_WARNING, ha,
-					    "Unable to reinitialize EFT "
-					    "(%d).\n", rval);
-				}
-			}
 		} else {	/* failed the ISP abort */
 			ha->flags.online = 1;
 			if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) {
diff --git a/trunk/drivers/scsi/qla2xxx/qla_iocb.c b/trunk/drivers/scsi/qla2xxx/qla_iocb.c
index c5b3c610a32a..2b60a27eff0b 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/trunk/drivers/scsi/qla2xxx/qla_iocb.c
@@ -471,7 +471,6 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
 		mrk24->nport_handle = cpu_to_le16(loop_id);
 		mrk24->lun[1] = LSB(lun);
 		mrk24->lun[2] = MSB(lun);
-		host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
 	} else {
 		SET_TARGET_ID(ha, mrk->target, loop_id);
 		mrk->lun = cpu_to_le16(lun);
diff --git a/trunk/drivers/scsi/qla2xxx/qla_isr.c b/trunk/drivers/scsi/qla2xxx/qla_isr.c
index de0613135f70..795bf15b1b8f 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_isr.c
+++ b/trunk/drivers/scsi/qla2xxx/qla_isr.c
@@ -587,11 +587,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
 		DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
 		    "%04x.\n", ha->host_no, mb[1], mb[2], mb[3]));
 		break;
-
-	case MBA_TRACE_NOTIFICATION:
-		DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
-		    ha->host_no, mb[1], mb[2]));
-		break;
 	}
 }

diff --git a/trunk/drivers/scsi/qla2xxx/qla_os.c b/trunk/drivers/scsi/qla2xxx/qla_os.c
index 65cbe2f5eea2..ec7ebb6037e6 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_os.c
+++ b/trunk/drivers/scsi/qla2xxx/qla_os.c
@@ -744,6 +744,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
 {
 	scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
 	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+	srb_t *sp;
 	int ret;
 	unsigned int id, lun;
 	unsigned long serial;
@@ -754,7 +755,8 @@
 	lun = cmd->device->lun;
 	serial = cmd->serial_number;

-	if (!fcport)
+	sp = (srb_t *) CMD_SP(cmd);
+	if (!sp || !fcport)
 		return ret;

 	qla_printk(KERN_INFO, ha,
@@ -873,6 +875,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
 {
 	scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
 	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+	srb_t *sp;
 	int ret;
 	unsigned int id, lun;
 	unsigned long serial;
@@ -883,7 +886,8 @@
 	lun = cmd->device->lun;
 	serial = cmd->serial_number;

-	if (!fcport)
+	sp = (srb_t *) CMD_SP(cmd);
+	if (!sp || !fcport)
 		return ret;

 	qla_printk(KERN_INFO, ha,
@@ -932,6 +936,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
 {
 	scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
 	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+	srb_t *sp;
 	int ret;
 	unsigned int id, lun;
 	unsigned long serial;
@@ -942,7 +947,8 @@
 	lun = cmd->device->lun;
 	serial = cmd->serial_number;

-	if (!fcport)
+	sp = (srb_t *) CMD_SP(cmd);
+	if (!sp || !fcport)
 		return ret;

 	qla_printk(KERN_INFO, ha,
@@
-2238,6 +2244,9 @@ qla2x00_do_dpc(void *data) next_loopid = 0; list_for_each_entry(fcport, &ha->fcports, list) { + if (fcport->port_type != FCT_TARGET) + continue; + /* * If the port is not ONLINE then try to login * to it if we haven't run out of retries. diff --git a/trunk/drivers/scsi/qla2xxx/qla_version.h b/trunk/drivers/scsi/qla2xxx/qla_version.h index 971259032ef7..d2d683440659 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_version.h +++ b/trunk/drivers/scsi/qla2xxx/qla_version.h @@ -7,9 +7,9 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.01.07-k1" +#define QLA2XXX_VERSION "8.01.05-k3" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 1 -#define QLA_DRIVER_PATCH_VER 7 +#define QLA_DRIVER_PATCH_VER 5 #define QLA_DRIVER_BETA_VER 0 diff --git a/trunk/drivers/scsi/sata_via.c b/trunk/drivers/scsi/sata_via.c index 01d40369a8a5..03baec2191bf 100644 --- a/trunk/drivers/scsi/sata_via.c +++ b/trunk/drivers/scsi/sata_via.c @@ -74,7 +74,6 @@ enum { static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg); static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); -static void vt6420_error_handler(struct ata_port *ap); static const struct pci_device_id svia_pci_tbl[] = { { 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 }, @@ -108,38 +107,7 @@ static struct scsi_host_template svia_sht = { .bios_param = ata_std_bios_param, }; -static const struct ata_port_operations vt6420_sata_ops = { - .port_disable = ata_port_disable, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, - - .qc_prep = ata_qc_prep, - .qc_issue = ata_qc_issue_prot, - .data_xfer = ata_pio_data_xfer, - - .freeze = ata_bmdma_freeze, - .thaw = ata_bmdma_thaw, - .error_handler = vt6420_error_handler, - .post_internal_cmd = ata_bmdma_post_internal_cmd, - - .irq_handler = ata_interrupt, - .irq_clear = ata_bmdma_irq_clear, - - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop, -}; - -static const struct ata_port_operations vt6421_sata_ops = { +static const struct ata_port_operations svia_sata_ops = { .port_disable = ata_port_disable, .tf_load = ata_tf_load, @@ -173,13 +141,13 @@ static const struct ata_port_operations vt6421_sata_ops = { .host_stop = ata_host_stop, }; -static struct ata_port_info vt6420_port_info = { +static struct ata_port_info svia_port_info = { .sht = &svia_sht, .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, .pio_mask = 0x1f, .mwdma_mask = 0x07, .udma_mask = 0x7f, - .port_ops = &vt6420_sata_ops, + .port_ops = &svia_sata_ops, }; MODULE_AUTHOR("Jeff Garzik"); @@ -202,81 +170,6 @@ static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) outl(val, ap->ioaddr.scr_addr + (4 * sc_reg)); } -/** - * vt6420_prereset - prereset for vt6420 - * @ap: target ATA port - * - * SCR registers on vt6420 are pieces of shit and may hang the - * whole machine completely if accessed with the wrong timing. - * To avoid such catastrophe, vt6420 doesn't provide generic SCR - * access operations, but uses SStatus and SControl only during - * boot probing in controlled way. - * - * As the old (pre EH update) probing code is proven to work, we - * strictly follow the access pattern. 
- * - * LOCKING: - * Kernel thread context (may sleep) - * - * RETURNS: - * 0 on success, -errno otherwise. - */ -static int vt6420_prereset(struct ata_port *ap) -{ - struct ata_eh_context *ehc = &ap->eh_context; - unsigned long timeout = jiffies + (HZ * 5); - u32 sstatus, scontrol; - int online; - - /* don't do any SCR stuff if we're not loading */ - if (!ATA_PFLAG_LOADING) - goto skip_scr; - - /* Resume phy. This is the old resume sequence from - * __sata_phy_reset(). - */ - svia_scr_write(ap, SCR_CONTROL, 0x300); - svia_scr_read(ap, SCR_CONTROL); /* flush */ - - /* wait for phy to become ready, if necessary */ - do { - msleep(200); - if ((svia_scr_read(ap, SCR_STATUS) & 0xf) != 1) - break; - } while (time_before(jiffies, timeout)); - - /* open code sata_print_link_status() */ - sstatus = svia_scr_read(ap, SCR_STATUS); - scontrol = svia_scr_read(ap, SCR_CONTROL); - - online = (sstatus & 0xf) == 0x3; - - ata_port_printk(ap, KERN_INFO, - "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n", - online ? "up" : "down", sstatus, scontrol); - - /* SStatus is read one more time */ - svia_scr_read(ap, SCR_STATUS); - - if (!online) { - /* tell EH to bail */ - ehc->i.action &= ~ATA_EH_RESET_MASK; - return 0; - } - - skip_scr: - /* wait for !BSY */ - ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); - - return 0; -} - -static void vt6420_error_handler(struct ata_port *ap) -{ - return ata_bmdma_drive_eh(ap, vt6420_prereset, ata_std_softreset, - NULL, ata_std_postreset); -} - static const unsigned int svia_bar_sizes[] = { 8, 4, 8, 4, 16, 256 }; @@ -317,7 +210,7 @@ static void vt6421_init_addrs(struct ata_probe_ent *probe_ent, static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev) { struct ata_probe_ent *probe_ent; - struct ata_port_info *ppi = &vt6420_port_info; + struct ata_port_info *ppi = &svia_port_info; probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); if (!probe_ent) @@ -346,7 +239,7 @@ static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev) probe_ent->sht = &svia_sht; probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY; - probe_ent->port_ops = &vt6421_sata_ops; + probe_ent->port_ops = &svia_sata_ops; probe_ent->n_ports = N_PORTS; probe_ent->irq = pdev->irq; probe_ent->irq_flags = IRQF_SHARED; diff --git a/trunk/drivers/scsi/scsi_error.c b/trunk/drivers/scsi/scsi_error.c index a8ed5a22009d..6a5b731bd5ba 100644 --- a/trunk/drivers/scsi/scsi_error.c +++ b/trunk/drivers/scsi/scsi_error.c @@ -460,8 +460,7 @@ static void scsi_eh_done(struct scsi_cmnd *scmd) * Return value: * SUCCESS or FAILED or NEEDS_RETRY **/ -static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, - int cmnd_size, int timeout, int copy_sense) +static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout, int copy_sense) { struct scsi_device *sdev = scmd->device; struct Scsi_Host *shost = sdev->host; @@ -491,9 +490,6 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, old_cmd_len = scmd->cmd_len; old_use_sg = scmd->use_sg; - memset(scmd->cmnd, 0, sizeof(scmd->cmnd)); - memcpy(scmd->cmnd, cmnd, cmnd_size); - if (copy_sense) { int gfp_mask = GFP_ATOMIC; @@ -614,7 +610,8 @@ static int scsi_request_sense(struct scsi_cmnd *scmd) static unsigned char generic_sense[6] = {REQUEST_SENSE, 0, 0, 0, 252, 0}; - return scsi_send_eh_cmnd(scmd, generic_sense, 6, SENSE_TIMEOUT, 1); + memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense)); + return scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 1); } /** @@ -739,7 +736,10 
@@ static int scsi_eh_tur(struct scsi_cmnd *scmd) int retry_cnt = 1, rtn; retry_tur: - rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0); + memcpy(scmd->cmnd, tur_command, sizeof(tur_command)); + + + rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 0); SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", __FUNCTION__, scmd, rtn)); @@ -839,8 +839,8 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd) if (scmd->device->allow_restart) { int rtn; - rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, - START_UNIT_TIMEOUT, 0); + memcpy(scmd->cmnd, stu_command, sizeof(stu_command)); + rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT, 0); if (rtn == SUCCESS) return 0; } diff --git a/trunk/drivers/scsi/scsi_transport_iscsi.c b/trunk/drivers/scsi/scsi_transport_iscsi.c index 2ecd14188574..7b9e8fa1a4e0 100644 --- a/trunk/drivers/scsi/scsi_transport_iscsi.c +++ b/trunk/drivers/scsi/scsi_transport_iscsi.c @@ -34,7 +34,6 @@ #define ISCSI_SESSION_ATTRS 11 #define ISCSI_CONN_ATTRS 11 #define ISCSI_HOST_ATTRS 0 -#define ISCSI_TRANSPORT_VERSION "1.1-646" struct iscsi_internal { int daemon_pid; @@ -635,13 +634,13 @@ mempool_zone_get_skb(struct mempool_zone *zone) } static int -iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb, gfp_t gfp) +iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb) { unsigned long flags; int rc; skb_get(skb); - rc = netlink_broadcast(nls, skb, 0, 1, gfp); + rc = netlink_broadcast(nls, skb, 0, 1, GFP_KERNEL); if (rc < 0) { mempool_free(skb, zone->pool); printk(KERN_ERR "iscsi: can not broadcast skb (%d)\n", rc); @@ -750,7 +749,7 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) ev->r.connerror.cid = conn->cid; ev->r.connerror.sid = iscsi_conn_get_sid(conn); - iscsi_broadcast_skb(conn->z_error, skb, GFP_ATOMIC); + iscsi_broadcast_skb(conn->z_error, skb); dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n", error); @@ -896,7 +895,7 @@ int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn) * this will occur if the daemon is not up, so we just warn * the user and when the daemon is restarted it will handle it */ - rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL); + rc = iscsi_broadcast_skb(conn->z_pdu, skb); if (rc < 0) dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of " "session destruction event. Check iscsi daemon\n"); @@ -959,7 +958,7 @@ int iscsi_if_create_session_done(struct iscsi_cls_conn *conn) * this will occur if the daemon is not up, so we just warn * the user and when the daemon is restarted it will handle it */ - rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL); + rc = iscsi_broadcast_skb(conn->z_pdu, skb); if (rc < 0) dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of " "session creation event. 
Check iscsi daemon\n"); @@ -1614,9 +1613,6 @@ static __init int iscsi_transport_init(void) { int err; - printk(KERN_INFO "Loading iSCSI transport class v%s.", - ISCSI_TRANSPORT_VERSION); - err = class_register(&iscsi_transport_class); if (err) return err; @@ -1682,4 +1678,3 @@ MODULE_AUTHOR("Mike Christie , " "Alex Aizman "); MODULE_DESCRIPTION("iSCSI Transport Interface"); MODULE_LICENSE("GPL"); -MODULE_VERSION(ISCSI_TRANSPORT_VERSION); diff --git a/trunk/drivers/scsi/sg.c b/trunk/drivers/scsi/sg.c index 34f9343ed0af..65eef33846bb 100644 --- a/trunk/drivers/scsi/sg.c +++ b/trunk/drivers/scsi/sg.c @@ -18,8 +18,8 @@ * */ -static int sg_version_num = 30534; /* 2 digits for each component */ -#define SG_VERSION_STR "3.5.34" +static int sg_version_num = 30533; /* 2 digits for each component */ +#define SG_VERSION_STR "3.5.33" /* * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes: @@ -60,7 +60,7 @@ static int sg_version_num = 30534; /* 2 digits for each component */ #ifdef CONFIG_SCSI_PROC_FS #include -static char *sg_version_date = "20060818"; +static char *sg_version_date = "20050908"; static int sg_proc_init(void); static void sg_proc_cleanup(void); @@ -1164,7 +1164,7 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type) len = vma->vm_end - sa; len = (len < sg->length) ? len : sg->length; if (offset < len) { - page = virt_to_page(page_address(sg->page) + offset); + page = sg->page; get_page(page); /* increment page count */ break; } diff --git a/trunk/drivers/scsi/sym53c8xx_2/sym_glue.c b/trunk/drivers/scsi/sym53c8xx_2/sym_glue.c index 739d3ef46a40..8c505076c0eb 100644 --- a/trunk/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/trunk/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -2084,7 +2084,7 @@ static struct pci_device_id sym2_id_table[] __devinitdata = { { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510, - PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895, diff --git a/trunk/drivers/serial/sunsab.c b/trunk/drivers/serial/sunsab.c index cfe20f730436..dc673e1b6fd9 100644 --- a/trunk/drivers/serial/sunsab.c +++ b/trunk/drivers/serial/sunsab.c @@ -886,15 +886,6 @@ static int sunsab_console_setup(struct console *con, char *options) unsigned long flags; unsigned int baud, quot; - /* - * The console framework calls us for each and every port - * registered. Defer the console setup until the requested - * port has been properly discovered. A bit of a hack, - * though... 
- */ - if (up->port.type != PORT_SUNSAB) - return -1; - printk("Console: ttyS%d (SAB82532)\n", (sunsab_reg.minor - 64) + con->index); diff --git a/trunk/drivers/serial/sunzilog.c b/trunk/drivers/serial/sunzilog.c index d34f336d53d8..47bc3d57e019 100644 --- a/trunk/drivers/serial/sunzilog.c +++ b/trunk/drivers/serial/sunzilog.c @@ -1146,9 +1146,6 @@ static int __init sunzilog_console_setup(struct console *con, char *options) unsigned long flags; int baud, brg; - if (up->port.type != PORT_SUNZILOG) - return -1; - printk(KERN_INFO "Console: ttyS%d (SunZilog zs%d)\n", (sunzilog_reg.minor - 64) + con->index, con->index); diff --git a/trunk/drivers/usb/misc/cypress_cy7c63.c b/trunk/drivers/usb/misc/cypress_cy7c63.c index 9c46746d5d00..a4062a6adbb8 100644 --- a/trunk/drivers/usb/misc/cypress_cy7c63.c +++ b/trunk/drivers/usb/misc/cypress_cy7c63.c @@ -208,7 +208,7 @@ static int cypress_probe(struct usb_interface *interface, /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) { - dev_err(&interface->dev, "Out of memory!\n"); + dev_err(&dev->udev->dev, "Out of memory!\n"); goto error; } diff --git a/trunk/drivers/usb/serial/pl2303.c b/trunk/drivers/usb/serial/pl2303.c index 65e4d046951a..efbbc0adb89a 100644 --- a/trunk/drivers/usb/serial/pl2303.c +++ b/trunk/drivers/usb/serial/pl2303.c @@ -79,6 +79,7 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(SAGEM_VENDOR_ID, SAGEM_PRODUCT_ID) }, { USB_DEVICE(LEADTEK_VENDOR_ID, LEADTEK_9531_PRODUCT_ID) }, { USB_DEVICE(SPEEDDRAGON_VENDOR_ID, SPEEDDRAGON_PRODUCT_ID) }, + { USB_DEVICE(OTI_VENDOR_ID, OTI_PRODUCT_ID) }, { USB_DEVICE(DATAPILOT_U2_VENDOR_ID, DATAPILOT_U2_PRODUCT_ID) }, { USB_DEVICE(BELKIN_VENDOR_ID, BELKIN_PRODUCT_ID) }, { } /* Terminating entry */ diff --git a/trunk/drivers/usb/serial/pl2303.h b/trunk/drivers/usb/serial/pl2303.h index 55195e76eb6f..a692ac66ca6c 100644 --- a/trunk/drivers/usb/serial/pl2303.h +++ b/trunk/drivers/usb/serial/pl2303.h @@ -82,6 +82,10 @@ #define SPEEDDRAGON_VENDOR_ID 0x0e55 #define SPEEDDRAGON_PRODUCT_ID 0x110b +/* Ours Technology Inc DKU-5 clone, chipset: Prolific Technology Inc */ +#define OTI_VENDOR_ID 0x0ea0 +#define OTI_PRODUCT_ID 0x6858 + /* DATAPILOT Universal-2 Phone Cable */ #define DATAPILOT_U2_VENDOR_ID 0x0731 #define DATAPILOT_U2_PRODUCT_ID 0x2003 diff --git a/trunk/drivers/usb/storage/unusual_devs.h b/trunk/drivers/usb/storage/unusual_devs.h index 4a803d69fa36..fd158e063c06 100644 --- a/trunk/drivers/usb/storage/unusual_devs.h +++ b/trunk/drivers/usb/storage/unusual_devs.h @@ -1261,7 +1261,7 @@ UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, * Tested on hardware version 1.10. * Entry is needed only for the initializer function override. 
*/ -UNUSUAL_DEV( 0x1019, 0x0c55, 0x0110, 0x0110, +UNUSUAL_DEV( 0x1019, 0x0c55, 0x0000, 0x9999, "Desknote", "UCR-61S2B", US_SC_DEVICE, US_PR_DEVICE, usb_stor_ucr61s2b_init, diff --git a/trunk/drivers/video/imacfb.c b/trunk/drivers/video/imacfb.c index 18ea4a549105..b485bece5fc9 100644 --- a/trunk/drivers/video/imacfb.c +++ b/trunk/drivers/video/imacfb.c @@ -71,10 +71,10 @@ static int set_system(struct dmi_system_id *id) static struct dmi_system_id __initdata dmi_system_table[] = { { set_system, "iMac4,1", { DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."), - DMI_MATCH(DMI_PRODUCT_NAME,"iMac4,1") }, (void*)M_I17}, + DMI_MATCH(DMI_BIOS_VERSION,"iMac4,1") }, (void*)M_I17}, { set_system, "MacBookPro1,1", { DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."), - DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro1,1") }, (void*)M_I17}, + DMI_MATCH(DMI_BIOS_VERSION,"MacBookPro1,1") }, (void*)M_I17}, { set_system, "MacBook1,1", { DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."), DMI_MATCH(DMI_PRODUCT_NAME,"MacBook1,1")}, (void *)M_MACBOOK}, diff --git a/trunk/drivers/video/matrox/g450_pll.c b/trunk/drivers/video/matrox/g450_pll.c index 7c76e079ca7d..440272ad10e7 100644 --- a/trunk/drivers/video/matrox/g450_pll.c +++ b/trunk/drivers/video/matrox/g450_pll.c @@ -331,15 +331,7 @@ static int __g450_setclk(WPMINFO unsigned int fout, unsigned int pll, tmp |= M1064_XPIXCLKCTRL_PLL_UP; } matroxfb_DAC_out(PMINFO M1064_XPIXCLKCTRL, tmp); -#ifdef __powerpc__ - /* This is necessary to avoid jitter on PowerPC - * (OpenFirmware) systems, but apparently - * introduces jitter, at least on a x86-64 - * using DVI. - * A simple workaround is disable for non-PPC. - */ matroxfb_DAC_out(PMINFO M1064_XDVICLKCTRL, 0); -#endif /* __powerpc__ */ matroxfb_DAC_out(PMINFO M1064_XPWRCTRL, xpwrctrl); matroxfb_DAC_unlock_irqrestore(flags); diff --git a/trunk/fs/block_dev.c b/trunk/fs/block_dev.c index 045f98854f14..37534573960b 100644 --- a/trunk/fs/block_dev.c +++ b/trunk/fs/block_dev.c @@ -884,61 +884,6 @@ void bd_set_size(struct block_device *bdev, loff_t size) } EXPORT_SYMBOL(bd_set_size); -static int __blkdev_put(struct block_device *bdev, unsigned int subclass) -{ - int ret = 0; - struct inode *bd_inode = bdev->bd_inode; - struct gendisk *disk = bdev->bd_disk; - - mutex_lock_nested(&bdev->bd_mutex, subclass); - lock_kernel(); - if (!--bdev->bd_openers) { - sync_blockdev(bdev); - kill_bdev(bdev); - } - if (bdev->bd_contains == bdev) { - if (disk->fops->release) - ret = disk->fops->release(bd_inode, NULL); - } else { - mutex_lock_nested(&bdev->bd_contains->bd_mutex, - subclass + 1); - bdev->bd_contains->bd_part_count--; - mutex_unlock(&bdev->bd_contains->bd_mutex); - } - if (!bdev->bd_openers) { - struct module *owner = disk->fops->owner; - - put_disk(disk); - module_put(owner); - - if (bdev->bd_contains != bdev) { - kobject_put(&bdev->bd_part->kobj); - bdev->bd_part = NULL; - } - bdev->bd_disk = NULL; - bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; - if (bdev != bdev->bd_contains) - __blkdev_put(bdev->bd_contains, subclass + 1); - bdev->bd_contains = NULL; - } - unlock_kernel(); - mutex_unlock(&bdev->bd_mutex); - bdput(bdev); - return ret; -} - -int blkdev_put(struct block_device *bdev) -{ - return __blkdev_put(bdev, BD_MUTEX_NORMAL); -} -EXPORT_SYMBOL(blkdev_put); - -int blkdev_put_partition(struct block_device *bdev) -{ - return __blkdev_put(bdev, BD_MUTEX_PARTITION); -} -EXPORT_SYMBOL(blkdev_put_partition); - static int blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags); @@ -1035,7 +980,7 @@ 
do_open(struct block_device *bdev, struct file *file, unsigned int subclass) bdev->bd_disk = NULL; bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; if (bdev != bdev->bd_contains) - __blkdev_put(bdev->bd_contains, BD_MUTEX_WHOLE); + blkdev_put(bdev->bd_contains); bdev->bd_contains = NULL; put_disk(disk); module_put(owner); @@ -1134,6 +1079,63 @@ static int blkdev_open(struct inode * inode, struct file * filp) return res; } +static int __blkdev_put(struct block_device *bdev, unsigned int subclass) +{ + int ret = 0; + struct inode *bd_inode = bdev->bd_inode; + struct gendisk *disk = bdev->bd_disk; + + mutex_lock_nested(&bdev->bd_mutex, subclass); + lock_kernel(); + if (!--bdev->bd_openers) { + sync_blockdev(bdev); + kill_bdev(bdev); + } + if (bdev->bd_contains == bdev) { + if (disk->fops->release) + ret = disk->fops->release(bd_inode, NULL); + } else { + mutex_lock_nested(&bdev->bd_contains->bd_mutex, + subclass + 1); + bdev->bd_contains->bd_part_count--; + mutex_unlock(&bdev->bd_contains->bd_mutex); + } + if (!bdev->bd_openers) { + struct module *owner = disk->fops->owner; + + put_disk(disk); + module_put(owner); + + if (bdev->bd_contains != bdev) { + kobject_put(&bdev->bd_part->kobj); + bdev->bd_part = NULL; + } + bdev->bd_disk = NULL; + bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; + if (bdev != bdev->bd_contains) + __blkdev_put(bdev->bd_contains, subclass + 1); + bdev->bd_contains = NULL; + } + unlock_kernel(); + mutex_unlock(&bdev->bd_mutex); + bdput(bdev); + return ret; +} + +int blkdev_put(struct block_device *bdev) +{ + return __blkdev_put(bdev, BD_MUTEX_NORMAL); +} + +EXPORT_SYMBOL(blkdev_put); + +int blkdev_put_partition(struct block_device *bdev) +{ + return __blkdev_put(bdev, BD_MUTEX_PARTITION); +} + +EXPORT_SYMBOL(blkdev_put_partition); + static int blkdev_close(struct inode * inode, struct file * filp) { struct block_device *bdev = I_BDEV(filp->f_mapping->host); diff --git a/trunk/fs/eventpoll.c b/trunk/fs/eventpoll.c index 3a3567433b92..19ffb043abbc 100644 --- a/trunk/fs/eventpoll.c +++ b/trunk/fs/eventpoll.c @@ -1168,7 +1168,7 @@ static int ep_unlink(struct eventpoll *ep, struct epitem *epi) eexit_1: DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n", - current, ep, epi->ffd.file, error)); + current, ep, epi->file, error)); return error; } @@ -1236,7 +1236,7 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k struct eventpoll *ep = epi->ep; DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n", - current, epi->ffd.file, epi, ep)); + current, epi->file, epi, ep)); write_lock_irqsave(&ep->lock, flags); diff --git a/trunk/fs/exec.c b/trunk/fs/exec.c index 54135df2a966..8344ba73a2a6 100644 --- a/trunk/fs/exec.c +++ b/trunk/fs/exec.c @@ -486,6 +486,8 @@ struct file *open_exec(const char *name) if (!(nd.mnt->mnt_flags & MNT_NOEXEC) && S_ISREG(inode->i_mode)) { int err = vfs_permission(&nd, MAY_EXEC); + if (!err && !(inode->i_mode & 0111)) + err = -EACCES; file = ERR_PTR(err); if (!err) { file = nameidata_to_filp(&nd, O_RDONLY); @@ -751,7 +753,7 @@ static int de_thread(struct task_struct *tsk) write_lock_irq(&tasklist_lock); spin_lock(&oldsighand->siglock); - spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING); + spin_lock(&newsighand->siglock); rcu_assign_pointer(current->sighand, newsighand); recalc_sigpending(); @@ -920,6 +922,12 @@ int prepare_binprm(struct linux_binprm *bprm) int retval; mode = inode->i_mode; + /* + * Check execute perms again - if the caller 
has CAP_DAC_OVERRIDE, + * generic_permission lets a non-executable through + */ + if (!(mode & 0111)) /* with at least _one_ execute bit set */ + return -EACCES; if (bprm->file->f_op == NULL) return -EACCES; diff --git a/trunk/fs/ext2/super.c b/trunk/fs/ext2/super.c index 681dea8f9532..f2702cda9779 100644 --- a/trunk/fs/ext2/super.c +++ b/trunk/fs/ext2/super.c @@ -775,7 +775,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) if (EXT2_INODE_SIZE(sb) == 0) goto cantfind_ext2; sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb); - if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0) + if (sbi->s_inodes_per_block == 0) goto cantfind_ext2; sbi->s_itb_per_group = sbi->s_inodes_per_group / sbi->s_inodes_per_block; diff --git a/trunk/fs/ext3/balloc.c b/trunk/fs/ext3/balloc.c index 063d994bda0b..a504a40d6d29 100644 --- a/trunk/fs/ext3/balloc.c +++ b/trunk/fs/ext3/balloc.c @@ -1269,12 +1269,12 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode, goal = le32_to_cpu(es->s_first_data_block); group_no = (goal - le32_to_cpu(es->s_first_data_block)) / EXT3_BLOCKS_PER_GROUP(sb); - goal_group = group_no; -retry_alloc: gdp = ext3_get_group_desc(sb, group_no, &gdp_bh); if (!gdp) goto io_error; + goal_group = group_no; +retry: free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); /* * if there is not enough free blocks to make a new resevation @@ -1349,7 +1349,7 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode, if (my_rsv) { my_rsv = NULL; group_no = goal_group; - goto retry_alloc; + goto retry; } /* No space left on the device */ *errp = -ENOSPC; diff --git a/trunk/fs/ioprio.c b/trunk/fs/ioprio.c index 78b1deae3fa2..93aa5715f224 100644 --- a/trunk/fs/ioprio.c +++ b/trunk/fs/ioprio.c @@ -44,9 +44,6 @@ static int set_task_ioprio(struct task_struct *task, int ioprio) task->ioprio = ioprio; ioc = task->io_context; - /* see wmb() in current_io_context() */ - smp_read_barrier_depends(); - if (ioc && ioc->set_ioprio) ioc->set_ioprio(ioc, ioprio); @@ -114,9 +111,9 @@ asmlinkage long sys_ioprio_set(int which, int who, int ioprio) continue; ret = set_task_ioprio(p, ioprio); if (ret) - goto free_uid; + break; } while_each_thread(g, p); -free_uid: + if (who) free_uid(user); break; @@ -140,29 +137,6 @@ static int get_task_ioprio(struct task_struct *p) return ret; } -int ioprio_best(unsigned short aprio, unsigned short bprio) -{ - unsigned short aclass = IOPRIO_PRIO_CLASS(aprio); - unsigned short bclass = IOPRIO_PRIO_CLASS(bprio); - - if (!ioprio_valid(aprio)) - return bprio; - if (!ioprio_valid(bprio)) - return aprio; - - if (aclass == IOPRIO_CLASS_NONE) - aclass = IOPRIO_CLASS_BE; - if (bclass == IOPRIO_CLASS_NONE) - bclass = IOPRIO_CLASS_BE; - - if (aclass == bclass) - return min(aprio, bprio); - if (aclass > bclass) - return bprio; - else - return aprio; -} - asmlinkage long sys_ioprio_get(int which, int who) { struct task_struct *g, *p; diff --git a/trunk/fs/jbd/commit.c b/trunk/fs/jbd/commit.c index 42da60784311..0971814c38b8 100644 --- a/trunk/fs/jbd/commit.c +++ b/trunk/fs/jbd/commit.c @@ -261,7 +261,7 @@ void journal_commit_transaction(journal_t *journal) struct buffer_head *bh = jh2bh(jh); jbd_lock_bh_state(bh); - jbd_slab_free(jh->b_committed_data, bh->b_size); + kfree(jh->b_committed_data); jh->b_committed_data = NULL; jbd_unlock_bh_state(bh); } @@ -745,14 +745,14 @@ void journal_commit_transaction(journal_t *journal) * Otherwise, we can just throw away the frozen data now. 
*/ if (jh->b_committed_data) { - jbd_slab_free(jh->b_committed_data, bh->b_size); + kfree(jh->b_committed_data); jh->b_committed_data = NULL; if (jh->b_frozen_data) { jh->b_committed_data = jh->b_frozen_data; jh->b_frozen_data = NULL; } } else if (jh->b_frozen_data) { - jbd_slab_free(jh->b_frozen_data, bh->b_size); + kfree(jh->b_frozen_data); jh->b_frozen_data = NULL; } diff --git a/trunk/fs/jbd/journal.c b/trunk/fs/jbd/journal.c index f66724ce443a..8c9b28dff119 100644 --- a/trunk/fs/jbd/journal.c +++ b/trunk/fs/jbd/journal.c @@ -84,7 +84,6 @@ EXPORT_SYMBOL(journal_force_commit); static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *); static void __journal_abort_soft (journal_t *journal, int errno); -static int journal_create_jbd_slab(size_t slab_size); /* * Helper function used to manage commit timeouts @@ -329,10 +328,10 @@ int journal_write_metadata_buffer(transaction_t *transaction, char *tmp; jbd_unlock_bh_state(bh_in); - tmp = jbd_slab_alloc(bh_in->b_size, GFP_NOFS); + tmp = jbd_rep_kmalloc(bh_in->b_size, GFP_NOFS); jbd_lock_bh_state(bh_in); if (jh_in->b_frozen_data) { - jbd_slab_free(tmp, bh_in->b_size); + kfree(tmp); goto repeat; } @@ -1070,17 +1069,17 @@ static int load_superblock(journal_t *journal) int journal_load(journal_t *journal) { int err; - journal_superblock_t *sb; err = load_superblock(journal); if (err) return err; - sb = journal->j_superblock; /* If this is a V2 superblock, then we have to check the * features flags on it. */ if (journal->j_format_version >= 2) { + journal_superblock_t *sb = journal->j_superblock; + if ((sb->s_feature_ro_compat & ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) || (sb->s_feature_incompat & @@ -1091,13 +1090,6 @@ int journal_load(journal_t *journal) } } - /* - * Create a slab for this blocksize - */ - err = journal_create_jbd_slab(cpu_to_be32(sb->s_blocksize)); - if (err) - return err; - /* Let the recovery code check whether it needs to recover any * data from the journal. */ if (journal_recover(journal)) @@ -1619,77 +1611,6 @@ void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry) return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0)); } -/* - * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed - * and allocate frozen and commit buffers from these slabs. - * - * Reason for doing this is to avoid, SLAB_DEBUG - since it could - * cause bh to cross page boundary. - */ - -#define JBD_MAX_SLABS 5 -#define JBD_SLAB_INDEX(size) (size >> 11) - -static kmem_cache_t *jbd_slab[JBD_MAX_SLABS]; -static const char *jbd_slab_names[JBD_MAX_SLABS] = { - "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k" -}; - -static void journal_destroy_jbd_slabs(void) -{ - int i; - - for (i = 0; i < JBD_MAX_SLABS; i++) { - if (jbd_slab[i]) - kmem_cache_destroy(jbd_slab[i]); - jbd_slab[i] = NULL; - } -} - -static int journal_create_jbd_slab(size_t slab_size) -{ - int i = JBD_SLAB_INDEX(slab_size); - - BUG_ON(i >= JBD_MAX_SLABS); - - /* - * Check if we already have a slab created for this size - */ - if (jbd_slab[i]) - return 0; - - /* - * Create a slab and force alignment to be same as slabsize - - * this will make sure that allocations won't cross the page - * boundary. 
- */ - jbd_slab[i] = kmem_cache_create(jbd_slab_names[i], - slab_size, slab_size, 0, NULL, NULL); - if (!jbd_slab[i]) { - printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n"); - return -ENOMEM; - } - return 0; -} - -void * jbd_slab_alloc(size_t size, gfp_t flags) -{ - int idx; - - idx = JBD_SLAB_INDEX(size); - BUG_ON(jbd_slab[idx] == NULL); - return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL); -} - -void jbd_slab_free(void *ptr, size_t size) -{ - int idx; - - idx = JBD_SLAB_INDEX(size); - BUG_ON(jbd_slab[idx] == NULL); - kmem_cache_free(jbd_slab[idx], ptr); -} - /* * Journal_head storage management */ @@ -1878,13 +1799,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh) printk(KERN_WARNING "%s: freeing " "b_frozen_data\n", __FUNCTION__); - jbd_slab_free(jh->b_frozen_data, bh->b_size); + kfree(jh->b_frozen_data); } if (jh->b_committed_data) { printk(KERN_WARNING "%s: freeing " "b_committed_data\n", __FUNCTION__); - jbd_slab_free(jh->b_committed_data, bh->b_size); + kfree(jh->b_committed_data); } bh->b_private = NULL; jh->b_bh = NULL; /* debug, really */ @@ -2040,7 +1961,6 @@ static void journal_destroy_caches(void) journal_destroy_revoke_caches(); journal_destroy_journal_head_cache(); journal_destroy_handle_cache(); - journal_destroy_jbd_slabs(); } static int __init journal_init(void) diff --git a/trunk/fs/jbd/transaction.c b/trunk/fs/jbd/transaction.c index de2e4cbbf79a..508b2ea91f43 100644 --- a/trunk/fs/jbd/transaction.c +++ b/trunk/fs/jbd/transaction.c @@ -666,9 +666,8 @@ do_get_write_access(handle_t *handle, struct journal_head *jh, if (!frozen_buffer) { JBUFFER_TRACE(jh, "allocate memory for buffer"); jbd_unlock_bh_state(bh); - frozen_buffer = - jbd_slab_alloc(jh2bh(jh)->b_size, - GFP_NOFS); + frozen_buffer = jbd_kmalloc(jh2bh(jh)->b_size, + GFP_NOFS); if (!frozen_buffer) { printk(KERN_EMERG "%s: OOM for frozen_buffer\n", @@ -880,7 +879,7 @@ int journal_get_undo_access(handle_t *handle, struct buffer_head *bh) repeat: if (!jh->b_committed_data) { - committed_data = jbd_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS); + committed_data = jbd_kmalloc(jh2bh(jh)->b_size, GFP_NOFS); if (!committed_data) { printk(KERN_EMERG "%s: No memory for committed data\n", __FUNCTION__); @@ -907,7 +906,7 @@ int journal_get_undo_access(handle_t *handle, struct buffer_head *bh) out: journal_put_journal_head(jh); if (unlikely(committed_data)) - jbd_slab_free(committed_data, bh->b_size); + kfree(committed_data); return err; } diff --git a/trunk/fs/lockd/svcsubs.c b/trunk/fs/lockd/svcsubs.c index 01b4db9e5466..2a4df9b3779a 100644 --- a/trunk/fs/lockd/svcsubs.c +++ b/trunk/fs/lockd/svcsubs.c @@ -237,22 +237,19 @@ static int nlm_traverse_files(struct nlm_host *host, int action) { struct nlm_file *file, **fp; - int i, ret = 0; + int i; mutex_lock(&nlm_file_mutex); for (i = 0; i < FILE_NRHASH; i++) { fp = nlm_files + i; while ((file = *fp) != NULL) { - file->f_count++; - mutex_unlock(&nlm_file_mutex); - /* Traverse locks, blocks and shares of this file * and update file->f_locks count */ - if (nlm_inspect_file(host, file, action)) - ret = 1; + if (nlm_inspect_file(host, file, action)) { + mutex_unlock(&nlm_file_mutex); + return 1; + } - mutex_lock(&nlm_file_mutex); - file->f_count--; /* No more references to this file. Let go of it. 
*/ if (!file->f_blocks && !file->f_locks && !file->f_shares && !file->f_count) { @@ -265,7 +262,7 @@ nlm_traverse_files(struct nlm_host *host, int action) } } mutex_unlock(&nlm_file_mutex); - return ret; + return 0; } /* diff --git a/trunk/fs/minix/inode.c b/trunk/fs/minix/inode.c index 330ff9fc7cf0..9ea91c5eeb7b 100644 --- a/trunk/fs/minix/inode.c +++ b/trunk/fs/minix/inode.c @@ -204,8 +204,6 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) /* * Allocate the buffer map to keep the superblock small. */ - if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0) - goto out_illegal_sb; i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh); map = kmalloc(i, GFP_KERNEL); if (!map) @@ -265,7 +263,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) out_no_bitmap: printk("MINIX-fs: bad superblock or unable to read bitmaps\n"); -out_freemap: + out_freemap: for (i = 0; i < sbi->s_imap_blocks; i++) brelse(sbi->s_imap[i]); for (i = 0; i < sbi->s_zmap_blocks; i++) @@ -278,16 +276,11 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) printk("MINIX-fs: can't allocate map\n"); goto out_release; -out_illegal_sb: - if (!silent) - printk("MINIX-fs: bad superblock\n"); - goto out_release; - out_no_fs: if (!silent) printk("VFS: Can't find a Minix or Minix V2 filesystem " "on device %s\n", s->s_id); -out_release: + out_release: brelse(bh); goto out; @@ -297,7 +290,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) out_bad_sb: printk("MINIX-fs: unable to read superblock\n"); -out: + out: s->s_fs_info = NULL; kfree(sbi); return -EINVAL; diff --git a/trunk/fs/namei.c b/trunk/fs/namei.c index 432d6bc6fab0..55a131230f94 100644 --- a/trunk/fs/namei.c +++ b/trunk/fs/namei.c @@ -227,10 +227,10 @@ int generic_permission(struct inode *inode, int mask, int permission(struct inode *inode, int mask, struct nameidata *nd) { - umode_t mode = inode->i_mode; int retval, submask; if (mask & MAY_WRITE) { + umode_t mode = inode->i_mode; /* * Nobody gets write access to a read-only fs. @@ -247,13 +247,6 @@ int permission(struct inode *inode, int mask, struct nameidata *nd) } - /* - * MAY_EXEC on regular files requires special handling: We override - * filesystem execute permissions if the mode bits aren't set. - */ - if ((mask & MAY_EXEC) && S_ISREG(mode) && !(mode & S_IXUGO)) - return -EACCES; - /* Ordinary permission routines do not understand MAY_APPEND. */ submask = mask & ~MAY_APPEND; if (inode->i_op && inode->i_op->permission) @@ -1774,8 +1767,6 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir) if (nd->last_type != LAST_NORM) goto fail; nd->flags &= ~LOOKUP_PARENT; - nd->flags |= LOOKUP_CREATE; - nd->intent.open.flags = O_EXCL; /* * Do the final lookup. diff --git a/trunk/fs/nfs/file.c b/trunk/fs/nfs/file.c index 48e892880d5b..cc2b874ad5a4 100644 --- a/trunk/fs/nfs/file.c +++ b/trunk/fs/nfs/file.c @@ -312,13 +312,7 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset) static int nfs_release_page(struct page *page, gfp_t gfp) { - if (gfp & __GFP_FS) - return !nfs_wb_page(page->mapping->host, page); - else - /* - * Avoid deadlock on nfs_wait_on_request(). 
- */ - return 0; + return !nfs_wb_page(page->mapping->host, page); } const struct address_space_operations nfs_file_aops = { diff --git a/trunk/fs/nfs/idmap.c b/trunk/fs/nfs/idmap.c index 07a5dd57646e..b81e7ed3c902 100644 --- a/trunk/fs/nfs/idmap.c +++ b/trunk/fs/nfs/idmap.c @@ -130,7 +130,9 @@ nfs_idmap_delete(struct nfs4_client *clp) if (!idmap) return; - rpc_unlink(idmap->idmap_dentry); + dput(idmap->idmap_dentry); + idmap->idmap_dentry = NULL; + rpc_unlink(idmap->idmap_path); clp->cl_idmap = NULL; kfree(idmap); } diff --git a/trunk/fs/nfs/nfs4proc.c b/trunk/fs/nfs/nfs4proc.c index 153898e1331f..e6ee97f19d81 100644 --- a/trunk/fs/nfs/nfs4proc.c +++ b/trunk/fs/nfs/nfs4proc.c @@ -2668,7 +2668,7 @@ static void nfs4_write_cached_acl(struct inode *inode, const char *buf, size_t a nfs4_set_cached_acl(inode, acl); } -static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) +static inline ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) { struct page *pages[NFS4ACL_MAXPAGES]; struct nfs_getaclargs args = { @@ -2721,19 +2721,6 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu return ret; } -static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) -{ - struct nfs4_exception exception = { }; - ssize_t ret; - do { - ret = __nfs4_get_acl_uncached(inode, buf, buflen); - if (ret >= 0) - break; - ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); - } while (exception.retry); - return ret; -} - static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) { struct nfs_server *server = NFS_SERVER(inode); @@ -2750,7 +2737,7 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) return nfs4_get_acl_uncached(inode, buf, buflen); } -static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) +static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) { struct nfs_server *server = NFS_SERVER(inode); struct page *pages[NFS4ACL_MAXPAGES]; @@ -2776,18 +2763,6 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl return ret; } -static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) -{ - struct nfs4_exception exception = { }; - int err; - do { - err = nfs4_handle_exception(NFS_SERVER(inode), - __nfs4_proc_set_acl(inode, buf, buflen), - &exception); - } while (exception.retry); - return err; -} - static int nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server) { diff --git a/trunk/fs/nfs/nfs4xdr.c b/trunk/fs/nfs/nfs4xdr.c index 730ec8fb31c6..1750d996f49f 100644 --- a/trunk/fs/nfs/nfs4xdr.c +++ b/trunk/fs/nfs/nfs4xdr.c @@ -3355,7 +3355,7 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n struct kvec *iov = rcvbuf->head; unsigned int nr, pglen = rcvbuf->page_len; uint32_t *end, *entry, *p, *kaddr; - uint32_t len, attrlen, xlen; + uint32_t len, attrlen; int hdrlen, recvd, status; status = decode_op_hdr(xdr, OP_READDIR); @@ -3377,10 +3377,10 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n BUG_ON(pglen + readdir->pgbase > PAGE_CACHE_SIZE); kaddr = p = (uint32_t *) kmap_atomic(page, KM_USER0); - end = p + ((pglen + readdir->pgbase) >> 2); + end = (uint32_t *) ((char *)p + pglen + readdir->pgbase); entry = p; for (nr = 0; *p++; nr++) { - if (end - p < 3) + if (p + 3 > end) goto short_pkt; dprintk("cookie = %Lu, ", *((unsigned long long *)p)); p += 2; 
/* cookie */ @@ -3389,19 +3389,18 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n printk(KERN_WARNING "NFS: giant filename in readdir (len 0x%x)\n", len); goto err_unmap; } - xlen = XDR_QUADLEN(len); - if (end - p < xlen + 1) - goto short_pkt; dprintk("filename = %*s\n", len, (char *)p); - p += xlen; - len = ntohl(*p++); /* bitmap length */ - if (end - p < len + 1) + p += XDR_QUADLEN(len); + if (p + 1 > end) goto short_pkt; + len = ntohl(*p++); /* bitmap length */ p += len; - attrlen = XDR_QUADLEN(ntohl(*p++)); - if (end - p < attrlen + 2) + if (p + 1 > end) goto short_pkt; + attrlen = XDR_QUADLEN(ntohl(*p++)); p += attrlen; /* attributes */ + if (p + 2 > end) + goto short_pkt; entry = p; } if (!nr && (entry[0] != 0 || entry[1] == 0)) diff --git a/trunk/fs/nfs/read.c b/trunk/fs/nfs/read.c index da9cf11c326f..65c0c5b32351 100644 --- a/trunk/fs/nfs/read.c +++ b/trunk/fs/nfs/read.c @@ -116,17 +116,10 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data) pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; base &= ~PAGE_CACHE_MASK; pglen = PAGE_CACHE_SIZE - base; - for (;;) { - if (remainder <= pglen) { - memclear_highpage_flush(*pages, base, remainder); - break; - } + if (pglen < remainder) memclear_highpage_flush(*pages, base, pglen); - pages++; - remainder -= pglen; - pglen = PAGE_CACHE_SIZE; - base = 0; - } + else + memclear_highpage_flush(*pages, base, remainder); } /* @@ -483,8 +476,6 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data) unsigned int base = data->args.pgbase; struct page **pages; - if (data->res.eof) - count = data->args.count; if (unlikely(count == 0)) return; pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; @@ -492,7 +483,11 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data) count += base; for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) SetPageUptodate(*pages); - if (count != 0) + /* + * Was this an eof or a short read? If the latter, don't mark the page + * as uptodate yet. 
+ */ + if (count > 0 && (data->res.eof || data->args.count == data->res.count)) SetPageUptodate(*pages); } @@ -507,8 +502,6 @@ static void nfs_readpage_set_pages_error(struct nfs_read_data *data) count += base; for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) SetPageError(*pages); - if (count != 0) - SetPageError(*pages); } /* diff --git a/trunk/fs/partitions/sun.c b/trunk/fs/partitions/sun.c index 0a5927c806ca..abe91ca03edf 100644 --- a/trunk/fs/partitions/sun.c +++ b/trunk/fs/partitions/sun.c @@ -74,7 +74,7 @@ int sun_partition(struct parsed_partitions *state, struct block_device *bdev) spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect); for (i = 0; i < 8; i++, p++) { unsigned long st_sector; - unsigned int num_sectors; + int num_sectors; st_sector = be32_to_cpu(p->start_cylinder) * spc; num_sectors = be32_to_cpu(p->num_sectors); diff --git a/trunk/fs/proc/proc_misc.c b/trunk/fs/proc/proc_misc.c index 942156225447..9f2cfc30f9cf 100644 --- a/trunk/fs/proc/proc_misc.c +++ b/trunk/fs/proc/proc_misc.c @@ -169,7 +169,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off, "Mapped: %8lu kB\n" "Slab: %8lu kB\n" "PageTables: %8lu kB\n" - "NFS_Unstable: %8lu kB\n" + "NFS Unstable: %8lu kB\n" "Bounce: %8lu kB\n" "CommitLimit: %8lu kB\n" "Committed_AS: %8lu kB\n" diff --git a/trunk/fs/reiserfs/xattr.c b/trunk/fs/reiserfs/xattr.c index d935fb9394e3..39fedaa88a0c 100644 --- a/trunk/fs/reiserfs/xattr.c +++ b/trunk/fs/reiserfs/xattr.c @@ -424,7 +424,7 @@ int xattr_readdir(struct file *file, filldir_t filler, void *buf) int res = -ENOTDIR; if (!file->f_op || !file->f_op->readdir) goto out; - mutex_lock_nested(&inode->i_mutex, I_MUTEX_XATTR); + mutex_lock(&inode->i_mutex); // down(&inode->i_zombie); res = -ENOENT; if (!IS_DEADDIR(inode)) { diff --git a/trunk/fs/udf/super.c b/trunk/fs/udf/super.c index fcce1a21a51b..7de172efa084 100644 --- a/trunk/fs/udf/super.c +++ b/trunk/fs/udf/super.c @@ -1659,7 +1659,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) iput(inode); goto error_out; } - sb->s_maxbytes = 1<<30; + sb->s_maxbytes = MAX_LFS_FILESIZE; return 0; error_out: diff --git a/trunk/fs/udf/truncate.c b/trunk/fs/udf/truncate.c index 0abd66ce36ea..e1b0e8cfecb4 100644 --- a/trunk/fs/udf/truncate.c +++ b/trunk/fs/udf/truncate.c @@ -239,51 +239,37 @@ void udf_truncate_extents(struct inode * inode) { if (offset) { - /* - * OK, there is not extent covering inode->i_size and - * no extent above inode->i_size => truncate is - * extending the file by 'offset'. - */ - if ((!bh && extoffset == udf_file_entry_alloc_offset(inode)) || - (bh && extoffset == sizeof(struct allocExtDesc))) { - /* File has no extents at all! 
*/ - memset(&eloc, 0x00, sizeof(kernel_lb_addr)); - elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset; - udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1); + extoffset -= adsize; + etype = udf_next_aext(inode, &bloc, &extoffset, &eloc, &elen, &bh, 1); + if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) + { + extoffset -= adsize; + elen = EXT_NOT_RECORDED_NOT_ALLOCATED | (elen + offset); + udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 0); } - else { + else if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) + { + kernel_lb_addr neloc = { 0, 0 }; extoffset -= adsize; - etype = udf_next_aext(inode, &bloc, &extoffset, &eloc, &elen, &bh, 1); - if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) - { - extoffset -= adsize; - elen = EXT_NOT_RECORDED_NOT_ALLOCATED | (elen + offset); - udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 0); - } - else if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) + nelen = EXT_NOT_RECORDED_NOT_ALLOCATED | + ((elen + offset + inode->i_sb->s_blocksize - 1) & + ~(inode->i_sb->s_blocksize - 1)); + udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1); + udf_add_aext(inode, &bloc, &extoffset, eloc, (etype << 30) | elen, &bh, 1); + } + else + { + if (elen & (inode->i_sb->s_blocksize - 1)) { - kernel_lb_addr neloc = { 0, 0 }; extoffset -= adsize; - nelen = EXT_NOT_RECORDED_NOT_ALLOCATED | - ((elen + offset + inode->i_sb->s_blocksize - 1) & + elen = EXT_RECORDED_ALLOCATED | + ((elen + inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize - 1)); - udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1); - udf_add_aext(inode, &bloc, &extoffset, eloc, (etype << 30) | elen, &bh, 1); - } - else - { - if (elen & (inode->i_sb->s_blocksize - 1)) - { - extoffset -= adsize; - elen = EXT_RECORDED_ALLOCATED | - ((elen + inode->i_sb->s_blocksize - 1) & - ~(inode->i_sb->s_blocksize - 1)); - udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 1); - } - memset(&eloc, 0x00, sizeof(kernel_lb_addr)); - elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset; - udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1); + udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 1); } + memset(&eloc, 0x00, sizeof(kernel_lb_addr)); + elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset; + udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1); } } } diff --git a/trunk/fs/ufs/inode.c b/trunk/fs/ufs/inode.c index 30c6e8a9446c..e7c8615beb65 100644 --- a/trunk/fs/ufs/inode.c +++ b/trunk/fs/ufs/inode.c @@ -169,20 +169,18 @@ static void ufs_clear_frag(struct inode *inode, struct buffer_head *bh) static struct buffer_head * ufs_clear_frags(struct inode *inode, sector_t beg, - unsigned int n, sector_t want) + unsigned int n) { - struct buffer_head *res = NULL, *bh; + struct buffer_head *res, *bh; sector_t end = beg + n; - for (; beg < end; ++beg) { + res = sb_getblk(inode->i_sb, beg); + ufs_clear_frag(inode, res); + for (++beg; beg < end; ++beg) { bh = sb_getblk(inode->i_sb, beg); ufs_clear_frag(inode, bh); - if (want != beg) - brelse(bh); - else - res = bh; + brelse(bh); } - BUG_ON(!res); return res; } @@ -267,9 +265,7 @@ ufs_inode_getfrag(struct inode *inode, unsigned int fragment, lastfrag = ufsi->i_lastfrag; } - tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]); - if (tmp) - goal = tmp + uspi->s_fpb; + goal = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]) + uspi->s_fpb; tmp = ufs_new_fragments (inode, p, fragment - blockoff, goal, required + blockoff, err, locked_page); @@ -281,15 +277,13 @@ ufs_inode_getfrag(struct inode *inode, unsigned int fragment, tmp = 
ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff), fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff), err, locked_page); - } else /* (lastblock > block) */ { + } /* * We will allocate new block before last allocated block */ - if (block) { - tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[block-1]); - if (tmp) - goal = tmp + uspi->s_fpb; - } + else /* (lastblock > block) */ { + if (lastblock && (tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock-1]))) + goal = tmp + uspi->s_fpb; tmp = ufs_new_fragments(inode, p, fragment - blockoff, goal, uspi->s_fpb, err, locked_page); } @@ -302,7 +296,7 @@ ufs_inode_getfrag(struct inode *inode, unsigned int fragment, } if (!phys) { - result = ufs_clear_frags(inode, tmp, required, tmp + blockoff); + result = ufs_clear_frags(inode, tmp + blockoff, required); } else { *phys = tmp + blockoff; result = NULL; @@ -389,7 +383,7 @@ ufs_inode_getblock(struct inode *inode, struct buffer_head *bh, } } - if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1]))) + if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1]) + uspi->s_fpb)) goal = tmp + uspi->s_fpb; else goal = bh->b_blocknr + uspi->s_fpb; @@ -403,8 +397,7 @@ ufs_inode_getblock(struct inode *inode, struct buffer_head *bh, if (!phys) { - result = ufs_clear_frags(inode, tmp, uspi->s_fpb, - tmp + blockoff); + result = ufs_clear_frags(inode, tmp + blockoff, uspi->s_fpb); } else { *phys = tmp + blockoff; *new = 1; diff --git a/trunk/fs/ufs/truncate.c b/trunk/fs/ufs/truncate.c index ea11d04c41a0..c9b55872079b 100644 --- a/trunk/fs/ufs/truncate.c +++ b/trunk/fs/ufs/truncate.c @@ -375,15 +375,17 @@ static int ufs_alloc_lastblock(struct inode *inode) int err = 0; struct address_space *mapping = inode->i_mapping; struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi; + struct ufs_inode_info *ufsi = UFS_I(inode); unsigned lastfrag, i, end; struct page *lastpage; struct buffer_head *bh; lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift; - if (!lastfrag) + if (!lastfrag) { + ufsi->i_lastfrag = 0; goto out; - + } lastfrag--; lastpage = ufs_get_locked_page(mapping, lastfrag >> @@ -398,25 +400,25 @@ static int ufs_alloc_lastblock(struct inode *inode) for (i = 0; i < end; ++i) bh = bh->b_this_page; - - err = ufs_getfrag_block(inode, lastfrag, bh, 1); - - if (unlikely(err)) - goto out_unlock; - - if (buffer_new(bh)) { - clear_buffer_new(bh); - unmap_underlying_metadata(bh->b_bdev, - bh->b_blocknr); - /* - * we do not zeroize fragment, because of - * if it maped to hole, it already contains zeroes - */ - set_buffer_uptodate(bh); - mark_buffer_dirty(bh); - set_page_dirty(lastpage); + if (!buffer_mapped(bh)) { + err = ufs_getfrag_block(inode, lastfrag, bh, 1); + + if (unlikely(err)) + goto out_unlock; + + if (buffer_new(bh)) { + clear_buffer_new(bh); + unmap_underlying_metadata(bh->b_bdev, + bh->b_blocknr); + /* + * we do not zeroize fragment, because of + * if it maped to hole, it already contains zeroes + */ + set_buffer_uptodate(bh); + mark_buffer_dirty(bh); + set_page_dirty(lastpage); + } } - out_unlock: ufs_put_locked_page(lastpage); out: @@ -438,11 +440,23 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size) if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) return -EPERM; - err = ufs_alloc_lastblock(inode); + if (inode->i_size > old_i_size) { + /* + * if we expand file we should care about + * allocation of block for last byte first of all + */ + err = ufs_alloc_lastblock(inode); - if (err) { - i_size_write(inode, old_i_size); - goto out; + if (err) { + 
i_size_write(inode, old_i_size); + goto out; + } + /* + * go away, because of we expand file, and we do not + * need free blocks, and zeroizes page + */ + lock_kernel(); + goto almost_end; } block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block); @@ -463,8 +477,21 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size) yield(); } + if (inode->i_size < old_i_size) { + /* + * now we should have enough space + * to allocate block for last byte + */ + err = ufs_alloc_lastblock(inode); + if (err) + /* + * looks like all the same - we have no space, + * but we truncate file already + */ + inode->i_size = (ufsi->i_lastfrag - 1) * uspi->s_fsize; + } +almost_end: inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; - ufsi->i_lastfrag = DIRECT_FRAGMENT; unlock_kernel(); mark_inode_dirty(inode); out: diff --git a/trunk/include/asm-arm/arch-s3c2410/dma.h b/trunk/include/asm-arm/arch-s3c2410/dma.h index 7463fd5252ce..72964f9b8414 100644 --- a/trunk/include/asm-arm/arch-s3c2410/dma.h +++ b/trunk/include/asm-arm/arch-s3c2410/dma.h @@ -104,7 +104,6 @@ enum s3c2410_chan_op_e { S3C2410_DMAOP_RESUME, S3C2410_DMAOP_FLUSH, S3C2410_DMAOP_TIMEOUT, /* internal signal to handler */ - S3C2410_DMAOP_STARTED, /* indicate channel started */ }; typedef enum s3c2410_chan_op_e s3c2410_chan_op_t; diff --git a/trunk/include/asm-arm/arch-s3c2410/regs-rtc.h b/trunk/include/asm-arm/arch-s3c2410/regs-rtc.h index 0fbec07bb6b8..228983f89bc8 100644 --- a/trunk/include/asm-arm/arch-s3c2410/regs-rtc.h +++ b/trunk/include/asm-arm/arch-s3c2410/regs-rtc.h @@ -18,7 +18,7 @@ #ifndef __ASM_ARCH_REGS_RTC_H #define __ASM_ARCH_REGS_RTC_H __FILE__ -#define S3C2410_RTCREG(x) (x) +#define S3C2410_RTCREG(x) ((x) + S3C24XX_VA_RTC) #define S3C2410_RTCCON S3C2410_RTCREG(0x40) #define S3C2410_RTCCON_RTCEN (1<<0) diff --git a/trunk/include/asm-arm/procinfo.h b/trunk/include/asm-arm/procinfo.h index 91a31adfa8a8..edb7b6502fcf 100644 --- a/trunk/include/asm-arm/procinfo.h +++ b/trunk/include/asm-arm/procinfo.h @@ -55,6 +55,5 @@ extern unsigned int elf_hwcap; #define HWCAP_VFP 64 #define HWCAP_EDSP 128 #define HWCAP_JAVA 256 -#define HWCAP_IWMMXT 512 #endif diff --git a/trunk/include/asm-i386/mmzone.h b/trunk/include/asm-i386/mmzone.h index 22cb07cc8f32..e33e9f9e4c66 100644 --- a/trunk/include/asm-i386/mmzone.h +++ b/trunk/include/asm-i386/mmzone.h @@ -14,7 +14,7 @@ extern struct pglist_data *node_data[]; #ifdef CONFIG_X86_NUMAQ #include -#elif defined(CONFIG_ACPI_SRAT)/* summit or generic arch */ +#else /* summit or generic arch */ #include #endif diff --git a/trunk/include/asm-powerpc/pgalloc.h b/trunk/include/asm-powerpc/pgalloc.h index ae63db7b3e7d..9f0917c68659 100644 --- a/trunk/include/asm-powerpc/pgalloc.h +++ b/trunk/include/asm-powerpc/pgalloc.h @@ -117,7 +117,7 @@ static inline void pte_free(struct page *ptepage) pte_free_kernel(page_address(ptepage)); } -#define PGF_CACHENUM_MASK 0x3 +#define PGF_CACHENUM_MASK 0xf typedef struct pgtable_free { unsigned long val; diff --git a/trunk/include/asm-powerpc/system.h b/trunk/include/asm-powerpc/system.h index 4c9f5229e833..7307aa775671 100644 --- a/trunk/include/asm-powerpc/system.h +++ b/trunk/include/asm-powerpc/system.h @@ -53,15 +53,6 @@ #define smp_read_barrier_depends() do { } while(0) #endif /* CONFIG_SMP */ -/* - * This is a barrier which prevents following instructions from being - * started until the value of the argument x is known. 
For example, if - * x is a variable loaded from memory, this prevents following - * instructions from being executed until the load has been performed. - */ -#define data_barrier(x) \ - asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory"); - struct task_struct; struct pt_regs; diff --git a/trunk/include/asm-powerpc/tsi108.h b/trunk/include/asm-powerpc/tsi108.h index 2c702d35a7cf..c4c278d72f71 100644 --- a/trunk/include/asm-powerpc/tsi108.h +++ b/trunk/include/asm-powerpc/tsi108.h @@ -1,18 +1,16 @@ /* + * include/asm-ppc/tsi108.h + * * common routine and memory layout for Tundra TSI108(Grendel) host bridge * memory controller. * * Author: Jacob Pan (jacob.pan@freescale.com) * Alex Bounine (alexandreb@tundra.com) - * - * Copyright 2004-2006 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. + * 2004 (c) Freescale Semiconductor Inc. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. */ - #ifndef __PPC_KERNEL_TSI108_H #define __PPC_KERNEL_TSI108_H diff --git a/trunk/include/asm-powerpc/tsi108_irq.h b/trunk/include/asm-powerpc/tsi108_irq.h deleted file mode 100644 index 3e4d04effa57..000000000000 --- a/trunk/include/asm-powerpc/tsi108_irq.h +++ /dev/null @@ -1,124 +0,0 @@ -/* - * (C) Copyright 2005 Tundra Semiconductor Corp. - * Alex Bounine, child -> normal -> xattr -> quota + * parent -> child -> normal -> quota */ enum inode_i_mutex_lock_class { I_MUTEX_NORMAL, I_MUTEX_PARENT, I_MUTEX_CHILD, - I_MUTEX_XATTR, I_MUTEX_QUOTA }; diff --git a/trunk/include/linux/ioprio.h b/trunk/include/linux/ioprio.h index 8e2042b9d471..88d5961f7a3f 100644 --- a/trunk/include/linux/ioprio.h +++ b/trunk/include/linux/ioprio.h @@ -59,6 +59,27 @@ static inline int task_nice_ioprio(struct task_struct *task) /* * For inheritance, return the highest of the two given priorities */ -extern int ioprio_best(unsigned short aprio, unsigned short bprio); +static inline int ioprio_best(unsigned short aprio, unsigned short bprio) +{ + unsigned short aclass = IOPRIO_PRIO_CLASS(aprio); + unsigned short bclass = IOPRIO_PRIO_CLASS(bprio); + + if (!ioprio_valid(aprio)) + return bprio; + if (!ioprio_valid(bprio)) + return aprio; + + if (aclass == IOPRIO_CLASS_NONE) + aclass = IOPRIO_CLASS_BE; + if (bclass == IOPRIO_CLASS_NONE) + bclass = IOPRIO_CLASS_BE; + + if (aclass == bclass) + return min(aprio, bprio); + if (aclass > bclass) + return bprio; + else + return aprio; +} #endif diff --git a/trunk/include/linux/jbd.h b/trunk/include/linux/jbd.h index a04c154c5207..20eb34403d0c 100644 --- a/trunk/include/linux/jbd.h +++ b/trunk/include/linux/jbd.h @@ -72,9 +72,6 @@ extern int journal_enable_debug; #endif extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry); -extern void * jbd_slab_alloc(size_t size, gfp_t flags); -extern void jbd_slab_free(void *ptr, size_t size); - #define jbd_kmalloc(size, flags) \ __jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry) #define jbd_rep_kmalloc(size, flags) \ diff --git a/trunk/include/linux/netfilter_bridge.h b/trunk/include/linux/netfilter_bridge.h index 427c67ff89e9..10c13dc4665b 100644 --- a/trunk/include/linux/netfilter_bridge.h +++ b/trunk/include/linux/netfilter_bridge.h @@ 
-48,25 +48,15 @@ enum nf_br_hook_priorities { /* Only used in br_forward.c */ static inline -int nf_bridge_maybe_copy_header(struct sk_buff *skb) +void nf_bridge_maybe_copy_header(struct sk_buff *skb) { - int err; - if (skb->nf_bridge) { if (skb->protocol == __constant_htons(ETH_P_8021Q)) { - err = skb_cow(skb, 18); - if (err) - return err; memcpy(skb->data - 18, skb->nf_bridge->data, 18); skb_push(skb, 4); - } else { - err = skb_cow(skb, 16); - if (err) - return err; + } else memcpy(skb->data - 16, skb->nf_bridge->data, 16); - } } - return 0; } /* This is called by the IP fragmenting code and it ensures there is diff --git a/trunk/include/linux/nfs_xdr.h b/trunk/include/linux/nfs_xdr.h index db9cbf68e12b..2d3fb6416d91 100644 --- a/trunk/include/linux/nfs_xdr.h +++ b/trunk/include/linux/nfs_xdr.h @@ -659,7 +659,7 @@ struct nfs4_rename_res { struct nfs4_setclientid { const nfs4_verifier * sc_verifier; /* request */ unsigned int sc_name_len; - char sc_name[48]; /* request */ + char sc_name[32]; /* request */ u32 sc_prog; /* request */ unsigned int sc_netid_len; char sc_netid[4]; /* request */ diff --git a/trunk/include/linux/node.h b/trunk/include/linux/node.h index bc001bc225c3..81dcec84cd8f 100644 --- a/trunk/include/linux/node.h +++ b/trunk/include/linux/node.h @@ -30,20 +30,12 @@ extern struct node node_devices[]; extern int register_node(struct node *, int, struct node *); extern void unregister_node(struct node *node); -#ifdef CONFIG_NUMA extern int register_one_node(int nid); extern void unregister_one_node(int nid); +#ifdef CONFIG_NUMA extern int register_cpu_under_node(unsigned int cpu, unsigned int nid); extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); #else -static inline int register_one_node(int nid) -{ - return 0; -} -static inline int unregister_one_node(int nid) -{ - return 0; -} static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid) { return 0; diff --git a/trunk/include/linux/sunrpc/rpc_pipe_fs.h b/trunk/include/linux/sunrpc/rpc_pipe_fs.h index a481472c9484..2c2189cb30aa 100644 --- a/trunk/include/linux/sunrpc/rpc_pipe_fs.h +++ b/trunk/include/linux/sunrpc/rpc_pipe_fs.h @@ -42,9 +42,9 @@ RPC_I(struct inode *inode) extern int rpc_queue_upcall(struct inode *, struct rpc_pipe_msg *); extern struct dentry *rpc_mkdir(char *, struct rpc_clnt *); -extern int rpc_rmdir(struct dentry *); +extern int rpc_rmdir(char *); extern struct dentry *rpc_mkpipe(char *, void *, struct rpc_pipe_ops *, int flags); -extern int rpc_unlink(struct dentry *); +extern int rpc_unlink(char *); extern struct vfsmount *rpc_get_mount(void); extern void rpc_put_mount(void); diff --git a/trunk/include/linux/sunrpc/xprt.h b/trunk/include/linux/sunrpc/xprt.h index 3a0cca255b76..840e47a4ccc5 100644 --- a/trunk/include/linux/sunrpc/xprt.h +++ b/trunk/include/linux/sunrpc/xprt.h @@ -37,7 +37,7 @@ extern unsigned int xprt_max_resvport; #define RPC_MIN_RESVPORT (1U) #define RPC_MAX_RESVPORT (65535U) -#define RPC_DEF_MIN_RESVPORT (665U) +#define RPC_DEF_MIN_RESVPORT (650U) #define RPC_DEF_MAX_RESVPORT (1023U) /* diff --git a/trunk/include/linux/tty.h b/trunk/include/linux/tty.h index 04827ca65781..e421d5e34818 100644 --- a/trunk/include/linux/tty.h +++ b/trunk/include/linux/tty.h @@ -59,7 +59,6 @@ struct tty_bufhead { struct tty_buffer *head; /* Queue head */ struct tty_buffer *tail; /* Active buffer */ struct tty_buffer *free; /* Free queue head */ - int memory_used; /* Buffer space used excluding free queue */ }; /* * The pty uses char_buf and flag_buf as a contiguous 
buffer diff --git a/trunk/include/linux/vt.h b/trunk/include/linux/vt.h index ba806e8711be..8ab334a48222 100644 --- a/trunk/include/linux/vt.h +++ b/trunk/include/linux/vt.h @@ -60,6 +60,5 @@ struct vt_consize { #define VT_RESIZEX 0x560A /* set kernel's idea of screensize + more */ #define VT_LOCKSWITCH 0x560B /* disallow vt switching */ #define VT_UNLOCKSWITCH 0x560C /* allow vt switching */ -#define VT_GETHIFONTMASK 0x560D /* return hi font mask */ #endif /* _LINUX_VT_H */ diff --git a/trunk/include/net/sctp/sctp.h b/trunk/include/net/sctp/sctp.h index 92eae0e0f3f1..a9663b49ea54 100644 --- a/trunk/include/net/sctp/sctp.h +++ b/trunk/include/net/sctp/sctp.h @@ -404,6 +404,19 @@ static inline int sctp_list_single_entry(struct list_head *head) return ((head->next != head) && (head->next == head->prev)); } +/* Calculate the size (in bytes) occupied by the data of an iovec. */ +static inline size_t get_user_iov_size(struct iovec *iov, int iovlen) +{ + size_t retval = 0; + + for (; iovlen > 0; --iovlen) { + retval += iov->iov_len; + iov++; + } + + return retval; +} + /* Generate a random jitter in the range of -50% ~ +50% of input RTO. */ static inline __s32 sctp_jitter(__u32 rto) { diff --git a/trunk/include/net/sctp/sm.h b/trunk/include/net/sctp/sm.h index de313de4fefe..1eac3d0eb7a9 100644 --- a/trunk/include/net/sctp/sm.h +++ b/trunk/include/net/sctp/sm.h @@ -221,7 +221,8 @@ struct sctp_chunk *sctp_make_abort_no_data(const struct sctp_association *, const struct sctp_chunk *, __u32 tsn); struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *, - const struct msghdr *, size_t msg_len); + const struct sctp_chunk *, + const struct msghdr *); struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *, const struct sctp_chunk *, const __u8 *, diff --git a/trunk/include/scsi/libiscsi.h b/trunk/include/scsi/libiscsi.h index 41904f611d12..ba2760802ded 100644 --- a/trunk/include/scsi/libiscsi.h +++ b/trunk/include/scsi/libiscsi.h @@ -60,7 +60,6 @@ struct iscsi_nopin; #define TMABORT_SUCCESS 0x1 #define TMABORT_FAILED 0x2 #define TMABORT_TIMEDOUT 0x3 -#define TMABORT_NOT_FOUND 0x4 /* Connection suspend "bit" */ #define ISCSI_SUSPEND_BIT 1 @@ -84,12 +83,6 @@ struct iscsi_mgmt_task { struct list_head running; }; -enum { - ISCSI_TASK_COMPLETED, - ISCSI_TASK_PENDING, - ISCSI_TASK_RUNNING, -}; - struct iscsi_cmd_task { /* * Becuae LLDs allocate their hdr differently, this is a pointer to @@ -108,8 +101,6 @@ struct iscsi_cmd_task { struct iscsi_conn *conn; /* used connection */ struct iscsi_mgmt_task *mtask; /* tmf mtask in progr */ - /* state set/tested under session->lock */ - int state; struct list_head running; /* running cmd list */ void *dd_data; /* driver/transport data */ }; @@ -135,14 +126,6 @@ struct iscsi_conn { int id; /* CID */ struct list_head item; /* maintains list of conns */ int c_stage; /* connection state */ - /* - * Preallocated buffer for pdus that have data but do not - * originate from scsi-ml. We never have two pdus using the - * buffer at the same time. 
It is only allocated to - * the default max recv size because the pdus we support - * should always fit in this buffer - */ - char *data; struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */ struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */ struct iscsi_cmd_task *ctask; /* xmit ctask in progress */ @@ -151,7 +134,7 @@ struct iscsi_conn { struct kfifo *immqueue; /* immediate xmit queue */ struct kfifo *mgmtqueue; /* mgmt (control) xmit queue */ struct list_head mgmt_run_list; /* list of control tasks */ - struct list_head xmitqueue; /* data-path cmd queue */ + struct kfifo *xmitqueue; /* data-path cmd queue */ struct list_head run_list; /* list of cmds in progress */ struct work_struct xmitwork; /* per-conn. xmit workqueue */ /* diff --git a/trunk/include/scsi/scsi_transport_iscsi.h b/trunk/include/scsi/scsi_transport_iscsi.h index 39e833260bd0..5a3df1d7085f 100644 --- a/trunk/include/scsi/scsi_transport_iscsi.h +++ b/trunk/include/scsi/scsi_transport_iscsi.h @@ -57,6 +57,8 @@ struct sockaddr; * @stop_conn: suspend/recover/terminate connection * @send_pdu: send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text. * @session_recovery_timedout: notify LLD a block during recovery timed out + * @suspend_conn_recv: susepend the recv side of the connection + * @termincate_conn: destroy socket connection. Called with mutex lock. * @init_cmd_task: Initialize a iscsi_cmd_task and any internal structs. * Called from queuecommand with session lock held. * @init_mgmt_task: Initialize a iscsi_mgmt_task and any internal structs. @@ -110,6 +112,8 @@ struct iscsi_transport { char *data, uint32_t data_size); void (*get_stats) (struct iscsi_cls_conn *conn, struct iscsi_stats *stats); + void (*suspend_conn_recv) (struct iscsi_conn *conn); + void (*terminate_conn) (struct iscsi_conn *conn); void (*init_cmd_task) (struct iscsi_cmd_task *ctask); void (*init_mgmt_task) (struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, diff --git a/trunk/kernel/cpuset.c b/trunk/kernel/cpuset.c index 4ea6f0dc2fc5..1a649f2bb9bb 100644 --- a/trunk/kernel/cpuset.c +++ b/trunk/kernel/cpuset.c @@ -816,10 +816,6 @@ static int update_cpumask(struct cpuset *cs, char *buf) struct cpuset trialcs; int retval, cpus_unchanged; - /* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */ - if (cs == &top_cpuset) - return -EACCES; - trialcs = *cs; retval = cpulist_parse(buf, trialcs.cpus_allowed); if (retval < 0) @@ -2037,33 +2033,6 @@ int __init cpuset_init(void) return err; } -/* - * The top_cpuset tracks what CPUs and Memory Nodes are online, - * period. This is necessary in order to make cpusets transparent - * (of no affect) on systems that are actively using CPU hotplug - * but making no active use of cpusets. - * - * This handles CPU hotplug (cpuhp) events. If someday Memory - * Nodes can be hotplugged (dynamically changing node_online_map) - * then we should handle that too, perhaps in a similar way. 
- */ - -#ifdef CONFIG_HOTPLUG_CPU -static int cpuset_handle_cpuhp(struct notifier_block *nb, - unsigned long phase, void *cpu) -{ - mutex_lock(&manage_mutex); - mutex_lock(&callback_mutex); - - top_cpuset.cpus_allowed = cpu_online_map; - - mutex_unlock(&callback_mutex); - mutex_unlock(&manage_mutex); - - return 0; -} -#endif - /** * cpuset_init_smp - initialize cpus_allowed * @@ -2074,8 +2043,6 @@ void __init cpuset_init_smp(void) { top_cpuset.cpus_allowed = cpu_online_map; top_cpuset.mems_allowed = node_online_map; - - hotcpu_notifier(cpuset_handle_cpuhp, 0); } /** @@ -2420,7 +2387,7 @@ EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); int cpuset_excl_nodes_overlap(const struct task_struct *p) { const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */ - int overlap = 1; /* do cpusets overlap? */ + int overlap = 0; /* do cpusets overlap? */ task_lock(current); if (current->flags & PF_EXITING) { diff --git a/trunk/kernel/futex.c b/trunk/kernel/futex.c index b9b8aea5389e..d4633c588f33 100644 --- a/trunk/kernel/futex.c +++ b/trunk/kernel/futex.c @@ -397,7 +397,7 @@ static struct task_struct * futex_find_get_task(pid_t pid) p = NULL; goto out_unlock; } - if (p->exit_state != 0) { + if (p->state == EXIT_ZOMBIE || p->exit_state == EXIT_ZOMBIE) { p = NULL; goto out_unlock; } diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c index a234fbee1238..a2be2d055299 100644 --- a/trunk/kernel/sched.c +++ b/trunk/kernel/sched.c @@ -4162,8 +4162,10 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) read_unlock_irq(&tasklist_lock); return -ESRCH; } - retval = sched_setscheduler(p, policy, &lparam); + get_task_struct(p); read_unlock_irq(&tasklist_lock); + retval = sched_setscheduler(p, policy, &lparam); + put_task_struct(p); return retval; } diff --git a/trunk/kernel/stop_machine.c b/trunk/kernel/stop_machine.c index 51cacd111dbd..dcfb5d731466 100644 --- a/trunk/kernel/stop_machine.c +++ b/trunk/kernel/stop_machine.c @@ -111,6 +111,7 @@ static int stop_machine(void) /* If some failed, kill them all. 
*/ if (ret < 0) { stopmachine_set_state(STOPMACHINE_EXIT); + up(&stopmachine_mutex); return ret; } diff --git a/trunk/lib/ts_bm.c b/trunk/lib/ts_bm.c index d90822c378a4..0110e4414805 100644 --- a/trunk/lib/ts_bm.c +++ b/trunk/lib/ts_bm.c @@ -111,14 +111,15 @@ static int subpattern(u8 *pattern, int i, int j, int g) return ret; } -static void compute_prefix_tbl(struct ts_bm *bm) +static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern, + unsigned int len) { int i, j, g; for (i = 0; i < ASIZE; i++) - bm->bad_shift[i] = bm->patlen; - for (i = 0; i < bm->patlen - 1; i++) - bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i; + bm->bad_shift[i] = len; + for (i = 0; i < len - 1; i++) + bm->bad_shift[pattern[i]] = len - 1 - i; /* Compute the good shift array, used to match reocurrences * of a subpattern */ @@ -149,8 +150,8 @@ static struct ts_config *bm_init(const void *pattern, unsigned int len, bm = ts_config_priv(conf); bm->patlen = len; bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len; + compute_prefix_tbl(bm, pattern, len); memcpy(bm->pattern, pattern, len); - compute_prefix_tbl(bm); return conf; } diff --git a/trunk/mm/swapfile.c b/trunk/mm/swapfile.c index f1f5ec783781..e70d6c6d6fee 100644 --- a/trunk/mm/swapfile.c +++ b/trunk/mm/swapfile.c @@ -442,12 +442,11 @@ int swap_type_of(dev_t device) if (!(swap_info[i].flags & SWP_WRITEOK)) continue; - if (!device) { spin_unlock(&swap_lock); return i; } - inode = swap_info[i].swap_file->f_dentry->d_inode; + inode = swap_info->swap_file->f_dentry->d_inode; if (S_ISBLK(inode->i_mode) && device == MKDEV(imajor(inode), iminor(inode))) { spin_unlock(&swap_lock); diff --git a/trunk/net/bridge/br_forward.c b/trunk/net/bridge/br_forward.c index 864fbbc7b24d..6ccd32b30809 100644 --- a/trunk/net/bridge/br_forward.c +++ b/trunk/net/bridge/br_forward.c @@ -40,15 +40,11 @@ int br_dev_queue_push_xmit(struct sk_buff *skb) else { #ifdef CONFIG_BRIDGE_NETFILTER /* ip_refrag calls ip_fragment, doesn't copy the MAC header. */ - if (nf_bridge_maybe_copy_header(skb)) - kfree_skb(skb); - else + nf_bridge_maybe_copy_header(skb); #endif - { - skb_push(skb, ETH_HLEN); + skb_push(skb, ETH_HLEN); - dev_queue_xmit(skb); - } + dev_queue_xmit(skb); } return 0; diff --git a/trunk/net/dccp/ccids/ccid3.c b/trunk/net/dccp/ccids/ccid3.c index 090bc39e8199..c39bff706cfc 100644 --- a/trunk/net/dccp/ccids/ccid3.c +++ b/trunk/net/dccp/ccids/ccid3.c @@ -2,7 +2,7 @@ * net/dccp/ccids/ccid3.c * * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 
- * Copyright (c) 2005-6 Ian McDonald + * Copyright (c) 2005-6 Ian McDonald * * An implementation of the DCCP protocol * @@ -342,8 +342,6 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, new_packet->dccphtx_ccval = DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count; - timeval_add_usecs(&hctx->ccid3hctx_t_nom, - hctx->ccid3hctx_t_ipi); } out: return rc; @@ -415,8 +413,7 @@ static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, int len) case TFRC_SSTATE_NO_FBACK: case TFRC_SSTATE_FBACK: if (len > 0) { - timeval_sub_usecs(&hctx->ccid3hctx_t_nom, - hctx->ccid3hctx_t_ipi); + hctx->ccid3hctx_t_nom = now; ccid3_calc_new_t_ipi(hctx); ccid3_calc_new_delta(hctx); timeval_add_usecs(&hctx->ccid3hctx_t_nom, @@ -760,7 +757,8 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk) } hcrx->ccid3hcrx_tstamp_last_feedback = now; - hcrx->ccid3hcrx_ccval_last_counter = packet->dccphrx_ccval; + hcrx->ccid3hcrx_last_counter = packet->dccphrx_ccval; + hcrx->ccid3hcrx_seqno_last_counter = packet->dccphrx_seqno; hcrx->ccid3hcrx_bytes_recv = 0; /* Convert to multiples of 10us */ @@ -784,7 +782,7 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb) if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)) return 0; - DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_ccval_last_counter; + DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_last_counter; if (dccp_packet_without_ack(skb)) return 0; @@ -856,11 +854,6 @@ static u32 ccid3_hc_rx_calc_first_li(struct sock *sk) interval = 1; } found: - if (!tail) { - LIMIT_NETDEBUG(KERN_WARNING "%s: tail is null\n", - __FUNCTION__); - return ~0; - } rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval; ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n", dccp_role(sk), sk, rtt); @@ -871,20 +864,9 @@ static u32 ccid3_hc_rx_calc_first_li(struct sock *sk) delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback); x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv, delta); - if (x_recv == 0) - x_recv = hcrx->ccid3hcrx_x_recv; - tmp1 = (u64)x_recv * (u64)rtt; do_div(tmp1,10000000); tmp2 = (u32)tmp1; - - if (!tmp2) { - LIMIT_NETDEBUG(KERN_WARNING "tmp2 = 0 " - "%s: x_recv = %u, rtt =%u\n", - __FUNCTION__, x_recv, rtt); - return ~0; - } - fval = (hcrx->ccid3hcrx_s * 100000) / tmp2; /* do not alter order above or you will get overflow on 32 bit */ p = tfrc_calc_x_reverse_lookup(fval); @@ -900,101 +882,31 @@ static u32 ccid3_hc_rx_calc_first_li(struct sock *sk) static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss) { struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); - struct dccp_li_hist_entry *next, *head; - u64 seq_temp; - if (list_empty(&hcrx->ccid3hcrx_li_hist)) { - if (!dccp_li_hist_interval_new(ccid3_li_hist, - &hcrx->ccid3hcrx_li_hist, seq_loss, win_loss)) - return; + if (seq_loss != DCCP_MAX_SEQNO + 1 && + list_empty(&hcrx->ccid3hcrx_li_hist)) { + struct dccp_li_hist_entry *li_tail; - next = (struct dccp_li_hist_entry *) - hcrx->ccid3hcrx_li_hist.next; - next->dccplih_interval = ccid3_hc_rx_calc_first_li(sk); - } else { - struct dccp_li_hist_entry *entry; - struct list_head *tail; - - head = (struct dccp_li_hist_entry *) - hcrx->ccid3hcrx_li_hist.next; - /* FIXME win count check removed as was wrong */ - /* should make this check with receive history */ - /* and compare there as per section 10.2 of RFC4342 */ - - /* new loss event detected */ - /* calculate last interval length */ - seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss); - entry = 
dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC); - - if (entry == NULL) { - printk(KERN_CRIT "%s: out of memory\n",__FUNCTION__); - dump_stack(); + li_tail = dccp_li_hist_interval_new(ccid3_li_hist, + &hcrx->ccid3hcrx_li_hist, + seq_loss, win_loss); + if (li_tail == NULL) return; - } - - list_add(&entry->dccplih_node, &hcrx->ccid3hcrx_li_hist); - - tail = hcrx->ccid3hcrx_li_hist.prev; - list_del(tail); - kmem_cache_free(ccid3_li_hist->dccplih_slab, tail); - - /* Create the newest interval */ - entry->dccplih_seqno = seq_loss; - entry->dccplih_interval = seq_temp; - entry->dccplih_win_count = win_loss; - } + li_tail->dccplih_interval = ccid3_hc_rx_calc_first_li(sk); + } else + LIMIT_NETDEBUG(KERN_WARNING "%s: FIXME: find end of " + "interval\n", __FUNCTION__); } -static int ccid3_hc_rx_detect_loss(struct sock *sk, - struct dccp_rx_hist_entry *packet) +static void ccid3_hc_rx_detect_loss(struct sock *sk) { struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); - struct dccp_rx_hist_entry *rx_hist = dccp_rx_hist_head(&hcrx->ccid3hcrx_hist); - u64 seqno = packet->dccphrx_seqno; - u64 tmp_seqno; - int loss = 0; - u8 ccval; - - - tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss; - - if (!rx_hist || - follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) { - hcrx->ccid3hcrx_seqno_nonloss = seqno; - hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval; - goto detect_out; - } - + u8 win_loss; + const u64 seq_loss = dccp_rx_hist_detect_loss(&hcrx->ccid3hcrx_hist, + &hcrx->ccid3hcrx_li_hist, + &win_loss); - while (dccp_delta_seqno(hcrx->ccid3hcrx_seqno_nonloss, seqno) - > TFRC_RECV_NUM_LATE_LOSS) { - loss = 1; - ccid3_hc_rx_update_li(sk, hcrx->ccid3hcrx_seqno_nonloss, - hcrx->ccid3hcrx_ccval_nonloss); - tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss; - dccp_inc_seqno(&tmp_seqno); - hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno; - dccp_inc_seqno(&tmp_seqno); - while (dccp_rx_hist_find_entry(&hcrx->ccid3hcrx_hist, - tmp_seqno, &ccval)) { - hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno; - hcrx->ccid3hcrx_ccval_nonloss = ccval; - dccp_inc_seqno(&tmp_seqno); - } - } - - /* FIXME - this code could be simplified with above while */ - /* but works at moment */ - if (follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) { - hcrx->ccid3hcrx_seqno_nonloss = seqno; - hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval; - } - -detect_out: - dccp_rx_hist_add_packet(ccid3_rx_hist, &hcrx->ccid3hcrx_hist, - &hcrx->ccid3hcrx_li_hist, packet, - hcrx->ccid3hcrx_seqno_nonloss); - return loss; + ccid3_hc_rx_update_li(sk, seq_loss, win_loss); } static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) @@ -1004,8 +916,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) struct dccp_rx_hist_entry *packet; struct timeval now; u8 win_count; - u32 p_prev, rtt_prev, r_sample, t_elapsed; - int loss; + u32 p_prev, r_sample, t_elapsed; + int ins; BUG_ON(hcrx == NULL || !(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA || @@ -1020,7 +932,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) case DCCP_PKT_DATAACK: if (opt_recv->dccpor_timestamp_echo == 0) break; - rtt_prev = hcrx->ccid3hcrx_rtt; + p_prev = hcrx->ccid3hcrx_rtt; dccp_timestamp(sk, &now); timeval_sub_usecs(&now, opt_recv->dccpor_timestamp_echo * 10); r_sample = timeval_usecs(&now); @@ -1039,8 +951,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) hcrx->ccid3hcrx_rtt = (hcrx->ccid3hcrx_rtt * 9) / 10 + r_sample / 10; - if (rtt_prev != hcrx->ccid3hcrx_rtt) - 
ccid3_pr_debug("%s, New RTT=%uus, elapsed time=%u\n", + if (p_prev != hcrx->ccid3hcrx_rtt) + ccid3_pr_debug("%s, New RTT=%luus, elapsed time=%u\n", dccp_role(sk), hcrx->ccid3hcrx_rtt, opt_recv->dccpor_elapsed_time); break; @@ -1061,7 +973,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) win_count = packet->dccphrx_ccval; - loss = ccid3_hc_rx_detect_loss(sk, packet); + ins = dccp_rx_hist_add_packet(ccid3_rx_hist, &hcrx->ccid3hcrx_hist, + &hcrx->ccid3hcrx_li_hist, packet); if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK) return; @@ -1078,7 +991,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) case TFRC_RSTATE_DATA: hcrx->ccid3hcrx_bytes_recv += skb->len - dccp_hdr(skb)->dccph_doff * 4; - if (loss) + if (ins != 0) break; dccp_timestamp(sk, &now); @@ -1099,6 +1012,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) ccid3_pr_debug("%s, sk=%p(%s), data loss! Reacting...\n", dccp_role(sk), sk, dccp_state_name(sk->sk_state)); + ccid3_hc_rx_detect_loss(sk); p_prev = hcrx->ccid3hcrx_p; /* Calculate loss event rate */ @@ -1108,9 +1022,6 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) /* Scaling up by 1000000 as fixed decimal */ if (i_mean != 0) hcrx->ccid3hcrx_p = 1000000 / i_mean; - } else { - printk(KERN_CRIT "%s: empty loss hist\n",__FUNCTION__); - dump_stack(); } if (hcrx->ccid3hcrx_p > p_prev) { @@ -1319,7 +1230,7 @@ static __exit void ccid3_module_exit(void) } module_exit(ccid3_module_exit); -MODULE_AUTHOR("Ian McDonald , " +MODULE_AUTHOR("Ian McDonald , " "Arnaldo Carvalho de Melo "); MODULE_DESCRIPTION("DCCP TFRC CCID3 CCID"); MODULE_LICENSE("GPL"); diff --git a/trunk/net/dccp/ccids/ccid3.h b/trunk/net/dccp/ccids/ccid3.h index 0a2cb7536d26..5ade4f668b22 100644 --- a/trunk/net/dccp/ccids/ccid3.h +++ b/trunk/net/dccp/ccids/ccid3.h @@ -1,13 +1,13 @@ /* * net/dccp/ccids/ccid3.h * - * Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. * * An implementation of the DCCP protocol * * This code has been developed by the University of Waikato WAND * research group. For further information please see http://www.wand.net.nz/ - * or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz + * or e-mail Ian McDonald - iam4@cs.waikato.ac.nz * * This code also uses code from Lulea University, rereleased as GPL by its * authors: @@ -120,10 +120,9 @@ struct ccid3_hc_rx_sock { #define ccid3hcrx_x_recv ccid3hcrx_tfrc.tfrcrx_x_recv #define ccid3hcrx_rtt ccid3hcrx_tfrc.tfrcrx_rtt #define ccid3hcrx_p ccid3hcrx_tfrc.tfrcrx_p - u64 ccid3hcrx_seqno_nonloss:48, - ccid3hcrx_ccval_nonloss:4, + u64 ccid3hcrx_seqno_last_counter:48, ccid3hcrx_state:8, - ccid3hcrx_ccval_last_counter:4; + ccid3hcrx_last_counter:4; u32 ccid3hcrx_bytes_recv; struct timeval ccid3hcrx_tstamp_last_feedback; struct timeval ccid3hcrx_tstamp_last_ack; diff --git a/trunk/net/dccp/ccids/lib/loss_interval.c b/trunk/net/dccp/ccids/lib/loss_interval.c index 906c81ab9d4f..5d7b7d864385 100644 --- a/trunk/net/dccp/ccids/lib/loss_interval.c +++ b/trunk/net/dccp/ccids/lib/loss_interval.c @@ -2,7 +2,7 @@ * net/dccp/ccids/lib/loss_interval.c * * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 
- * Copyright (c) 2005-6 Ian McDonald + * Copyright (c) 2005 Ian McDonald * Copyright (c) 2005 Arnaldo Carvalho de Melo * * This program is free software; you can redistribute it and/or modify @@ -12,7 +12,6 @@ */ #include -#include #include "loss_interval.h" @@ -91,13 +90,13 @@ u32 dccp_li_hist_calc_i_mean(struct list_head *list) u32 w_tot = 0; list_for_each_entry_safe(li_entry, li_next, list, dccplih_node) { - if (li_entry->dccplih_interval != ~0) { + if (i < DCCP_LI_HIST_IVAL_F_LENGTH) { i_tot0 += li_entry->dccplih_interval * dccp_li_hist_w[i]; w_tot += dccp_li_hist_w[i]; - if (i != 0) - i_tot1 += li_entry->dccplih_interval * dccp_li_hist_w[i - 1]; } + if (i != 0) + i_tot1 += li_entry->dccplih_interval * dccp_li_hist_w[i - 1]; if (++i > DCCP_LI_HIST_IVAL_F_LENGTH) break; @@ -108,36 +107,37 @@ u32 dccp_li_hist_calc_i_mean(struct list_head *list) i_tot = max(i_tot0, i_tot1); - if (!w_tot) { - LIMIT_NETDEBUG(KERN_WARNING "%s: w_tot = 0\n", __FUNCTION__); - return 1; - } + /* FIXME: Why do we do this? -Ian McDonald */ + if (i_tot * 4 < w_tot) + i_tot = w_tot * 4; - return i_tot / w_tot; + return i_tot * 4 / w_tot; } EXPORT_SYMBOL_GPL(dccp_li_hist_calc_i_mean); -int dccp_li_hist_interval_new(struct dccp_li_hist *hist, - struct list_head *list, const u64 seq_loss, const u8 win_loss) +struct dccp_li_hist_entry *dccp_li_hist_interval_new(struct dccp_li_hist *hist, + struct list_head *list, + const u64 seq_loss, + const u8 win_loss) { - struct dccp_li_hist_entry *entry; + struct dccp_li_hist_entry *tail = NULL, *entry; int i; - for (i = 0; i < DCCP_LI_HIST_IVAL_F_LENGTH; i++) { + for (i = 0; i <= DCCP_LI_HIST_IVAL_F_LENGTH; ++i) { entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC); if (entry == NULL) { dccp_li_hist_purge(hist, list); - dump_stack(); - return 0; + return NULL; } - entry->dccplih_interval = ~0; + if (tail == NULL) + tail = entry; list_add(&entry->dccplih_node, list); } entry->dccplih_seqno = seq_loss; entry->dccplih_win_count = win_loss; - return 1; + return tail; } EXPORT_SYMBOL_GPL(dccp_li_hist_interval_new); diff --git a/trunk/net/dccp/ccids/lib/loss_interval.h b/trunk/net/dccp/ccids/lib/loss_interval.h index 0ae85f0340b2..43bf78269d1d 100644 --- a/trunk/net/dccp/ccids/lib/loss_interval.h +++ b/trunk/net/dccp/ccids/lib/loss_interval.h @@ -4,7 +4,7 @@ * net/dccp/ccids/lib/loss_interval.h * * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. - * Copyright (c) 2005 Ian McDonald + * Copyright (c) 2005 Ian McDonald * Copyright (c) 2005 Arnaldo Carvalho de Melo * * This program is free software; you can redistribute it and/or modify it @@ -52,6 +52,9 @@ extern void dccp_li_hist_purge(struct dccp_li_hist *hist, extern u32 dccp_li_hist_calc_i_mean(struct list_head *list); -extern int dccp_li_hist_interval_new(struct dccp_li_hist *hist, - struct list_head *list, const u64 seq_loss, const u8 win_loss); +extern struct dccp_li_hist_entry * + dccp_li_hist_interval_new(struct dccp_li_hist *hist, + struct list_head *list, + const u64 seq_loss, + const u8 win_loss); #endif /* _DCCP_LI_HIST_ */ diff --git a/trunk/net/dccp/ccids/lib/packet_history.c b/trunk/net/dccp/ccids/lib/packet_history.c index b876c9c81c65..ad98d6a322eb 100644 --- a/trunk/net/dccp/ccids/lib/packet_history.c +++ b/trunk/net/dccp/ccids/lib/packet_history.c @@ -1,13 +1,13 @@ /* - * net/dccp/packet_history.c + * net/dccp/packet_history.h * - * Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 
* * An implementation of the DCCP protocol * * This code has been developed by the University of Waikato WAND * research group. For further information please see http://www.wand.net.nz/ - * or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz + * or e-mail Ian McDonald - iam4@cs.waikato.ac.nz * * This code also uses code from Lulea University, rereleased as GPL by its * authors: @@ -112,27 +112,64 @@ struct dccp_rx_hist_entry * EXPORT_SYMBOL_GPL(dccp_rx_hist_find_data_packet); -void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist, +int dccp_rx_hist_add_packet(struct dccp_rx_hist *hist, struct list_head *rx_list, struct list_head *li_list, - struct dccp_rx_hist_entry *packet, - u64 nonloss_seqno) + struct dccp_rx_hist_entry *packet) { - struct dccp_rx_hist_entry *entry, *next; + struct dccp_rx_hist_entry *entry, *next, *iter; u8 num_later = 0; - list_add(&packet->dccphrx_node, rx_list); + iter = dccp_rx_hist_head(rx_list); + if (iter == NULL) + dccp_rx_hist_add_entry(rx_list, packet); + else { + const u64 seqno = packet->dccphrx_seqno; + + if (after48(seqno, iter->dccphrx_seqno)) + dccp_rx_hist_add_entry(rx_list, packet); + else { + if (dccp_rx_hist_entry_data_packet(iter)) + num_later = 1; + + list_for_each_entry_continue(iter, rx_list, + dccphrx_node) { + if (after48(seqno, iter->dccphrx_seqno)) { + dccp_rx_hist_add_entry(&iter->dccphrx_node, + packet); + goto trim_history; + } + + if (dccp_rx_hist_entry_data_packet(iter)) + num_later++; + if (num_later == TFRC_RECV_NUM_LATE_LOSS) { + dccp_rx_hist_entry_delete(hist, packet); + return 1; + } + } + + if (num_later < TFRC_RECV_NUM_LATE_LOSS) + dccp_rx_hist_add_entry(rx_list, packet); + /* + * FIXME: else what? should we destroy the packet + * like above? + */ + } + } + +trim_history: + /* + * Trim history (remove all packets after the NUM_LATE_LOSS + 1 + * data packets) + */ num_later = TFRC_RECV_NUM_LATE_LOSS + 1; if (!list_empty(li_list)) { list_for_each_entry_safe(entry, next, rx_list, dccphrx_node) { if (num_later == 0) { - if (after48(nonloss_seqno, - entry->dccphrx_seqno)) { - list_del_init(&entry->dccphrx_node); - dccp_rx_hist_entry_delete(hist, entry); - } + list_del_init(&entry->dccphrx_node); + dccp_rx_hist_entry_delete(hist, entry); } else if (dccp_rx_hist_entry_data_packet(entry)) --num_later; } @@ -180,10 +217,94 @@ void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist, --num_later; } } + + return 0; } EXPORT_SYMBOL_GPL(dccp_rx_hist_add_packet); +u64 dccp_rx_hist_detect_loss(struct list_head *rx_list, + struct list_head *li_list, u8 *win_loss) +{ + struct dccp_rx_hist_entry *entry, *next, *packet; + struct dccp_rx_hist_entry *a_loss = NULL; + struct dccp_rx_hist_entry *b_loss = NULL; + u64 seq_loss = DCCP_MAX_SEQNO + 1; + u8 num_later = TFRC_RECV_NUM_LATE_LOSS; + + list_for_each_entry_safe(entry, next, rx_list, dccphrx_node) { + if (num_later == 0) { + b_loss = entry; + break; + } else if (dccp_rx_hist_entry_data_packet(entry)) + --num_later; + } + + if (b_loss == NULL) + goto out; + + num_later = 1; + list_for_each_entry_safe_continue(entry, next, rx_list, dccphrx_node) { + if (num_later == 0) { + a_loss = entry; + break; + } else if (dccp_rx_hist_entry_data_packet(entry)) + --num_later; + } + + if (a_loss == NULL) { + if (list_empty(li_list)) { + /* no loss event have occured yet */ + LIMIT_NETDEBUG("%s: TODO: find a lost data packet by " + "comparing to initial seqno\n", + __FUNCTION__); + goto out; + } else { + LIMIT_NETDEBUG("%s: Less than 4 data pkts in history!", + __FUNCTION__); + goto out; + } + } + + /* Locate a lost 
data packet */ + entry = packet = b_loss; + list_for_each_entry_safe_continue(entry, next, rx_list, dccphrx_node) { + u64 delta = dccp_delta_seqno(entry->dccphrx_seqno, + packet->dccphrx_seqno); + + if (delta != 0) { + if (dccp_rx_hist_entry_data_packet(packet)) + --delta; + /* + * FIXME: check this, probably this % usage is because + * in earlier drafts the ndp count was just 8 bits + * long, but now it cam be up to 24 bits long. + */ +#if 0 + if (delta % DCCP_NDP_LIMIT != + (packet->dccphrx_ndp - + entry->dccphrx_ndp) % DCCP_NDP_LIMIT) +#endif + if (delta != packet->dccphrx_ndp - entry->dccphrx_ndp) { + seq_loss = entry->dccphrx_seqno; + dccp_inc_seqno(&seq_loss); + } + } + packet = entry; + if (packet == a_loss) + break; + } +out: + if (seq_loss != DCCP_MAX_SEQNO + 1) + *win_loss = a_loss->dccphrx_ccval; + else + *win_loss = 0; /* Paranoia */ + + return seq_loss; +} + +EXPORT_SYMBOL_GPL(dccp_rx_hist_detect_loss); + struct dccp_tx_hist *dccp_tx_hist_new(const char *name) { struct dccp_tx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC); @@ -244,25 +365,6 @@ struct dccp_tx_hist_entry * EXPORT_SYMBOL_GPL(dccp_tx_hist_find_entry); -int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq, - u8 *ccval) -{ - struct dccp_rx_hist_entry *packet = NULL, *entry; - - list_for_each_entry(entry, list, dccphrx_node) - if (entry->dccphrx_seqno == seq) { - packet = entry; - break; - } - - if (packet) - *ccval = packet->dccphrx_ccval; - - return packet != NULL; -} - -EXPORT_SYMBOL_GPL(dccp_rx_hist_find_entry); - void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist, struct list_head *list, struct dccp_tx_hist_entry *packet) @@ -289,7 +391,7 @@ void dccp_tx_hist_purge(struct dccp_tx_hist *hist, struct list_head *list) EXPORT_SYMBOL_GPL(dccp_tx_hist_purge); -MODULE_AUTHOR("Ian McDonald , " +MODULE_AUTHOR("Ian McDonald , " "Arnaldo Carvalho de Melo "); MODULE_DESCRIPTION("DCCP TFRC library"); MODULE_LICENSE("GPL"); diff --git a/trunk/net/dccp/ccids/lib/packet_history.h b/trunk/net/dccp/ccids/lib/packet_history.h index 067cf1c85a37..673c209e4e85 100644 --- a/trunk/net/dccp/ccids/lib/packet_history.h +++ b/trunk/net/dccp/ccids/lib/packet_history.h @@ -1,13 +1,13 @@ /* * net/dccp/packet_history.h * - * Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. * * An implementation of the DCCP protocol * * This code has been developed by the University of Waikato WAND * research group. 
For further information please see http://www.wand.net.nz/ - * or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz + * or e-mail Ian McDonald - iam4@cs.waikato.ac.nz * * This code also uses code from Lulea University, rereleased as GPL by its * authors: @@ -106,8 +106,6 @@ static inline void dccp_tx_hist_entry_delete(struct dccp_tx_hist *hist, extern struct dccp_tx_hist_entry * dccp_tx_hist_find_entry(const struct list_head *list, const u64 seq); -extern int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq, - u8 *ccval); static inline void dccp_tx_hist_add_entry(struct list_head *list, struct dccp_tx_hist_entry *entry) @@ -166,6 +164,12 @@ static inline void dccp_rx_hist_entry_delete(struct dccp_rx_hist *hist, extern void dccp_rx_hist_purge(struct dccp_rx_hist *hist, struct list_head *list); +static inline void dccp_rx_hist_add_entry(struct list_head *list, + struct dccp_rx_hist_entry *entry) +{ + list_add(&entry->dccphrx_node, list); +} + static inline struct dccp_rx_hist_entry * dccp_rx_hist_head(struct list_head *list) { @@ -184,11 +188,10 @@ static inline int entry->dccphrx_type == DCCP_PKT_DATAACK; } -extern void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist, +extern int dccp_rx_hist_add_packet(struct dccp_rx_hist *hist, struct list_head *rx_list, struct list_head *li_list, - struct dccp_rx_hist_entry *packet, - u64 nonloss_seqno); + struct dccp_rx_hist_entry *packet); extern u64 dccp_rx_hist_detect_loss(struct list_head *rx_list, struct list_head *li_list, u8 *win_loss); diff --git a/trunk/net/dccp/ccids/lib/tfrc.h b/trunk/net/dccp/ccids/lib/tfrc.h index 45f30f59ea2a..130c4c40cfe3 100644 --- a/trunk/net/dccp/ccids/lib/tfrc.h +++ b/trunk/net/dccp/ccids/lib/tfrc.h @@ -4,7 +4,7 @@ * net/dccp/ccids/lib/tfrc.h * * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. - * Copyright (c) 2005 Ian McDonald + * Copyright (c) 2005 Ian McDonald * Copyright (c) 2005 Arnaldo Carvalho de Melo * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon * diff --git a/trunk/net/dccp/ccids/lib/tfrc_equation.c b/trunk/net/dccp/ccids/lib/tfrc_equation.c index 44076e0c6591..4fd2ebebf5a0 100644 --- a/trunk/net/dccp/ccids/lib/tfrc_equation.c +++ b/trunk/net/dccp/ccids/lib/tfrc_equation.c @@ -2,7 +2,7 @@ * net/dccp/ccids/lib/tfrc_equation.c * * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. - * Copyright (c) 2005 Ian McDonald + * Copyright (c) 2005 Ian McDonald * Copyright (c) 2005 Arnaldo Carvalho de Melo * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon * diff --git a/trunk/net/dccp/dccp.h b/trunk/net/dccp/dccp.h index a5c5475724c0..d00a2f4ee5dd 100644 --- a/trunk/net/dccp/dccp.h +++ b/trunk/net/dccp/dccp.h @@ -5,7 +5,7 @@ * * An implementation of the DCCP protocol * Copyright (c) 2005 Arnaldo Carvalho de Melo - * Copyright (c) 2005-6 Ian McDonald + * Copyright (c) 2005 Ian McDonald * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as @@ -81,14 +81,6 @@ static inline u64 max48(const u64 seq1, const u64 seq2) return after48(seq1, seq2) ? 
seq1 : seq2; } -/* is seq1 next seqno after seq2 */ -static inline int follows48(const u64 seq1, const u64 seq2) -{ - int diff = (seq1 & 0xFFFF) - (seq2 & 0xFFFF); - - return diff==1; -} - enum { DCCP_MIB_NUM = 0, DCCP_MIB_ACTIVEOPENS, /* ActiveOpens */ diff --git a/trunk/net/dccp/options.c b/trunk/net/dccp/options.c index 07a34696ac97..daf72bb671f0 100644 --- a/trunk/net/dccp/options.c +++ b/trunk/net/dccp/options.c @@ -4,7 +4,7 @@ * An implementation of the DCCP protocol * Copyright (c) 2005 Aristeu Sergio Rozanski Filho * Copyright (c) 2005 Arnaldo Carvalho de Melo - * Copyright (c) 2005 Ian McDonald + * Copyright (c) 2005 Ian McDonald * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License diff --git a/trunk/net/ipv4/netfilter/arp_tables.c b/trunk/net/ipv4/netfilter/arp_tables.c index 8d1d7a6e72a5..df4854cf598b 100644 --- a/trunk/net/ipv4/netfilter/arp_tables.c +++ b/trunk/net/ipv4/netfilter/arp_tables.c @@ -236,7 +236,7 @@ unsigned int arpt_do_table(struct sk_buff **pskb, struct arpt_entry *e, *back; const char *indev, *outdev; void *table_base; - struct xt_table_info *private; + struct xt_table_info *private = table->private; /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ if (!pskb_may_pull((*pskb), (sizeof(struct arphdr) + @@ -248,7 +248,6 @@ unsigned int arpt_do_table(struct sk_buff **pskb, outdev = out ? out->name : nulldevname; read_lock_bh(&table->lock); - private = table->private; table_base = (void *)private->entries[smp_processor_id()]; e = get_entry(table_base, private->hook_entry[hook]); back = get_entry(table_base, private->underflow[hook]); diff --git a/trunk/net/ipv4/tcp_output.c b/trunk/net/ipv4/tcp_output.c index b4f3ffe1b3b4..507adefbc17c 100644 --- a/trunk/net/ipv4/tcp_output.c +++ b/trunk/net/ipv4/tcp_output.c @@ -201,7 +201,6 @@ void tcp_select_initial_window(int __space, __u32 mss, * See RFC1323 for an explanation of the limit to 14 */ space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max); - space = min_t(u32, space, *window_clamp); while (space > 65535 && (*rcv_wscale) < 14) { space >>= 1; (*rcv_wscale)++; diff --git a/trunk/net/ipv6/tcp_ipv6.c b/trunk/net/ipv6/tcp_ipv6.c index 802a1a6b1037..b843a650be71 100644 --- a/trunk/net/ipv6/tcp_ipv6.c +++ b/trunk/net/ipv6/tcp_ipv6.c @@ -944,7 +944,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, * comment in that function for the gory details. -acme */ - newsk->sk_gso_type = SKB_GSO_TCPV6; + sk->sk_gso_type = SKB_GSO_TCPV6; __ip6_dst_store(newsk, dst, NULL); newtcp6sk = (struct tcp6_sock *)newsk; diff --git a/trunk/net/sctp/sm_make_chunk.c b/trunk/net/sctp/sm_make_chunk.c index 17b509282cf2..4f11f5858209 100644 --- a/trunk/net/sctp/sm_make_chunk.c +++ b/trunk/net/sctp/sm_make_chunk.c @@ -806,26 +806,38 @@ struct sctp_chunk *sctp_make_abort_no_data( /* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. 
*/ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc, - const struct msghdr *msg, - size_t paylen) + const struct sctp_chunk *chunk, + const struct msghdr *msg) { struct sctp_chunk *retval; - void *payload = NULL; - int err; + void *payload = NULL, *payoff; + size_t paylen = 0; + struct iovec *iov = NULL; + int iovlen = 0; + + if (msg) { + iov = msg->msg_iov; + iovlen = msg->msg_iovlen; + paylen = get_user_iov_size(iov, iovlen); + } - retval = sctp_make_abort(asoc, NULL, sizeof(sctp_errhdr_t) + paylen); + retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen); if (!retval) goto err_chunk; if (paylen) { /* Put the msg_iov together into payload. */ - payload = kmalloc(paylen, GFP_KERNEL); + payload = kmalloc(paylen, GFP_ATOMIC); if (!payload) goto err_payload; + payoff = payload; - err = memcpy_fromiovec(payload, msg->msg_iov, paylen); - if (err < 0) - goto err_copy; + for (; iovlen > 0; --iovlen) { + if (copy_from_user(payoff, iov->iov_base,iov->iov_len)) + goto err_copy; + payoff += iov->iov_len; + iov++; + } } sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen); diff --git a/trunk/net/sctp/sm_statefuns.c b/trunk/net/sctp/sm_statefuns.c index 5b5ae7958322..ead3f1b0ea3d 100644 --- a/trunk/net/sctp/sm_statefuns.c +++ b/trunk/net/sctp/sm_statefuns.c @@ -4031,12 +4031,18 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort( * from its upper layer, but retransmits data to the far end * if necessary to fill gaps. */ - struct sctp_chunk *abort = arg; + struct msghdr *msg = arg; + struct sctp_chunk *abort; sctp_disposition_t retval; retval = SCTP_DISPOSITION_CONSUME; - sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); + /* Generate ABORT chunk to send the peer. */ + abort = sctp_make_abort_user(asoc, NULL, msg); + if (!abort) + retval = SCTP_DISPOSITION_NOMEM; + else + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); /* Even if we can't send the ABORT due to low memory delete the * TCB. This is a departure from our typical NOMEM handling. 
@@ -4160,7 +4166,8 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort( void *arg, sctp_cmd_seq_t *commands) { - struct sctp_chunk *abort = arg; + struct msghdr *msg = arg; + struct sctp_chunk *abort; sctp_disposition_t retval; /* Stop T1-init timer */ @@ -4168,7 +4175,12 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort( SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); retval = SCTP_DISPOSITION_CONSUME; - sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); + /* Generate ABORT chunk to send the peer */ + abort = sctp_make_abort_user(asoc, NULL, msg); + if (!abort) + retval = SCTP_DISPOSITION_NOMEM; + else + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); diff --git a/trunk/net/sctp/socket.c b/trunk/net/sctp/socket.c index fde3f55bfd4b..54722e622e6d 100644 --- a/trunk/net/sctp/socket.c +++ b/trunk/net/sctp/socket.c @@ -1520,16 +1520,8 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, goto out_unlock; } if (sinfo_flags & SCTP_ABORT) { - struct sctp_chunk *chunk; - - chunk = sctp_make_abort_user(asoc, msg, msg_len); - if (!chunk) { - err = -ENOMEM; - goto out_unlock; - } - SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc); - sctp_primitive_ABORT(asoc, chunk); + sctp_primitive_ABORT(asoc, msg); err = 0; goto out_unlock; } diff --git a/trunk/net/sunrpc/auth_gss/auth_gss.c b/trunk/net/sunrpc/auth_gss/auth_gss.c index ef1cf5b476c8..4a9aa9393b97 100644 --- a/trunk/net/sunrpc/auth_gss/auth_gss.c +++ b/trunk/net/sunrpc/auth_gss/auth_gss.c @@ -718,7 +718,8 @@ gss_destroy(struct rpc_auth *auth) auth, auth->au_flavor); gss_auth = container_of(auth, struct gss_auth, rpc_auth); - rpc_unlink(gss_auth->dentry); + rpc_unlink(gss_auth->path); + dput(gss_auth->dentry); gss_auth->dentry = NULL; gss_mech_put(gss_auth->mech); diff --git a/trunk/net/sunrpc/clnt.c b/trunk/net/sunrpc/clnt.c index 3e19d321067a..d6409e757219 100644 --- a/trunk/net/sunrpc/clnt.c +++ b/trunk/net/sunrpc/clnt.c @@ -183,7 +183,8 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname, out_no_auth: if (!IS_ERR(clnt->cl_dentry)) { - rpc_rmdir(clnt->cl_dentry); + rpc_rmdir(clnt->cl_pathname); + dput(clnt->cl_dentry); rpc_put_mount(); } out_no_path: @@ -250,8 +251,10 @@ rpc_clone_client(struct rpc_clnt *clnt) new->cl_autobind = 0; new->cl_oneshot = 0; new->cl_dead = 0; - if (!IS_ERR(new->cl_dentry)) + if (!IS_ERR(new->cl_dentry)) { dget(new->cl_dentry); + rpc_get_mount(); + } rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); if (new->cl_auth) atomic_inc(&new->cl_auth->au_count); @@ -314,15 +317,11 @@ rpc_destroy_client(struct rpc_clnt *clnt) clnt->cl_auth = NULL; } if (clnt->cl_parent != clnt) { - if (!IS_ERR(clnt->cl_dentry)) - dput(clnt->cl_dentry); rpc_destroy_client(clnt->cl_parent); goto out_free; } - if (!IS_ERR(clnt->cl_dentry)) { - rpc_rmdir(clnt->cl_dentry); - rpc_put_mount(); - } + if (clnt->cl_pathname[0]) + rpc_rmdir(clnt->cl_pathname); if (clnt->cl_xprt) { xprt_destroy(clnt->cl_xprt); clnt->cl_xprt = NULL; @@ -332,6 +331,10 @@ rpc_destroy_client(struct rpc_clnt *clnt) out_free: rpc_free_iostats(clnt->cl_metrics); clnt->cl_metrics = NULL; + if (!IS_ERR(clnt->cl_dentry)) { + dput(clnt->cl_dentry); + rpc_put_mount(); + } kfree(clnt); return 0; } @@ -1181,17 +1184,6 @@ call_verify(struct rpc_task *task) u32 *p = iov->iov_base, n; int error = -EACCES; - if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) { - /* RFC-1014 says that the representation of XDR data must be a - * multiple of four bytes - * - if 
it isn't pointer subtraction in the NFS client may give - * undefined results - */ - printk(KERN_WARNING - "call_verify: XDR representation not a multiple of" - " 4 bytes: 0x%x\n", task->tk_rqstp->rq_rcv_buf.len); - goto out_eio; - } if ((len -= 3) < 0) goto out_overflow; p += 1; /* skip XID */ diff --git a/trunk/net/sunrpc/rpc_pipe.c b/trunk/net/sunrpc/rpc_pipe.c index 0b1a1ac8a4bc..a3bd2db2e024 100644 --- a/trunk/net/sunrpc/rpc_pipe.c +++ b/trunk/net/sunrpc/rpc_pipe.c @@ -539,7 +539,6 @@ rpc_depopulate(struct dentry *parent) rpc_close_pipes(dentry->d_inode); simple_unlink(dir, dentry); } - inode_dir_notify(dir, DN_DELETE); dput(dentry); } while (n); goto repeat; @@ -611,8 +610,8 @@ __rpc_rmdir(struct inode *dir, struct dentry *dentry) int error; shrink_dcache_parent(dentry); - if (d_unhashed(dentry)) - return 0; + if (dentry->d_inode) + rpc_close_pipes(dentry->d_inode); if ((error = simple_rmdir(dir, dentry)) != 0) return error; if (!error) { @@ -685,20 +684,28 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client) } int -rpc_rmdir(struct dentry *dentry) +rpc_rmdir(char *path) { - struct dentry *parent; + struct nameidata nd; + struct dentry *dentry; struct inode *dir; int error; - parent = dget_parent(dentry); - dir = parent->d_inode; + if ((error = rpc_lookup_parent(path, &nd)) != 0) + return error; + dir = nd.dentry->d_inode; mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); + dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); + if (IS_ERR(dentry)) { + error = PTR_ERR(dentry); + goto out_release; + } rpc_depopulate(dentry); error = __rpc_rmdir(dir, dentry); dput(dentry); +out_release: mutex_unlock(&dir->i_mutex); - dput(parent); + rpc_release_path(&nd); return error; } @@ -739,26 +746,32 @@ rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags) } int -rpc_unlink(struct dentry *dentry) +rpc_unlink(char *path) { - struct dentry *parent; + struct nameidata nd; + struct dentry *dentry; struct inode *dir; - int error = 0; + int error; - parent = dget_parent(dentry); - dir = parent->d_inode; + if ((error = rpc_lookup_parent(path, &nd)) != 0) + return error; + dir = nd.dentry->d_inode; mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); - if (!d_unhashed(dentry)) { - d_drop(dentry); - if (dentry->d_inode) { - rpc_close_pipes(dentry->d_inode); - error = simple_unlink(dir, dentry); - } - inode_dir_notify(dir, DN_DELETE); + dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); + if (IS_ERR(dentry)) { + error = PTR_ERR(dentry); + goto out_release; + } + d_drop(dentry); + if (dentry->d_inode) { + rpc_close_pipes(dentry->d_inode); + error = simple_unlink(dir, dentry); } dput(dentry); + inode_dir_notify(dir, DN_DELETE); +out_release: mutex_unlock(&dir->i_mutex); - dput(parent); + rpc_release_path(&nd); return error; }
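
For reference, below is a minimal standalone sketch (not part of the patch) of the priority-merging rule that the include/linux/ioprio.h hunk above turns into an inline ioprio_best(). The function body follows that hunk; the IOPRIO_* encoding macros and class constants are reproduced from memory of the kernel's ioprio.h and should be treated as assumptions for illustration only.

/*
 * Standalone illustration of the ioprio_best() merge rule from the
 * include/linux/ioprio.h hunk above.  The encoding macros below are
 * assumptions (13-bit class shift, class NONE/RT/BE/IDLE = 0..3),
 * not taken from this patch.
 */
#include <stdio.h>

#define IOPRIO_CLASS_SHIFT      13
#define IOPRIO_PRIO_CLASS(m)    ((m) >> IOPRIO_CLASS_SHIFT)
#define IOPRIO_PRIO_VALUE(c, d) (((c) << IOPRIO_CLASS_SHIFT) | (d))
#define ioprio_valid(m)         (IOPRIO_PRIO_CLASS(m) != IOPRIO_CLASS_NONE)

enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };

static unsigned short ioprio_best(unsigned short aprio, unsigned short bprio)
{
	unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
	unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);

	if (!ioprio_valid(aprio))
		return bprio;
	if (!ioprio_valid(bprio))
		return aprio;

	if (aclass == IOPRIO_CLASS_NONE)
		aclass = IOPRIO_CLASS_BE;
	if (bclass == IOPRIO_CLASS_NONE)
		bclass = IOPRIO_CLASS_BE;

	if (aclass == bclass)
		return aprio < bprio ? aprio : bprio;  /* same class: lower value = higher prio */
	return aclass > bclass ? bprio : aprio;        /* lower class number wins (RT < BE < IDLE) */
}

int main(void)
{
	unsigned short rt4 = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 4);
	unsigned short be2 = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 2);

	/* An RT priority beats a BE priority regardless of the data value. */
	printf("best class = %u\n",
	       (unsigned)IOPRIO_PRIO_CLASS(ioprio_best(rt4, be2)));
	return 0;
}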