diff --git a/[refs] b/[refs]
index 87f8e9deaeac..964c01c000e6 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c9b84dcac6e20ec8e5424f3dff853daa863bfdd0
+refs/heads/master: bc519f30eb039f023c15167663d5a8a14fed7dcb
diff --git a/trunk/Documentation/feature-removal-schedule.txt b/trunk/Documentation/feature-removal-schedule.txt
index 43ab119963d5..421bcfff6ad2 100644
--- a/trunk/Documentation/feature-removal-schedule.txt
+++ b/trunk/Documentation/feature-removal-schedule.txt
@@ -57,15 +57,6 @@ Who: Jody McIntyre
 
 ---------------------------
 
-What:	sbp2: module parameter "force_inquiry_hack"
-When:	July 2006
-Why:	Superceded by parameter "workarounds". Both parameters are meant to be
-	used ad-hoc and for single devices only, i.e. not in modprobe.conf,
-	therefore the impact of this feature replacement should be low.
-Who:	Stefan Richter
-
----------------------------
-
 What:	Video4Linux API 1 ioctls and video_decoder.h from Video devices.
 When:	July 2006
 Why:	V4L1 AP1 was replaced by V4L2 API. during migration from 2.4 to 2.6
diff --git a/trunk/Documentation/watchdog/watchdog-api.txt b/trunk/Documentation/watchdog/watchdog-api.txt
index 21ed51173662..c5beb548cfc4 100644
--- a/trunk/Documentation/watchdog/watchdog-api.txt
+++ b/trunk/Documentation/watchdog/watchdog-api.txt
@@ -36,9 +36,6 @@ timeout or margin.
 The simplest way to ping the watchdog is to write some data to the
 device. So a very simple watchdog daemon would look like this:
 
-#include
-#include
-
 int main(int argc, const char *argv[]) {
 	int fd=open("/dev/watchdog",O_WRONLY);
 	if (fd==-1) {
diff --git a/trunk/arch/arm/mach-pxa/mainstone.c b/trunk/arch/arm/mach-pxa/mainstone.c
index 02e188d98e7d..98356f810007 100644
--- a/trunk/arch/arm/mach-pxa/mainstone.c
+++ b/trunk/arch/arm/mach-pxa/mainstone.c
@@ -95,10 +95,7 @@ static void __init mainstone_init_irq(void)
 	for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) {
 		set_irq_chip(irq, &mainstone_irq_chip);
 		set_irq_handler(irq, do_level_IRQ);
-		if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14))
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN);
-		else
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
 	}
 	set_irq_flags(MAINSTONE_IRQ(8), 0);
 	set_irq_flags(MAINSTONE_IRQ(12), 0);
diff --git a/trunk/arch/arm/mach-s3c2410/sleep.S b/trunk/arch/arm/mach-s3c2410/sleep.S
index 73de2eaca22a..832fb86a03b4 100644
--- a/trunk/arch/arm/mach-s3c2410/sleep.S
+++ b/trunk/arch/arm/mach-s3c2410/sleep.S
@@ -59,7 +59,8 @@ ENTRY(s3c2410_cpu_suspend)
 	mrc	p15, 0, r5, c13, c0, 0	@ PID
 	mrc	p15, 0, r6, c3, c0, 0	@ Domain ID
 	mrc	p15, 0, r7, c2, c0, 0	@ translation table base address
-	mrc	p15, 0, r8, c1, c0, 0	@ control register
+	mrc	p15, 0, r8, c2, c0, 0	@ auxiliary control register
+	mrc	p15, 0, r9, c1, c0, 0	@ control register
 
 	stmia	r0, { r4 - r13 }
 
@@ -164,6 +165,7 @@ ENTRY(s3c2410_cpu_resume)
 	mcr	p15, 0, r5, c13, c0, 0	@ PID
 	mcr	p15, 0, r6, c3, c0, 0	@ Domain ID
 	mcr	p15, 0, r7, c2, c0, 0	@ translation table base
+	mcr	p15, 0, r8, c1, c1, 0	@ auxilliary control
 
 #ifdef CONFIG_DEBUG_RESUME
 	mov	r3, #'R'
@@ -171,7 +173,7 @@ ENTRY(s3c2410_cpu_resume)
 #endif
 
 	ldr	r2, =resume_with_mmu
-	mcr	p15, 0, r8, c1, c0, 0	@ turn on MMU, etc
+	mcr	p15, 0, r9, c1, c0, 0	@ turn on MMU, etc
 	nop				@ second-to-last before mmu
 	mov	pc, r2			@ go back to virtual address
 
diff --git a/trunk/arch/ia64/configs/sn2_defconfig b/trunk/arch/ia64/configs/sn2_defconfig
index 9ea35398e10d..f6a8853cd1b4 100644
--- a/trunk/arch/ia64/configs/sn2_defconfig
+++ b/trunk/arch/ia64/configs/sn2_defconfig
@@ -134,7 +134,7 @@ CONFIG_ARCH_FLATMEM_ENABLE=y
 CONFIG_ARCH_SPARSEMEM_ENABLE=y
 CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
 CONFIG_NUMA=y
-CONFIG_NODES_SHIFT=10
+CONFIG_NODES_SHIFT=8
 CONFIG_VIRTUAL_MEM_MAP=y
 CONFIG_HOLES_IN_ZONE=y
 CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
@@ -1159,7 +1159,7 @@ CONFIG_DETECT_SOFTLOCKUP=y
 # CONFIG_SCHEDSTATS is not set
 # CONFIG_DEBUG_SLAB is not set
 CONFIG_DEBUG_PREEMPT=y
-# CONFIG_DEBUG_MUTEXES is not set
+CONFIG_DEBUG_MUTEXES=y
 # CONFIG_DEBUG_SPINLOCK is not set
 # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
 # CONFIG_DEBUG_KOBJECT is not set
diff --git a/trunk/arch/ia64/kernel/iosapic.c b/trunk/arch/ia64/kernel/iosapic.c
index d58c1c5c903a..7956eb9058fc 100644
--- a/trunk/arch/ia64/kernel/iosapic.c
+++ b/trunk/arch/ia64/kernel/iosapic.c
@@ -416,7 +416,7 @@ iosapic_end_level_irq (unsigned int irq)
 	ia64_vector vec = irq_to_vector(irq);
 	struct iosapic_rte_info *rte;
 
-	move_native_irq(irq);
+	move_irq(irq);
 	list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
 		iosapic_eoi(rte->addr, vec);
 }
@@ -458,7 +458,7 @@ iosapic_ack_edge_irq (unsigned int irq)
 {
 	irq_desc_t *idesc = irq_descp(irq);
 
-	move_native_irq(irq);
+	move_irq(irq);
 	/*
 	 * Once we have recorded IRQ_PENDING already, we can mask the
 	 * interrupt for real. This prevents IRQ storms from unhandled
diff --git a/trunk/arch/ia64/kernel/irq.c b/trunk/arch/ia64/kernel/irq.c
index 9c72ea3f6432..5ce908ef9c95 100644
--- a/trunk/arch/ia64/kernel/irq.c
+++ b/trunk/arch/ia64/kernel/irq.c
@@ -101,6 +101,7 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 
 	if (irq < NR_IRQS) {
 		irq_affinity[irq] = mask;
+		set_irq_info(irq, mask);
 		irq_redir[irq] = (char) (redir & 0xff);
 	}
 }
diff --git a/trunk/drivers/char/watchdog/i8xx_tco.c b/trunk/drivers/char/watchdog/i8xx_tco.c
index fa2ba9ebe42a..a13395e2c372 100644
--- a/trunk/drivers/char/watchdog/i8xx_tco.c
+++ b/trunk/drivers/char/watchdog/i8xx_tco.c
@@ -33,6 +33,11 @@
 *	82801E (C-ICH) : document number 273599-001, 273645-002,
 *	82801EB (ICH5) : document number 252516-001, 252517-003,
 *	82801ER (ICH5R) : document number 252516-001, 252517-003,
+*	82801FB (ICH6) : document number 301473-002, 301474-007,
+*	82801FR (ICH6R) : document number 301473-002, 301474-007,
+*	82801FBM (ICH6-M) : document number 301473-002, 301474-007,
+*	82801FW (ICH6W) : document number 301473-001, 301474-007,
+*	82801FRW (ICH6RW) : document number 301473-001, 301474-007
 *
 *	20000710 Nils Faerber
 *	    Initial Version 0.01
@@ -61,10 +66,6 @@
 *	20050807 Wim Van Sebroeck
 *	0.08 Make sure that the watchdog is only "armed" when started.
 *	     (Kernel Bug 4251)
-*	20060416 Wim Van Sebroeck
-*	0.09 Remove support for the ICH6, ICH6R, ICH6-M, ICH6W and ICH6RW and
-*	     ICH7 chipsets. (See Kernel Bug 6031 - other code will support these
-*	     chipsets)
 */
 
/*
@@ -89,7 +90,7 @@
 #include "i8xx_tco.h"
 
 /* Module and version information */
-#define TCO_VERSION "0.09"
+#define TCO_VERSION "0.08"
 #define TCO_MODULE_NAME "i8xx TCO timer"
 #define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION
 #define PFX TCO_MODULE_NAME ": "
@@ -390,6 +391,11 @@ static struct pci_device_id i8xx_tco_pci_tbl[] = {
 	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, PCI_ANY_ID, PCI_ANY_ID, },
 	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801E_0, PCI_ANY_ID, PCI_ANY_ID, },
 	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_2, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, PCI_ANY_ID, PCI_ANY_ID, },
 	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, PCI_ANY_ID, PCI_ANY_ID, },
 	{ 0, },			/* End of list */
 };
diff --git a/trunk/drivers/char/watchdog/s3c2410_wdt.c b/trunk/drivers/char/watchdog/s3c2410_wdt.c
index 1ea04e9b2b0b..9dc54736e4eb 100644
--- a/trunk/drivers/char/watchdog/s3c2410_wdt.c
+++ b/trunk/drivers/char/watchdog/s3c2410_wdt.c
@@ -423,12 +423,6 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
 	if (tmr_atboot && started == 0) {
 		printk(KERN_INFO PFX "Starting Watchdog Timer\n");
 		s3c2410wdt_start();
-	} else if (!tmr_atboot) {
-		/* if we're not enabling the watchdog, then ensure it is
-		 * disabled if it has been left running from the bootloader
-		 * or other source */
-
-		s3c2410wdt_stop();
 	}
 
 	return 0;
diff --git a/trunk/drivers/char/watchdog/sc1200wdt.c b/trunk/drivers/char/watchdog/sc1200wdt.c
index 20b88f9b7be2..515ce7572049 100644
--- a/trunk/drivers/char/watchdog/sc1200wdt.c
+++ b/trunk/drivers/char/watchdog/sc1200wdt.c
@@ -377,7 +377,7 @@ static int __init sc1200wdt_init(void)
 {
 	int ret;
 
-	printk("%s\n", banner);
+	printk(banner);
 
 	spin_lock_init(&sc1200wdt_lock);
 	sema_init(&open_sem, 1);
diff --git a/trunk/drivers/ieee1394/ohci1394.c b/trunk/drivers/ieee1394/ohci1394.c
index 11f13778f139..19222878aae9 100644
--- a/trunk/drivers/ieee1394/ohci1394.c
+++ b/trunk/drivers/ieee1394/ohci1394.c
@@ -553,7 +553,7 @@ static void ohci_initialize(struct ti_ohci *ohci)
 	 * register content.
 	 * To actually enable physical responses is the job of our interrupt
 	 * handler which programs the physical request filter. */
-	reg_write(ohci, OHCI1394_PhyUpperBound, 0x01000000);
+	reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000);
 
 	DBGMSG("physUpperBoundOffset=%08x",
 	       reg_read(ohci, OHCI1394_PhyUpperBound));
diff --git a/trunk/drivers/ieee1394/sbp2.c b/trunk/drivers/ieee1394/sbp2.c
index 8a23fb54c693..f4206604db03 100644
--- a/trunk/drivers/ieee1394/sbp2.c
+++ b/trunk/drivers/ieee1394/sbp2.c
@@ -42,7 +42,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -118,8 +117,7 @@ MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers (default
 */
 static int max_sectors = SBP2_MAX_SECTORS;
 module_param(max_sectors, int, 0444);
-MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = "
-		 __stringify(SBP2_MAX_SECTORS) ")");
+MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = 255)");
 
 /*
 * Exclusive login to sbp2 device? In most cases, the sbp2 driver should
@@ -137,45 +135,18 @@ module_param(exclusive_login, int, 0644);
 MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)");
 
 /*
- * If any of the following workarounds is required for your device to work,
- * please submit the kernel messages logged by sbp2 to the linux1394-devel
- * mailing list.
+ * SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on
+ * if your sbp2 device is not properly handling the SCSI inquiry command.
+ * This hack makes the inquiry look more like a typical MS Windows inquiry
+ * by enforcing 36 byte inquiry and avoiding access to mode_sense page 8.
 *
- * - 128kB max transfer
- *   Limit transfer size. Necessary for some old bridges.
- *
- * - 36 byte inquiry
- *   When scsi_mod probes the device, let the inquiry command look like that
- *   from MS Windows.
- *
- * - skip mode page 8
- *   Suppress sending of mode_sense for mode page 8 if the device pretends to
- *   support the SCSI Primary Block commands instead of Reduced Block Commands.
- *
- * - fix capacity
- *   Tell sd_mod to correct the last sector number reported by read_capacity.
- *   Avoids access beyond actual disk limits on devices with an off-by-one bug.
- *   Don't use this with devices which don't have this bug.
- *
- * - override internal blacklist
- *   Instead of adding to the built-in blacklist, use only the workarounds
- *   specified in the module load parameter.
- *   Useful if a blacklist entry interfered with a non-broken device.
+ * If force_inquiry_hack=1 is required for your device to work,
+ * please submit the logged sbp2_firmware_revision value of this device to
+ * the linux1394-devel mailing list.
 */
-static int sbp2_default_workarounds;
-module_param_named(workarounds, sbp2_default_workarounds, int, 0644);
-MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
-	", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
-	", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36)
-	", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
-	", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
-	", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
-	", or a combination)");
-
-/* legacy parameter */
 static int force_inquiry_hack;
 module_param(force_inquiry_hack, int, 0644);
-MODULE_PARM_DESC(force_inquiry_hack, "Deprecated, use 'workarounds'");
+MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)");
 
 /*
 * Export information about protocols/devices supported by this driver.
@@ -295,55 +266,14 @@ static struct hpsb_protocol_driver sbp2_driver = {
 };
 
 /*
- * List of devices with known bugs.
- *
- * The firmware_revision field, masked with 0xffff00, is the best indicator
- * for the type of bridge chip of a device. It yields a few false positives
- * but this did not break correctly behaving devices so far.
+ * List of device firmwares that require the inquiry hack.
+ * Yields a few false positives but did not break other devices so far.
 */
-static const struct {
-	u32 firmware_revision;
-	u32 model_id;
-	unsigned workarounds;
-} sbp2_workarounds_table[] = {
-	/* TSB42AA9 */ {
-		.firmware_revision = 0x002800,
-		.workarounds = SBP2_WORKAROUND_INQUIRY_36 |
-			SBP2_WORKAROUND_MODE_SENSE_8,
-	},
-	/* Initio bridges, actually only needed for some older ones */ {
-		.firmware_revision = 0x000200,
-		.workarounds = SBP2_WORKAROUND_INQUIRY_36,
-	},
-	/* Symbios bridge */ {
-		.firmware_revision = 0xa0b800,
-		.workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
-	},
-	/*
-	 * Note about the following Apple iPod blacklist entries:
-	 *
-	 * There are iPods (2nd gen, 3rd gen) with model_id==0. Since our
-	 * matching logic treats 0 as a wildcard, we cannot match this ID
-	 * without rewriting the matching routine. Fortunately these iPods
-	 * do not feature the read_capacity bug according to one report.
-	 * Read_capacity behaviour as well as model_id could change due to
-	 * Apple-supplied firmware updates though.
-	 */
-	/* iPod 4th generation */ {
-		.firmware_revision = 0x0a2700,
-		.model_id = 0x000021,
-		.workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
-	},
-	/* iPod mini */ {
-		.firmware_revision = 0x0a2700,
-		.model_id = 0x000023,
-		.workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
-	},
-	/* iPod Photo */ {
-		.firmware_revision = 0x0a2700,
-		.model_id = 0x00007e,
-		.workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
-	}
+static u32 sbp2_broken_inquiry_list[] = {
+	0x00002800,	/* Stefan Richter */
+	/* DViCO Momobay CX-1 */
+	0x00000200	/* Andreas Plesch */
+	/* QPS Fire DVDBurner */
 };
 
 /**************************************
@@ -835,16 +765,11 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
 
 	/* Register the status FIFO address range. We could use the same FIFO
 	 * for targets at different nodes. However we need different FIFOs per
-	 * target in order to support multi-unit devices.
-	 * The FIFO is located out of the local host controller's physical range
-	 * but, if possible, within the posted write area. Status writes will
-	 * then be performed as unified transactions. This slightly reduces
-	 * bandwidth usage, and some Prolific based devices seem to require it.
-	 */
+	 * target in order to support multi-unit devices.
*/ scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace( &sbp2_highlevel, ud->ne->host, &sbp2_ops, sizeof(struct sbp2_status_block), sizeof(quadlet_t), - 0x010000000000ULL, CSR1212_ALL_SPACE_END); + ~0ULL, ~0ULL); if (!scsi_id->status_fifo_addr) { SBP2_ERR("failed to allocate status FIFO address range"); goto failed_alloc; @@ -1525,8 +1450,7 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, struct csr1212_dentry *dentry; u64 management_agent_addr; u32 command_set_spec_id, command_set, unit_characteristics, - firmware_revision; - unsigned workarounds; + firmware_revision, workarounds; int i; SBP2_DEBUG_ENTER(); @@ -1582,8 +1506,12 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, case SBP2_FIRMWARE_REVISION_KEY: /* Firmware revision */ firmware_revision = kv->value.immediate; - SBP2_DEBUG("sbp2_firmware_revision = %x", - (unsigned int)firmware_revision); + if (force_inquiry_hack) + SBP2_INFO("sbp2_firmware_revision = %x", + (unsigned int)firmware_revision); + else + SBP2_DEBUG("sbp2_firmware_revision = %x", + (unsigned int)firmware_revision); break; default: @@ -1591,44 +1519,41 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, } } - workarounds = sbp2_default_workarounds; - if (force_inquiry_hack) { - SBP2_WARN("force_inquiry_hack is deprecated. " - "Use parameter 'workarounds' instead."); - workarounds |= SBP2_WORKAROUND_INQUIRY_36; - } + /* This is the start of our broken device checking. We try to hack + * around oddities and known defects. */ + workarounds = 0x0; - if (!(workarounds & SBP2_WORKAROUND_OVERRIDE)) - for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) { - if (sbp2_workarounds_table[i].firmware_revision && - sbp2_workarounds_table[i].firmware_revision != - (firmware_revision & 0xffff00)) - continue; - if (sbp2_workarounds_table[i].model_id && - sbp2_workarounds_table[i].model_id != ud->model_id) - continue; - workarounds |= sbp2_workarounds_table[i].workarounds; - break; + /* If the vendor id is 0xa0b8 (Symbios vendor id), then we have a + * bridge with 128KB max transfer size limitation. For sanity, we + * only voice this when the current max_sectors setting + * exceeds the 128k limit. By default, that is not the case. + * + * It would be really nice if we could detect this before the scsi + * host gets initialized. That way we can down-force the + * max_sectors to account for it. That is not currently + * possible. */ + if ((firmware_revision & 0xffff00) == + SBP2_128KB_BROKEN_FIRMWARE && + (max_sectors * 512) > (128*1024)) { + SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB max transfer size.", + NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid)); + SBP2_WARN("WARNING: Current max_sectors setting is larger than 128KB (%d sectors)!", + max_sectors); + workarounds |= SBP2_BREAKAGE_128K_MAX_TRANSFER; + } + + /* Check for a blacklisted set of devices that require us to force + * a 36 byte host inquiry. This can be overriden as a module param + * (to force all hosts). */ + for (i = 0; i < ARRAY_SIZE(sbp2_broken_inquiry_list); i++) { + if ((firmware_revision & 0xffff00) == + sbp2_broken_inquiry_list[i]) { + SBP2_WARN("Node " NODE_BUS_FMT ": Using 36byte inquiry workaround", + NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid)); + workarounds |= SBP2_BREAKAGE_INQUIRY_HACK; + break; /* No need to continue. 
*/ } - - if (workarounds) - SBP2_INFO("Workarounds for node " NODE_BUS_FMT ": 0x%x " - "(firmware_revision 0x%06x, vendor_id 0x%06x," - " model_id 0x%06x)", - NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid), - workarounds, firmware_revision, - ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id, - ud->model_id); - - /* We would need one SCSI host template for each target to adjust - * max_sectors on the fly, therefore warn only. */ - if (workarounds & SBP2_WORKAROUND_128K_MAX_TRANS && - (max_sectors * 512) > (128 * 1024)) - SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB " - "max transfer size. WARNING: Current max_sectors " - "setting is larger than 128KB (%d sectors)", - NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid), - max_sectors); + } /* If this is a logical unit directory entry, process the parent * to get the values. */ @@ -2522,25 +2447,19 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev) scsi_id->sdev = sdev; - if (scsi_id->workarounds & SBP2_WORKAROUND_INQUIRY_36) + if (force_inquiry_hack || + scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK) { sdev->inquiry_len = 36; + sdev->skip_ms_page_8 = 1; + } return 0; } static int sbp2scsi_slave_configure(struct scsi_device *sdev) { - struct scsi_id_instance_data *scsi_id = - (struct scsi_id_instance_data *)sdev->host->hostdata[0]; - blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); sdev->use_10_for_rw = 1; sdev->use_10_for_ms = 1; - - if (sdev->type == TYPE_DISK && - scsi_id->workarounds & SBP2_WORKAROUND_MODE_SENSE_8) - sdev->skip_ms_page_8 = 1; - if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) - sdev->fix_capacity = 1; return 0; } @@ -2684,9 +2603,7 @@ static int sbp2_module_init(void) scsi_driver_template.cmd_per_lun = 1; } - if (sbp2_default_workarounds & SBP2_WORKAROUND_128K_MAX_TRANS && - (max_sectors * 512) > (128 * 1024)) - max_sectors = 128 * 1024 / 512; + /* Set max sectors (module load option). Default is 255 sectors. */ scsi_driver_template.max_sectors = max_sectors; /* Register our high level driver with 1394 stack */ diff --git a/trunk/drivers/ieee1394/sbp2.h b/trunk/drivers/ieee1394/sbp2.h index f4ccc9d0fba4..e2d357a9ea3a 100644 --- a/trunk/drivers/ieee1394/sbp2.h +++ b/trunk/drivers/ieee1394/sbp2.h @@ -226,6 +226,11 @@ struct sbp2_status_block { #define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e #define SBP2_SW_VERSION_ENTRY 0x00010483 +/* + * Other misc defines + */ +#define SBP2_128KB_BROKEN_FIRMWARE 0xa0b800 + /* * SCSI specific stuff */ @@ -234,13 +239,6 @@ struct sbp2_status_block { #define SBP2_MAX_SECTORS 255 /* Max sectors supported */ #define SBP2_MAX_CMDS 8 /* This should be safe */ -/* Flags for detected oddities and brokeness */ -#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1 -#define SBP2_WORKAROUND_INQUIRY_36 0x2 -#define SBP2_WORKAROUND_MODE_SENSE_8 0x4 -#define SBP2_WORKAROUND_FIX_CAPACITY 0x8 -#define SBP2_WORKAROUND_OVERRIDE 0x100 - /* This is the two dma types we use for cmd_dma below */ enum cmd_dma_types { CMD_DMA_NONE, @@ -270,6 +268,10 @@ struct sbp2_command_info { }; +/* A list of flags for detected oddities and brokeness. 
*/ +#define SBP2_BREAKAGE_128K_MAX_TRANSFER 0x1 +#define SBP2_BREAKAGE_INQUIRY_HACK 0x2 + struct sbp2scsi_host_info; /* @@ -343,7 +345,7 @@ struct scsi_id_instance_data { struct Scsi_Host *scsi_host; /* Device specific workarounds/brokeness */ - unsigned workarounds; + u32 workarounds; }; /* Sbp2 host data structure (one per IEEE1394 host) */ diff --git a/trunk/drivers/infiniband/core/uverbs_mem.c b/trunk/drivers/infiniband/core/uverbs_mem.c index efe147dbeb42..36a32c315668 100644 --- a/trunk/drivers/infiniband/core/uverbs_mem.c +++ b/trunk/drivers/infiniband/core/uverbs_mem.c @@ -211,10 +211,8 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem) */ work = kmalloc(sizeof *work, GFP_KERNEL); - if (!work) { - mmput(mm); + if (!work) return; - } INIT_WORK(&work->work, ib_umem_account, work); work->mm = mm; diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_cmd.c b/trunk/drivers/infiniband/hw/mthca/mthca_cmd.c index 798e13e14faf..1985b5dfa481 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_cmd.c @@ -182,7 +182,7 @@ struct mthca_cmd_context { u8 status; }; -static int fw_cmd_doorbell = 0; +static int fw_cmd_doorbell = 1; module_param(fw_cmd_doorbell, int, 0644); MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero " "(and supported by FW)"); diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_qp.c b/trunk/drivers/infiniband/hw/mthca/mthca_qp.c index 07c13be07a4a..19765f6f8d58 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_qp.c @@ -1727,7 +1727,23 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, ind = qp->rq.next_ind; - for (nreq = 0; wr; wr = wr->next) { + for (nreq = 0; wr; ++nreq, wr = wr->next) { + if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { + nreq = 0; + + doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); + doorbell[1] = cpu_to_be32(qp->qpn << 8); + + wmb(); + + mthca_write64(doorbell, + dev->kar + MTHCA_RECEIVE_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); + + qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; + size0 = 0; + } + if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { mthca_err(dev, "RQ %06x full (%u head, %u tail," " %d max, %d nreq)\n", qp->qpn, @@ -1781,23 +1797,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, ++ind; if (unlikely(ind >= qp->rq.max)) ind -= qp->rq.max; - - ++nreq; - if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { - nreq = 0; - - doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); - doorbell[1] = cpu_to_be32(qp->qpn << 8); - - wmb(); - - mthca_write64(doorbell, - dev->kar + MTHCA_RECEIVE_DOORBELL, - MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); - - qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; - size0 = 0; - } } out: diff --git a/trunk/drivers/infiniband/ulp/srp/ib_srp.c b/trunk/drivers/infiniband/ulp/srp/ib_srp.c index 9cbdffa08dc2..c32ce4348e1b 100644 --- a/trunk/drivers/infiniband/ulp/srp/ib_srp.c +++ b/trunk/drivers/infiniband/ulp/srp/ib_srp.c @@ -340,10 +340,7 @@ static void srp_disconnect_target(struct srp_target_port *target) /* XXX should send SRP_I_LOGOUT request */ init_completion(&target->done); - if (ib_send_cm_dreq(target->cm_id, NULL, 0)) { - printk(KERN_DEBUG PFX "Sending CM DREQ failed\n"); - return; - } + ib_send_cm_dreq(target->cm_id, NULL, 0); wait_for_completion(&target->done); } @@ -354,6 +351,7 @@ static void 
srp_remove_work(void *target_ptr) spin_lock_irq(target->scsi_host->host_lock); if (target->state != SRP_TARGET_DEAD) { spin_unlock_irq(target->scsi_host->host_lock); + scsi_host_put(target->scsi_host); return; } target->state = SRP_TARGET_REMOVED; @@ -367,6 +365,8 @@ static void srp_remove_work(void *target_ptr) ib_destroy_cm_id(target->cm_id); srp_free_target_ib(target); scsi_host_put(target->scsi_host); + /* And another put to really free the target port... */ + scsi_host_put(target->scsi_host); } static int srp_connect_target(struct srp_target_port *target) @@ -1241,7 +1241,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) list_for_each_entry_safe(req, tmp, &target->req_queue, list) if (req->scmnd->device == scmnd->device) { req->scmnd->result = DID_RESET << 16; - req->scmnd->scsi_done(req->scmnd); + scmnd->scsi_done(scmnd); srp_remove_req(target, req); } diff --git a/trunk/drivers/mmc/au1xmmc.c b/trunk/drivers/mmc/au1xmmc.c index 5dc4bee7abeb..914d62b24064 100644 --- a/trunk/drivers/mmc/au1xmmc.c +++ b/trunk/drivers/mmc/au1xmmc.c @@ -310,7 +310,7 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status) } else data->bytes_xfered = - (data->blocks * data->blksz) - + (data->blocks * (1 << data->blksz_bits)) - host->pio.len; } @@ -575,7 +575,7 @@ static int au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data) { - int datalen = data->blocks * data->blksz; + int datalen = data->blocks * (1 << data->blksz_bits); if (dma != 0) host->flags |= HOST_F_DMA; @@ -596,7 +596,7 @@ au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data) if (host->dma.len == 0) return MMC_ERR_TIMEOUT; - au_writel(data->blksz - 1, HOST_BLKSIZE(host)); + au_writel((1 << data->blksz_bits) - 1, HOST_BLKSIZE(host)); if (host->flags & HOST_F_DMA) { int i; diff --git a/trunk/drivers/mmc/imxmmc.c b/trunk/drivers/mmc/imxmmc.c index a4eb1d0e7a71..79358e223f57 100644 --- a/trunk/drivers/mmc/imxmmc.c +++ b/trunk/drivers/mmc/imxmmc.c @@ -218,10 +218,8 @@ static int imxmci_busy_wait_for_status(struct imxmci_host *host, if(!loops) return 0; - /* The busy-wait is expected there for clock <8MHz due to SDHC hardware flaws */ - if(!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock>=8000000)) - dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n", - loops, where, *pstat, stat_mask); + dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n", + loops, where, *pstat, stat_mask); return loops; } @@ -335,9 +333,6 @@ static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, WARN_ON(host->cmd != NULL); host->cmd = cmd; - /* Ensure, that clock are stopped else command programming and start fails */ - imxmci_stop_clock(host); - if (cmd->flags & MMC_RSP_BUSY) cmdat |= CMD_DAT_CONT_BUSY; @@ -558,7 +553,7 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat) int trans_done = 0; unsigned int stat = *pstat; - if(host->actual_bus_width != MMC_BUS_WIDTH_4) + if(host->actual_bus_width == MMC_BUS_WIDTH_4) burst_len = 16; else burst_len = 64; @@ -596,7 +591,8 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat) stat = MMC_STATUS; /* Flush extra bytes from FIFO */ - while(flush_len && !(stat & STATUS_DATA_TRANS_DONE)){ + while(flush_len >= 2){ + flush_len -= 2; i = MMC_BUFFER_ACCESS; stat = MMC_STATUS; stat &= ~STATUS_CRC_READ_ERR; /* Stupid but required there */ @@ -750,6 +746,10 @@ static void imxmci_tasklet_fnc(unsigned long data) data_dir_mask = 
STATUS_DATA_TRANS_DONE; } + imxmci_busy_wait_for_status(host, &stat, + data_dir_mask, + 50, "imxmci_tasklet_fnc data"); + if(stat & data_dir_mask) { clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events); imxmci_data_done(host, stat); @@ -865,11 +865,7 @@ static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) imxmci_stop_clock(host); MMC_CLK_RATE = (prescaler<<3) | clk; - /* - * Under my understanding, clock should not be started there, because it would - * initiate SDHC sequencer and send last or random command into card - */ - /*imxmci_start_clock(host);*/ + imxmci_start_clock(host); dev_dbg(mmc_dev(host->mmc), "MMC_CLK_RATE: 0x%08x\n", MMC_CLK_RATE); } else { diff --git a/trunk/drivers/mmc/mmc.c b/trunk/drivers/mmc/mmc.c index 6201f3086a02..1ca2c8b9c9b5 100644 --- a/trunk/drivers/mmc/mmc.c +++ b/trunk/drivers/mmc/mmc.c @@ -951,7 +951,6 @@ static void mmc_read_scrs(struct mmc_host *host) data.timeout_ns = card->csd.tacc_ns * 10; data.timeout_clks = card->csd.tacc_clks * 10; data.blksz_bits = 3; - data.blksz = 1 << 3; data.blocks = 1; data.flags = MMC_DATA_READ; data.sg = &sg; diff --git a/trunk/drivers/mmc/mmc_block.c b/trunk/drivers/mmc/mmc_block.c index e39cc05c64c2..06bd1f4cb9b1 100644 --- a/trunk/drivers/mmc/mmc_block.c +++ b/trunk/drivers/mmc/mmc_block.c @@ -175,7 +175,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) brq.data.timeout_ns = card->csd.tacc_ns * 10; brq.data.timeout_clks = card->csd.tacc_clks * 10; brq.data.blksz_bits = md->block_bits; - brq.data.blksz = 1 << md->block_bits; brq.data.blocks = req->nr_sectors >> (md->block_bits - 9); brq.stop.opcode = MMC_STOP_TRANSMISSION; brq.stop.arg = 0; diff --git a/trunk/drivers/mmc/pxamci.c b/trunk/drivers/mmc/pxamci.c index b49368fd96b8..f97b472085cb 100644 --- a/trunk/drivers/mmc/pxamci.c +++ b/trunk/drivers/mmc/pxamci.c @@ -119,7 +119,7 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data) nob = 0xffff; writel(nob, host->base + MMC_NOB); - writel(data->blksz, host->base + MMC_BLKLEN); + writel(1 << data->blksz_bits, host->base + MMC_BLKLEN); clks = (unsigned long long)data->timeout_ns * CLOCKRATE; do_div(clks, 1000000000UL); @@ -283,7 +283,7 @@ static int pxamci_data_done(struct pxamci_host *host, unsigned int stat) * data blocks as being in error. */ if (data->error == MMC_ERR_NONE) - data->bytes_xfered = data->blocks * data->blksz; + data->bytes_xfered = data->blocks << data->blksz_bits; else data->bytes_xfered = 0; diff --git a/trunk/drivers/mmc/wbsd.c b/trunk/drivers/mmc/wbsd.c index 8167332d4013..39b3d97f891e 100644 --- a/trunk/drivers/mmc/wbsd.c +++ b/trunk/drivers/mmc/wbsd.c @@ -662,14 +662,14 @@ static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data) unsigned long dmaflags; DBGF("blksz %04x blks %04x flags %08x\n", - data->blksz, data->blocks, data->flags); + 1 << data->blksz_bits, data->blocks, data->flags); DBGF("tsac %d ms nsac %d clk\n", data->timeout_ns / 1000000, data->timeout_clks); /* * Calculate size. */ - host->size = data->blocks * data->blksz; + host->size = data->blocks << data->blksz_bits; /* * Check timeout values for overflow. @@ -696,12 +696,12 @@ static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data) * Two bytes are needed for each data line. 
*/ if (host->bus_width == MMC_BUS_WIDTH_1) { - blksize = data->blksz + 2; + blksize = (1 << data->blksz_bits) + 2; wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0); wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF); } else if (host->bus_width == MMC_BUS_WIDTH_4) { - blksize = data->blksz + 2 * 4; + blksize = (1 << data->blksz_bits) + 2 * 4; wbsd_write_index(host, WBSD_IDX_PBSMSB, ((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH); diff --git a/trunk/drivers/net/forcedeth.c b/trunk/drivers/net/forcedeth.c index 7e078b4cca7c..f7235c9bc421 100644 --- a/trunk/drivers/net/forcedeth.c +++ b/trunk/drivers/net/forcedeth.c @@ -106,6 +106,7 @@ * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings. * 0.52: 20 Jan 2006: Add MSI/MSIX support. * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset. + * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup. * * Known bugs: * We suspect that on some hardware no TX done interrupts are generated. @@ -117,7 +118,7 @@ * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few * superfluous timer interrupts from the nic. */ -#define FORCEDETH_VERSION "0.53" +#define FORCEDETH_VERSION "0.54" #define DRV_NAME "forcedeth" #include @@ -710,6 +711,72 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags) } } +static int using_multi_irqs(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + + if (!(np->msi_flags & NV_MSI_X_ENABLED) || + ((np->msi_flags & NV_MSI_X_ENABLED) && + ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) + return 0; + else + return 1; +} + +static void nv_enable_irq(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + + if (!using_multi_irqs(dev)) { + if (np->msi_flags & NV_MSI_X_ENABLED) + enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); + else + enable_irq(dev->irq); + } else { + enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); + enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); + enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); + } +} + +static void nv_disable_irq(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + + if (!using_multi_irqs(dev)) { + if (np->msi_flags & NV_MSI_X_ENABLED) + disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); + else + disable_irq(dev->irq); + } else { + disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); + disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); + disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); + } +} + +/* In MSIX mode, a write to irqmask behaves as XOR */ +static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask) +{ + u8 __iomem *base = get_hwbase(dev); + + writel(mask, base + NvRegIrqMask); +} + +static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask) +{ + struct fe_priv *np = get_nvpriv(dev); + u8 __iomem *base = get_hwbase(dev); + + if (np->msi_flags & NV_MSI_X_ENABLED) { + writel(mask, base + NvRegIrqMask); + } else { + if (np->msi_flags & NV_MSI_ENABLED) + writel(0, base + NvRegMSIIrqMask); + writel(0, base + NvRegIrqMask); + } +} + #define MII_READ (-1) /* mii_rw: read/write a register on the PHY. 
* @@ -1019,24 +1086,25 @@ static void nv_do_rx_refill(unsigned long data) struct net_device *dev = (struct net_device *) data; struct fe_priv *np = netdev_priv(dev); - - if (!(np->msi_flags & NV_MSI_X_ENABLED) || - ((np->msi_flags & NV_MSI_X_ENABLED) && - ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { - disable_irq(dev->irq); + if (!using_multi_irqs(dev)) { + if (np->msi_flags & NV_MSI_X_ENABLED) + disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); + else + disable_irq(dev->irq); } else { disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); } if (nv_alloc_rx(dev)) { - spin_lock(&np->lock); + spin_lock_irq(&np->lock); if (!np->in_shutdown) mod_timer(&np->oom_kick, jiffies + OOM_REFILL); - spin_unlock(&np->lock); + spin_unlock_irq(&np->lock); } - if (!(np->msi_flags & NV_MSI_X_ENABLED) || - ((np->msi_flags & NV_MSI_X_ENABLED) && - ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { - enable_irq(dev->irq); + if (!using_multi_irqs(dev)) { + if (np->msi_flags & NV_MSI_X_ENABLED) + enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); + else + enable_irq(dev->irq); } else { enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); } @@ -1668,15 +1736,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu) * guessed, there is probably a simpler approach. * Changing the MTU is a rare event, it shouldn't matter. */ - if (!(np->msi_flags & NV_MSI_X_ENABLED) || - ((np->msi_flags & NV_MSI_X_ENABLED) && - ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { - disable_irq(dev->irq); - } else { - disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); - disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); - disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); - } + nv_disable_irq(dev); spin_lock_bh(&dev->xmit_lock); spin_lock(&np->lock); /* stop engines */ @@ -1709,15 +1769,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu) nv_start_tx(dev); spin_unlock(&np->lock); spin_unlock_bh(&dev->xmit_lock); - if (!(np->msi_flags & NV_MSI_X_ENABLED) || - ((np->msi_flags & NV_MSI_X_ENABLED) && - ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { - enable_irq(dev->irq); - } else { - enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); - enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); - enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); - } + nv_enable_irq(dev); } return 0; } @@ -2108,16 +2160,16 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs) if (!(events & np->irqmask)) break; - spin_lock(&np->lock); + spin_lock_irq(&np->lock); nv_tx_done(dev); - spin_unlock(&np->lock); + spin_unlock_irq(&np->lock); if (events & (NVREG_IRQ_TX_ERR)) { dprintk(KERN_DEBUG "%s: received irq with events 0x%x. 
Probably TX fail.\n", dev->name, events); } if (i > max_interrupt_work) { - spin_lock(&np->lock); + spin_lock_irq(&np->lock); /* disable interrupts on the nic */ writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); pci_push(base); @@ -2127,7 +2179,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs) mod_timer(&np->nic_poll, jiffies + POLL_WAIT); } printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); - spin_unlock(&np->lock); + spin_unlock_irq(&np->lock); break; } @@ -2157,14 +2209,14 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) nv_rx_process(dev); if (nv_alloc_rx(dev)) { - spin_lock(&np->lock); + spin_lock_irq(&np->lock); if (!np->in_shutdown) mod_timer(&np->oom_kick, jiffies + OOM_REFILL); - spin_unlock(&np->lock); + spin_unlock_irq(&np->lock); } if (i > max_interrupt_work) { - spin_lock(&np->lock); + spin_lock_irq(&np->lock); /* disable interrupts on the nic */ writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); pci_push(base); @@ -2174,7 +2226,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) mod_timer(&np->nic_poll, jiffies + POLL_WAIT); } printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); - spin_unlock(&np->lock); + spin_unlock_irq(&np->lock); break; } @@ -2203,14 +2255,14 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs) break; if (events & NVREG_IRQ_LINK) { - spin_lock(&np->lock); + spin_lock_irq(&np->lock); nv_link_irq(dev); - spin_unlock(&np->lock); + spin_unlock_irq(&np->lock); } if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { - spin_lock(&np->lock); + spin_lock_irq(&np->lock); nv_linkchange(dev); - spin_unlock(&np->lock); + spin_unlock_irq(&np->lock); np->link_timeout = jiffies + LINK_TIMEOUT; } if (events & (NVREG_IRQ_UNKNOWN)) { @@ -2218,7 +2270,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs) dev->name, events); } if (i > max_interrupt_work) { - spin_lock(&np->lock); + spin_lock_irq(&np->lock); /* disable interrupts on the nic */ writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); pci_push(base); @@ -2228,7 +2280,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs) mod_timer(&np->nic_poll, jiffies + POLL_WAIT); } printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); - spin_unlock(&np->lock); + spin_unlock_irq(&np->lock); break; } @@ -2251,10 +2303,11 @@ static void nv_do_nic_poll(unsigned long data) * nv_nic_irq because that may decide to do otherwise */ - if (!(np->msi_flags & NV_MSI_X_ENABLED) || - ((np->msi_flags & NV_MSI_X_ENABLED) && - ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { - disable_irq(dev->irq); + if (!using_multi_irqs(dev)) { + if (np->msi_flags & NV_MSI_X_ENABLED) + disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); + else + disable_irq(dev->irq); mask = np->irqmask; } else { if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { @@ -2277,11 +2330,12 @@ static void nv_do_nic_poll(unsigned long data) writel(mask, base + NvRegIrqMask); pci_push(base); - if (!(np->msi_flags & NV_MSI_X_ENABLED) || - ((np->msi_flags & NV_MSI_X_ENABLED) && - ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { + if (!using_multi_irqs(dev)) { nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL); - enable_irq(dev->irq); + if (np->msi_flags & NV_MSI_X_ENABLED) + enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); + else + enable_irq(dev->irq); } else { if (np->nic_poll_irq & 
NVREG_IRQ_RX_ALL) { nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL); @@ -2628,6 +2682,113 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); } +static int nv_request_irq(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + u8 __iomem *base = get_hwbase(dev); + int ret = 1; + int i; + + if (np->msi_flags & NV_MSI_X_CAPABLE) { + for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { + np->msi_x_entry[i].entry = i; + } + if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { + np->msi_flags |= NV_MSI_X_ENABLED; + if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { + /* Request irq for rx handling */ + if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) { + printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); + pci_disable_msix(np->pci_dev); + np->msi_flags &= ~NV_MSI_X_ENABLED; + goto out_err; + } + /* Request irq for tx handling */ + if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) { + printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); + pci_disable_msix(np->pci_dev); + np->msi_flags &= ~NV_MSI_X_ENABLED; + goto out_free_rx; + } + /* Request irq for link and timer handling */ + if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) { + printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); + pci_disable_msix(np->pci_dev); + np->msi_flags &= ~NV_MSI_X_ENABLED; + goto out_free_tx; + } + /* map interrupts to their respective vector */ + writel(0, base + NvRegMSIXMap0); + writel(0, base + NvRegMSIXMap1); + set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); + set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); + set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); + } else { + /* Request irq for all interrupts */ + if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) { + printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); + pci_disable_msix(np->pci_dev); + np->msi_flags &= ~NV_MSI_X_ENABLED; + goto out_err; + } + + /* map interrupts to vector 0 */ + writel(0, base + NvRegMSIXMap0); + writel(0, base + NvRegMSIXMap1); + } + } + } + if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { + if ((ret = pci_enable_msi(np->pci_dev)) == 0) { + np->msi_flags |= NV_MSI_ENABLED; + if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) { + printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); + pci_disable_msi(np->pci_dev); + np->msi_flags &= ~NV_MSI_ENABLED; + goto out_err; + } + + /* map interrupts to vector 0 */ + writel(0, base + NvRegMSIMap0); + writel(0, base + NvRegMSIMap1); + /* enable msi vector 0 */ + writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); + } + } + if (ret != 0) { + if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) + goto out_err; + } + + return 0; +out_free_tx: + free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); +out_free_rx: + free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); +out_err: + return 1; +} + +static void nv_free_irq(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + int i; + + if (np->msi_flags & NV_MSI_X_ENABLED) { + for (i = 0; i < (np->msi_flags & 
NV_MSI_X_VECTORS_MASK); i++) { + free_irq(np->msi_x_entry[i].vector, dev); + } + pci_disable_msix(np->pci_dev); + np->msi_flags &= ~NV_MSI_X_ENABLED; + } else { + free_irq(np->pci_dev->irq, dev); + if (np->msi_flags & NV_MSI_ENABLED) { + pci_disable_msi(np->pci_dev); + np->msi_flags &= ~NV_MSI_ENABLED; + } + } +} + static int nv_open(struct net_device *dev) { struct fe_priv *np = netdev_priv(dev); @@ -2720,12 +2881,16 @@ static int nv_open(struct net_device *dev) udelay(10); writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); - writel(0, base + NvRegIrqMask); + nv_disable_hw_interrupts(dev, np->irqmask); pci_push(base); writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); pci_push(base); + if (nv_request_irq(dev)) { + goto out_drain; + } + if (np->msi_flags & NV_MSI_X_CAPABLE) { for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { np->msi_x_entry[i].entry = i; @@ -2799,7 +2964,7 @@ static int nv_open(struct net_device *dev) } /* ask for interrupts */ - writel(np->irqmask, base + NvRegIrqMask); + nv_enable_hw_interrupts(dev, np->irqmask); spin_lock_irq(&np->lock); writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); @@ -2843,7 +3008,6 @@ static int nv_close(struct net_device *dev) { struct fe_priv *np = netdev_priv(dev); u8 __iomem *base; - int i; spin_lock_irq(&np->lock); np->in_shutdown = 1; @@ -2861,31 +3025,13 @@ static int nv_close(struct net_device *dev) /* disable interrupts on the nic or we will lock up */ base = get_hwbase(dev); - if (np->msi_flags & NV_MSI_X_ENABLED) { - writel(np->irqmask, base + NvRegIrqMask); - } else { - if (np->msi_flags & NV_MSI_ENABLED) - writel(0, base + NvRegMSIIrqMask); - writel(0, base + NvRegIrqMask); - } + nv_disable_hw_interrupts(dev, np->irqmask); pci_push(base); dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); spin_unlock_irq(&np->lock); - if (np->msi_flags & NV_MSI_X_ENABLED) { - for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { - free_irq(np->msi_x_entry[i].vector, dev); - } - pci_disable_msix(np->pci_dev); - np->msi_flags &= ~NV_MSI_X_ENABLED; - } else { - free_irq(np->pci_dev->irq, dev); - if (np->msi_flags & NV_MSI_ENABLED) { - pci_disable_msi(np->pci_dev); - np->msi_flags &= ~NV_MSI_ENABLED; - } - } + nv_free_irq(dev); drain_ring(dev); @@ -2974,20 +3120,18 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i if (id->driver_data & DEV_HAS_HIGH_DMA) { /* packet format 3: supports 40-bit addressing */ np->desc_ver = DESC_VER_3; + np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) { printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", pci_name(pci_dev)); } else { - if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) { - printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n", - pci_name(pci_dev)); - goto out_relreg; - } else { - dev->features |= NETIF_F_HIGHDMA; - printk(KERN_INFO "forcedeth: using HIGHDMA\n"); - } + dev->features |= NETIF_F_HIGHDMA; + printk(KERN_INFO "forcedeth: using HIGHDMA\n"); + } + if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) { + printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n", + pci_name(pci_dev)); } - np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; } else if (id->driver_data & DEV_HAS_LARGEDESC) { /* packet format 2: supports jumbo frames */ np->desc_ver = DESC_VER_2; diff --git 
a/trunk/drivers/net/ixp2000/enp2611.c b/trunk/drivers/net/ixp2000/enp2611.c index b67f586d7392..6f7dce8eba51 100644 --- a/trunk/drivers/net/ixp2000/enp2611.c +++ b/trunk/drivers/net/ixp2000/enp2611.c @@ -149,8 +149,6 @@ static void enp2611_check_link_status(unsigned long __dummy) int status; dev = nds[i]; - if (dev == NULL) - continue; status = pm3386_is_link_up(i); if (status && !netif_carrier_ok(dev)) { @@ -193,7 +191,6 @@ static void enp2611_set_port_admin_status(int port, int up) static int __init enp2611_init_module(void) { - int ports; int i; if (!machine_is_enp2611()) @@ -202,8 +199,7 @@ static int __init enp2611_init_module(void) caleb_reset(); pm3386_reset(); - ports = pm3386_port_count(); - for (i = 0; i < ports; i++) { + for (i = 0; i < 3; i++) { nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv)); if (nds[i] == NULL) { while (--i >= 0) @@ -219,10 +215,9 @@ static int __init enp2611_init_module(void) ixp2400_msf_init(&enp2611_msf_parameters); - if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) { - for (i = 0; i < ports; i++) - if (nds[i]) - free_netdev(nds[i]); + if (ixpdev_init(3, nds, enp2611_set_port_admin_status)) { + for (i = 0; i < 3; i++) + free_netdev(nds[i]); return -EINVAL; } diff --git a/trunk/drivers/net/ixp2000/pm3386.c b/trunk/drivers/net/ixp2000/pm3386.c index 5224651c9aac..5c7ab7564053 100644 --- a/trunk/drivers/net/ixp2000/pm3386.c +++ b/trunk/drivers/net/ixp2000/pm3386.c @@ -86,53 +86,40 @@ static void pm3386_port_reg_write(int port, int _reg, int spacing, u16 value) pm3386_reg_write(port >> 1, reg, value); } -int pm3386_secondary_present(void) -{ - return pm3386_reg_read(1, 0) == 0x3386; -} void pm3386_reset(void) { u8 mac[3][6]; - int secondary; - - secondary = pm3386_secondary_present(); /* Save programmed MAC addresses. */ pm3386_get_mac(0, mac[0]); pm3386_get_mac(1, mac[1]); - if (secondary) - pm3386_get_mac(2, mac[2]); + pm3386_get_mac(2, mac[2]); /* Assert analog and digital reset. */ pm3386_reg_write(0, 0x002, 0x0060); - if (secondary) - pm3386_reg_write(1, 0x002, 0x0060); + pm3386_reg_write(1, 0x002, 0x0060); mdelay(1); /* Deassert analog reset. */ pm3386_reg_write(0, 0x002, 0x0062); - if (secondary) - pm3386_reg_write(1, 0x002, 0x0062); + pm3386_reg_write(1, 0x002, 0x0062); mdelay(10); /* Deassert digital reset. */ pm3386_reg_write(0, 0x002, 0x0063); - if (secondary) - pm3386_reg_write(1, 0x002, 0x0063); + pm3386_reg_write(1, 0x002, 0x0063); mdelay(10); /* Restore programmed MAC addresses. */ pm3386_set_mac(0, mac[0]); pm3386_set_mac(1, mac[1]); - if (secondary) - pm3386_set_mac(2, mac[2]); + pm3386_set_mac(2, mac[2]); /* Disable carrier on all ports. 
*/ pm3386_set_carrier(0, 0); pm3386_set_carrier(1, 0); - if (secondary) - pm3386_set_carrier(2, 0); + pm3386_set_carrier(2, 0); } static u16 swaph(u16 x) @@ -140,11 +127,6 @@ static u16 swaph(u16 x) return ((x << 8) | (x >> 8)) & 0xffff; } -int pm3386_port_count(void) -{ - return 2 + pm3386_secondary_present(); -} - void pm3386_init_port(int port) { int pm = port >> 1; diff --git a/trunk/drivers/net/ixp2000/pm3386.h b/trunk/drivers/net/ixp2000/pm3386.h index cc4183dca911..fe92bb056ac4 100644 --- a/trunk/drivers/net/ixp2000/pm3386.h +++ b/trunk/drivers/net/ixp2000/pm3386.h @@ -13,7 +13,6 @@ #define __PM3386_H void pm3386_reset(void); -int pm3386_port_count(void); void pm3386_init_port(int port); void pm3386_get_mac(int port, u8 *mac); void pm3386_set_mac(int port, u8 *mac); diff --git a/trunk/drivers/net/skge.c b/trunk/drivers/net/skge.c index 63a6b3dfc095..a70c2b0cc104 100644 --- a/trunk/drivers/net/skge.c +++ b/trunk/drivers/net/skge.c @@ -78,7 +78,8 @@ static const struct pci_device_id skge_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) }, { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) }, { PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), }, - { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* DGE-530T */ + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */ { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) }, diff --git a/trunk/drivers/net/sky2.c b/trunk/drivers/net/sky2.c index 60779ebf2ff6..ffd267fab21d 100644 --- a/trunk/drivers/net/sky2.c +++ b/trunk/drivers/net/sky2.c @@ -51,7 +51,7 @@ #include "sky2.h" #define DRV_NAME "sky2" -#define DRV_VERSION "1.4" +#define DRV_VERSION "1.3" #define PFX DRV_NAME " " /* @@ -105,7 +105,6 @@ MODULE_PARM_DESC(idle_timeout, "Idle timeout workaround for lost interrupts (ms) static const struct pci_device_id sky2_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, - { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, @@ -236,7 +235,6 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) } if (hw->chip_id == CHIP_ID_YUKON_EC_U) { - sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON); sky2_pci_write32(hw, PCI_DEV_REG3, 0); reg1 = sky2_pci_read32(hw, PCI_DEV_REG4); reg1 &= P_ASPM_CONTROL_MSK; @@ -308,7 +306,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) u16 ctrl, ct1000, adv, pg, ledctrl, ledover; if (sky2->autoneg == AUTONEG_ENABLE && - !(hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) { + (hw->chip_id != CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) { u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | @@ -1022,25 +1020,7 @@ static int sky2_up(struct net_device *dev) struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; u32 ramsize, rxspace, imask; - int cap, err = -ENOMEM; - struct net_device *otherdev = hw->dev[sky2->port^1]; - - /* - * On dual port PCI-X card, there is an problem where status - * can be received out of order due to split transactions - */ - if (otherdev && netif_running(otherdev) && - (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) { - struct sky2_port *osky2 = 
netdev_priv(otherdev); - u16 cmd; - - cmd = sky2_pci_read16(hw, cap + PCI_X_CMD); - cmd &= ~PCI_X_CMD_MAX_SPLIT; - sky2_pci_write16(hw, cap + PCI_X_CMD, cmd); - - sky2->rx_csum = 0; - osky2->rx_csum = 0; - } + int err = -ENOMEM; if (netif_msg_ifup(sky2)) printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); @@ -1919,12 +1899,6 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last) } } -/* Is status ring empty or is there more to do? */ -static inline int sky2_more_work(const struct sky2_hw *hw) -{ - return (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX)); -} - /* Process status response ring */ static int sky2_status_intr(struct sky2_hw *hw, int to_do) { @@ -2197,19 +2171,19 @@ static int sky2_poll(struct net_device *dev0, int *budget) if (status & Y2_IS_CHK_TXA2) sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2); + if (status & Y2_IS_STAT_BMU) + sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); + work_done = sky2_status_intr(hw, work_limit); *budget -= work_done; dev0->quota -= work_done; - if (status & Y2_IS_STAT_BMU) - sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); - - if (sky2_more_work(hw)) + if (work_done >= work_limit) return 1; netif_rx_complete(dev0); - sky2_read32(hw, B0_Y2_SP_LISR); + status = sky2_read32(hw, B0_Y2_SP_LISR); return 0; } @@ -3093,7 +3067,12 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, sky2->duplex = -1; sky2->speed = -1; sky2->advertising = sky2_supported_modes(hw); - sky2->rx_csum = 1; + + /* Receive checksum disabled for Yukon XL + * because of observed problems with incorrect + * values when multiple packets are received in one interrupt + */ + sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL); spin_lock_init(&sky2->phy_lock); sky2->tx_pending = TX_DEF_PENDING; diff --git a/trunk/drivers/net/sky2.h b/trunk/drivers/net/sky2.h index 8a0bc5525f0a..8012994c9b93 100644 --- a/trunk/drivers/net/sky2.h +++ b/trunk/drivers/net/sky2.h @@ -214,8 +214,6 @@ enum csr_regs { enum { Y2_VMAIN_AVAIL = 1<<17,/* VMAIN available (YUKON-2 only) */ Y2_VAUX_AVAIL = 1<<16,/* VAUX available (YUKON-2 only) */ - Y2_HW_WOL_ON = 1<<15,/* HW WOL On (Yukon-EC Ultra A1 only) */ - Y2_HW_WOL_OFF = 1<<14,/* HW WOL On (Yukon-EC Ultra A1 only) */ Y2_ASF_ENABLE = 1<<13,/* ASF Unit Enable (YUKON-2 only) */ Y2_ASF_DISABLE = 1<<12,/* ASF Unit Disable (YUKON-2 only) */ Y2_CLK_RUN_ENA = 1<<11,/* CLK_RUN Enable (YUKON-2 only) */ diff --git a/trunk/drivers/net/via-rhine.c b/trunk/drivers/net/via-rhine.c index fdc21037f6dc..a6dc53b4250d 100644 --- a/trunk/drivers/net/via-rhine.c +++ b/trunk/drivers/net/via-rhine.c @@ -491,6 +491,8 @@ struct rhine_private { u8 tx_thresh, rx_thresh; struct mii_if_info mii_if; + struct work_struct tx_timeout_task; + struct work_struct check_media_task; void __iomem *base; }; @@ -498,6 +500,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int location); static void mdio_write(struct net_device *dev, int phy_id, int location, int value); static int rhine_open(struct net_device *dev); static void rhine_tx_timeout(struct net_device *dev); +static void rhine_tx_timeout_task(struct net_device *dev); +static void rhine_check_media_task(struct net_device *dev); static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev); static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs); static void rhine_tx(struct net_device *dev); @@ -852,6 +856,12 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, if (rp->quirks & rqRhineI) dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; + 
diff --git a/trunk/drivers/net/via-rhine.c b/trunk/drivers/net/via-rhine.c
index fdc21037f6dc..a6dc53b4250d 100644
--- a/trunk/drivers/net/via-rhine.c
+++ b/trunk/drivers/net/via-rhine.c
@@ -491,6 +491,8 @@ struct rhine_private {
 	u8 tx_thresh, rx_thresh;

 	struct mii_if_info mii_if;
+	struct work_struct tx_timeout_task;
+	struct work_struct check_media_task;
 	void __iomem *base;
 };

@@ -498,6 +500,8 @@ static int  mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 static int  rhine_open(struct net_device *dev);
 static void rhine_tx_timeout(struct net_device *dev);
+static void rhine_tx_timeout_task(struct net_device *dev);
+static void rhine_check_media_task(struct net_device *dev);
 static int  rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
 static void rhine_tx(struct net_device *dev);
@@ -852,6 +856,12 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
 	if (rp->quirks & rqRhineI)
 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

+	INIT_WORK(&rp->tx_timeout_task,
+		  (void (*)(void *))rhine_tx_timeout_task, dev);
+
+	INIT_WORK(&rp->check_media_task,
+		  (void (*)(void *))rhine_check_media_task, dev);
+
 	/* dev->name not defined before register_netdev()! */
 	rc = register_netdev(dev);
 	if (rc)
@@ -1098,6 +1108,11 @@ static void rhine_set_carrier(struct mii_if_info *mii)
 			   netif_carrier_ok(mii->dev));
 }

+static void rhine_check_media_task(struct net_device *dev)
+{
+	rhine_check_media(dev, 0);
+}
+
 static void init_registers(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
@@ -1151,8 +1166,8 @@ static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
 	if (quirks & rqRhineI) {
 		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR

-		/* Can be called from ISR. Evil. */
-		mdelay(1);
+		/* Do not call from ISR! */
+		msleep(1);

 		/* 0x80 must be set immediately before turning it off */
 		iowrite8(0x80, ioaddr + MIICmd);
@@ -1240,6 +1255,16 @@ static int rhine_open(struct net_device *dev)
 }

 static void rhine_tx_timeout(struct net_device *dev)
+{
+	struct rhine_private *rp = netdev_priv(dev);
+
+	/*
+	 * Move bulk of work outside of interrupt context
+	 */
+	schedule_work(&rp->tx_timeout_task);
+}
+
+static void rhine_tx_timeout_task(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
@@ -1652,7 +1677,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
 	spin_lock(&rp->lock);

 	if (intr_status & IntrLinkChange)
-		rhine_check_media(dev, 0);
+		schedule_work(&rp->check_media_task);
 	if (intr_status & IntrStatsMax) {
 		rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
 		rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
@@ -1902,6 +1927,9 @@ static int rhine_close(struct net_device *dev)
 	spin_unlock_irq(&rp->lock);

 	free_irq(rp->pdev->irq, dev);
+
+	flush_scheduled_work();
+
 	free_rbufs(dev);
 	free_tbufs(dev);
 	free_ring(dev);
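[Editor's note: the via-rhine changes above move transmit-timeout and link-change handling out of the interrupt handler into a work queue, which is what lets rhine_disable_linkmon() switch from mdelay() to the sleeping msleep(). The sketch below shows the same deferral pattern against the 2.6.16-era API the patch itself uses (three-argument INIT_WORK); mydev_priv, my_recover() and my_interrupt() are invented names, not driver code.]

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>
    #include <linux/workqueue.h>

    struct mydev_priv {
            struct work_struct recover_task;
    };

    /* Runs later in process context, where sleeping (msleep, mutexes,
     * flushing work) is permitted. */
    static void my_recover(void *data)
    {
            struct net_device *dev = data;
            /* ... slow, possibly sleeping recovery work on dev ... */
    }

    static irqreturn_t my_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
    {
            struct net_device *dev = dev_instance;
            struct mydev_priv *priv = netdev_priv(dev);

            /* Interrupt context must not sleep: only queue the work. */
            schedule_work(&priv->recover_task);
            return IRQ_HANDLED;
    }

    static void my_setup(struct net_device *dev)
    {
            struct mydev_priv *priv = netdev_priv(dev);

            /* Bind the callback once at probe time; on teardown,
             * flush_scheduled_work() must run before freeing dev,
             * exactly as rhine_close() does above. */
            INIT_WORK(&priv->recover_task, my_recover, dev);
    }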
diff --git a/trunk/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/trunk/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index e2982a83ae42..7ed18cad29f7 100644
--- a/trunk/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/trunk/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -3271,6 +3271,9 @@ static int bcm43xx_init_board(struct bcm43xx_private *bcm)
 	bcm43xx_sysfs_register(bcm);
 	//FIXME: check for bcm43xx_sysfs_register failure. This function is a bit messy regarding unwinding, though...

+	/*FIXME: This should be handled by softmac instead. */
+	schedule_work(&bcm->softmac->associnfo.work);
+
 	assert(err == 0);
 out:
 	return err;
@@ -3946,9 +3949,6 @@ static int bcm43xx_resume(struct pci_dev *pdev)

 	netif_device_attach(net_dev);

-	/*FIXME: This should be handled by softmac instead. */
-	schedule_work(&bcm->softmac->associnfo.work);
-
 	dprintk(KERN_INFO PFX "Device resumed.\n");

 	return 0;
diff --git a/trunk/fs/Makefile b/trunk/fs/Makefile
index 078d3d1191a5..83bf478e786b 100644
--- a/trunk/fs/Makefile
+++ b/trunk/fs/Makefile
@@ -45,7 +45,6 @@ obj-$(CONFIG_DNOTIFY)		+= dnotify.o
 obj-$(CONFIG_PROC_FS)		+= proc/
 obj-y				+= partitions/
 obj-$(CONFIG_SYSFS)		+= sysfs/
-obj-$(CONFIG_CONFIGFS_FS)	+= configfs/
 obj-y				+= devpts/

 obj-$(CONFIG_PROFILING)		+= dcookies.o
@@ -101,4 +100,5 @@ obj-$(CONFIG_BEFS_FS)		+= befs/
 obj-$(CONFIG_HOSTFS)		+= hostfs/
 obj-$(CONFIG_HPPFS)		+= hppfs/
 obj-$(CONFIG_DEBUG_FS)		+= debugfs/
+obj-$(CONFIG_CONFIGFS_FS)	+= configfs/
 obj-$(CONFIG_OCFS2_FS)		+= ocfs2/
diff --git a/trunk/fs/configfs/dir.c b/trunk/fs/configfs/dir.c
index 5f952187fc53..5638c8f9362f 100644
--- a/trunk/fs/configfs/dir.c
+++ b/trunk/fs/configfs/dir.c
@@ -505,15 +505,13 @@ static int populate_groups(struct config_group *group)
 	int i;

 	if (group->default_groups) {
-		/*
-		 * FYI, we're faking mkdir here
+		/* FYI, we're faking mkdir here
 		 * I'm not sure we need this semaphore, as we're called
 		 * from our parent's mkdir.  That holds our parent's
 		 * i_mutex, so afaik lookup cannot continue through our
 		 * parent to find us, let alone mess with our tree.
 		 * That said, taking our i_mutex is closer to mkdir
-		 * emulation, and shouldn't hurt.
-		 */
+		 * emulation, and shouldn't hurt. */
 		mutex_lock(&dentry->d_inode->i_mutex);

 		for (i = 0; group->default_groups[i]; i++) {
@@ -548,34 +546,20 @@ static void unlink_obj(struct config_item *item)

 		item->ci_group = NULL;
 		item->ci_parent = NULL;
-
-		/* Drop the reference for ci_entry */
 		config_item_put(item);

-		/* Drop the reference for ci_parent */
 		config_group_put(group);
 	}
 }

 static void link_obj(struct config_item *parent_item, struct config_item *item)
 {
-	/*
-	 * Parent seems redundant with group, but it makes certain
-	 * traversals much nicer.
-	 */
+	/* Parent seems redundant with group, but it makes certain
+	 * traversals much nicer. */
 	item->ci_parent = parent_item;
-
-	/*
-	 * We hold a reference on the parent for the child's ci_parent
-	 * link.
-	 */
 	item->ci_group = config_group_get(to_config_group(parent_item));
 	list_add_tail(&item->ci_entry, &item->ci_group->cg_children);

-	/*
-	 * We hold a reference on the child for ci_entry on the parent's
-	 * cg_children
-	 */
 	config_item_get(item);
 }

@@ -700,10 +684,6 @@ static void client_drop_item(struct config_item *parent_item,
 	type = parent_item->ci_type;
 	BUG_ON(!type);

-	/*
-	 * If ->drop_item() exists, it is responsible for the
-	 * config_item_put().
-	 */
 	if (type->ct_group_ops && type->ct_group_ops->drop_item)
 		type->ct_group_ops->drop_item(to_config_group(parent_item),
 					      item);
@@ -714,28 +694,23 @@

 static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 {
-	int ret, module_got = 0;
+	int ret;
 	struct config_group *group;
 	struct config_item *item;
 	struct config_item *parent_item;
 	struct configfs_subsystem *subsys;
 	struct configfs_dirent *sd;
 	struct config_item_type *type;
-	struct module *owner = NULL;
+	struct module *owner;
 	char *name;

-	if (dentry->d_parent == configfs_sb->s_root) {
-		ret = -EPERM;
-		goto out;
-	}
+	if (dentry->d_parent == configfs_sb->s_root)
+		return -EPERM;

 	sd = dentry->d_parent->d_fsdata;
-	if (!(sd->s_type & CONFIGFS_USET_DIR)) {
-		ret = -EPERM;
-		goto out;
-	}
+	if (!(sd->s_type & CONFIGFS_USET_DIR))
+		return -EPERM;

-	/* Get a working ref for the duration of this function */
 	parent_item = configfs_get_config_item(dentry->d_parent);
 	type = parent_item->ci_type;
 	subsys = to_config_group(parent_item)->cg_subsys;
@@ -744,16 +719,15 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 	if (!type || !type->ct_group_ops ||
 	    (!type->ct_group_ops->make_group &&
 	     !type->ct_group_ops->make_item)) {
-		ret = -EPERM;  /* Lack-of-mkdir returns -EPERM */
-		goto out_put;
+		config_item_put(parent_item);
+		return -EPERM;  /* What lack-of-mkdir returns */
 	}

 	name = kmalloc(dentry->d_name.len + 1, GFP_KERNEL);
 	if (!name) {
-		ret = -ENOMEM;
-		goto out_put;
+		config_item_put(parent_item);
+		return -ENOMEM;
 	}
-
 	snprintf(name, dentry->d_name.len + 1, "%s", dentry->d_name.name);

 	down(&subsys->su_sem);
@@ -774,67 +748,40 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 	kfree(name);
 	if (!item) {
-		/*
-		 * If item == NULL, then link_obj() was never called.
-		 * There are no extra references to clean up.
-		 */
-		ret = -ENOMEM;
-		goto out_put;
+		config_item_put(parent_item);
+		return -ENOMEM;
 	}

-	/*
-	 * link_obj() has been called (via link_group() for groups).
-	 * From here on out, errors must clean that up.
-	 */
-
+	ret = -EINVAL;
 	type = item->ci_type;
-	if (!type) {
-		ret = -EINVAL;
-		goto out_unlink;
-	}
-
-	owner = type->ct_owner;
-	if (!try_module_get(owner)) {
-		ret = -EINVAL;
-		goto out_unlink;
-	}
-
-	/*
-	 * I hate doing it this way, but if there is
-	 * an error, module_put() probably should
-	 * happen after any cleanup.
-	 */
-	module_got = 1;
+	if (type) {
+		owner = type->ct_owner;
+		if (try_module_get(owner)) {
+			if (group) {
+				ret = configfs_attach_group(parent_item,
+							    item,
+							    dentry);
+			} else {
+				ret = configfs_attach_item(parent_item,
+							   item,
+							   dentry);
+			}

-	if (group)
-		ret = configfs_attach_group(parent_item, item, dentry);
-	else
-		ret = configfs_attach_item(parent_item, item, dentry);
+			if (ret) {
+				down(&subsys->su_sem);
+				if (group)
+					unlink_group(group);
+				else
+					unlink_obj(item);
+				client_drop_item(parent_item, item);
+				up(&subsys->su_sem);

-out_unlink:
-	if (ret) {
-		/* Tear down everything we built up */
-		down(&subsys->su_sem);
-		if (group)
-			unlink_group(group);
-		else
-			unlink_obj(item);
-		client_drop_item(parent_item, item);
-		up(&subsys->su_sem);
-
-		if (module_got)
-			module_put(owner);
+				config_item_put(parent_item);
+				module_put(owner);
+			}
+		}
 	}

-out_put:
-	/*
-	 * link_obj()/link_group() took a reference from child->parent,
-	 * so the parent is safely pinned.  We can drop our working
-	 * reference.
-	 */
-	config_item_put(parent_item);
-
-out:
 	return ret;
 }

@@ -854,7 +801,6 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
 	if (sd->s_type & CONFIGFS_USET_DEFAULT)
 		return -EPERM;

-	/* Get a working ref until we have the child */
 	parent_item = configfs_get_config_item(dentry->d_parent);
 	subsys = to_config_group(parent_item)->cg_subsys;
 	BUG_ON(!subsys);
@@ -871,7 +817,6 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
 		return ret;
 	}

-	/* Get a working ref for the duration of this function */
 	item = configfs_get_config_item(dentry);

 	/* Drop reference from above, item already holds one. */
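[Editor's note: the configfs_mkdir() rewrite above trades goto-label unwinding for nested success paths. For readers comparing the two shapes, here is a minimal user-space rendition of the goto-unwind idiom being removed; the function and its resources are invented.]

    #include <stdlib.h>

    static int do_job(void)
    {
            char *a, *b;
            int ret = -1;

            a = malloc(16);
            if (!a)
                    goto out;

            b = malloc(16);
            if (!b)
                    goto out_free_a;

            /* ... use a and b ... */
            ret = 0;

            free(b);
    out_free_a:
            free(a);
    out:
            return ret;
    }

    int main(void)
    {
            return do_job() ? EXIT_FAILURE : EXIT_SUCCESS;
    }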
diff --git a/trunk/fs/ocfs2/aops.c b/trunk/fs/ocfs2/aops.c
index 47152bf9a7f2..0d858d0b25be 100644
--- a/trunk/fs/ocfs2/aops.c
+++ b/trunk/fs/ocfs2/aops.c
@@ -276,29 +276,13 @@ static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
 	return ret;
 }

-/* This can also be called from ocfs2_write_zero_page() which has done
- * it's own cluster locking. */
-int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
-			       unsigned from, unsigned to)
-{
-	int ret;
-
-	down_read(&OCFS2_I(inode)->ip_alloc_sem);
-
-	ret = block_prepare_write(page, from, to, ocfs2_get_block);
-
-	up_read(&OCFS2_I(inode)->ip_alloc_sem);
-
-	return ret;
-}
-
 /*
  * ocfs2_prepare_write() can be an outer-most ocfs2 call when it is called
  * from loopback. It must be able to perform its own locking around
  * ocfs2_get_block().
  */
-static int ocfs2_prepare_write(struct file *file, struct page *page,
-			       unsigned from, unsigned to)
+int ocfs2_prepare_write(struct file *file, struct page *page,
+			unsigned from, unsigned to)
 {
 	struct inode *inode = page->mapping->host;
 	int ret;
@@ -311,7 +295,11 @@ static int ocfs2_prepare_write(struct file *file, struct page *page,
 		goto out;
 	}

-	ret = ocfs2_prepare_write_nolock(inode, page, from, to);
+	down_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+	ret = block_prepare_write(page, from, to, ocfs2_get_block);
+
+	up_read(&OCFS2_I(inode)->ip_alloc_sem);

 	ocfs2_meta_unlock(inode, 0);
 out:
@@ -637,31 +625,11 @@ static ssize_t ocfs2_direct_IO(int rw,
 	int ret;

 	mlog_entry_void();
-
-	/*
-	 * We get PR data locks even for O_DIRECT.  This allows
-	 * concurrent O_DIRECT I/O but doesn't let O_DIRECT with
-	 * extending and buffered zeroing writes race.  If they did
-	 * race then the buffered zeroing could be written back after
-	 * the O_DIRECT I/O.  It's one thing to tell people not to mix
-	 * buffered and O_DIRECT writes, but expecting them to
-	 * understand that file extension is also an implicit buffered
-	 * write is too much.  By getting the PR we force writeback of
-	 * the buffered zeroing before proceeding.
-	 */
-	ret = ocfs2_data_lock(inode, 0);
-	if (ret < 0) {
-		mlog_errno(ret);
-		goto out;
-	}
-	ocfs2_data_unlock(inode, 0);
-
 	ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
 					    inode->i_sb->s_bdev, iov, offset,
 					    nr_segs,
 					    ocfs2_direct_IO_get_blocks,
 					    ocfs2_dio_end_io);
-out:
 	mlog_exit(ret);
 	return ret;
 }
diff --git a/trunk/fs/ocfs2/aops.h b/trunk/fs/ocfs2/aops.h
index e88c3f0b8fa9..d40456d509a0 100644
--- a/trunk/fs/ocfs2/aops.h
+++ b/trunk/fs/ocfs2/aops.h
@@ -22,8 +22,8 @@
 #ifndef OCFS2_AOPS_H
 #define OCFS2_AOPS_H

-int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
-			       unsigned from, unsigned to);
+int ocfs2_prepare_write(struct file *file, struct page *page,
+			unsigned from, unsigned to);

 struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode,
 							 struct page *page,
diff --git a/trunk/fs/ocfs2/extent_map.c b/trunk/fs/ocfs2/extent_map.c
index 1a5c69071df6..4601fc256f11 100644
--- a/trunk/fs/ocfs2/extent_map.c
+++ b/trunk/fs/ocfs2/extent_map.c
@@ -569,7 +569,7 @@ static int ocfs2_extent_map_insert(struct inode *inode,

 	ret = -ENOMEM;
 	ctxt.new_ent = kmem_cache_alloc(ocfs2_em_ent_cachep,
-					GFP_NOFS);
+					GFP_KERNEL);
 	if (!ctxt.new_ent) {
 		mlog_errno(ret);
 		return ret;
@@ -583,14 +583,14 @@ static int ocfs2_extent_map_insert(struct inode *inode,
 		if (ctxt.need_left && !ctxt.left_ent) {
 			ctxt.left_ent = kmem_cache_alloc(ocfs2_em_ent_cachep,
-							 GFP_NOFS);
+							 GFP_KERNEL);
 			if (!ctxt.left_ent)
 				break;
 		}
 		if (ctxt.need_right && !ctxt.right_ent) {
 			ctxt.right_ent = kmem_cache_alloc(ocfs2_em_ent_cachep,
-							  GFP_NOFS);
+							  GFP_KERNEL);
 			if (!ctxt.right_ent)
 				break;
 		}
diff --git a/trunk/fs/ocfs2/file.c b/trunk/fs/ocfs2/file.c
index a9559c874530..581eb451a41a 100644
--- a/trunk/fs/ocfs2/file.c
+++ b/trunk/fs/ocfs2/file.c
@@ -613,8 +613,7 @@ static int ocfs2_extend_allocation(struct inode *inode,

 /* Some parts of this taken from generic_cont_expand, which turned out
  * to be too fragile to do exactly what we need without us having to
- * worry about recursive locking in ->prepare_write() and
- * ->commit_write(). */
+ * worry about recursive locking in ->commit_write(). */
 static int ocfs2_write_zero_page(struct inode *inode,
 				 u64 size)
 {
@@ -642,7 +641,7 @@ static int ocfs2_write_zero_page(struct inode *inode,
 		goto out;
 	}

-	ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);
+	ret = ocfs2_prepare_write(NULL, page, offset, offset);
 	if (ret < 0) {
 		mlog_errno(ret);
 		goto out_unlock;
@@ -696,26 +695,13 @@ static int ocfs2_zero_extend(struct inode *inode,
 	return ret;
 }

-/*
- * A tail_to_skip value > 0 indicates that we're being called from
- * ocfs2_file_aio_write(). This has the following implications:
- *
- * - we don't want to update i_size
- * - di_bh will be NULL, which is fine because it's only used in the
- *   case where we want to update i_size.
- * - ocfs2_zero_extend() will then only be filling the hole created
- *   between i_size and the start of the write.
- */
 static int ocfs2_extend_file(struct inode *inode,
 			     struct buffer_head *di_bh,
-			     u64 new_i_size,
-			     size_t tail_to_skip)
+			     u64 new_i_size)
 {
 	int ret = 0;
 	u32 clusters_to_add;

-	BUG_ON(!tail_to_skip && !di_bh);
-
 	/* setattr sometimes calls us like this. */
 	if (new_i_size == 0)
 		goto out;
@@ -728,44 +714,27 @@ static int ocfs2_extend_file(struct inode *inode,
 		OCFS2_I(inode)->ip_clusters;

 	if (clusters_to_add) {
-		/*
-		 * protect the pages that ocfs2_zero_extend is going to
-		 * be pulling into the page cache.. we do this before the
-		 * metadata extend so that we don't get into the situation
-		 * where we've extended the metadata but can't get the data
-		 * lock to zero.
-		 */
-		ret = ocfs2_data_lock(inode, 1);
-		if (ret < 0) {
-			mlog_errno(ret);
-			goto out;
-		}
-
 		ret = ocfs2_extend_allocation(inode, clusters_to_add);
 		if (ret < 0) {
 			mlog_errno(ret);
-			goto out_unlock;
+			goto out;
 		}

-		ret = ocfs2_zero_extend(inode, (u64)new_i_size - tail_to_skip);
+		ret = ocfs2_zero_extend(inode, new_i_size);
 		if (ret < 0) {
 			mlog_errno(ret);
-			goto out_unlock;
+			goto out;
 		}
-	}
+	}

-	if (!tail_to_skip) {
-		/* We're being called from ocfs2_setattr() which wants
-		 * us to update i_size */
-		ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
-		if (ret < 0)
-			mlog_errno(ret);
+	/* No allocation required, we just use this helper to
+	 * do a trivial update of i_size. */
+	ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
+	if (ret < 0) {
+		mlog_errno(ret);
+		goto out;
 	}

-out_unlock:
-	if (clusters_to_add) /* this is the only case in which we lock */
-		ocfs2_data_unlock(inode, 1);
-
 out:
 	return ret;
 }
@@ -824,7 +793,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
 		if (i_size_read(inode) > attr->ia_size)
 			status = ocfs2_truncate_file(inode, bh, attr->ia_size);
 		else
-			status = ocfs2_extend_file(inode, bh, attr->ia_size, 0);
+			status = ocfs2_extend_file(inode, bh, attr->ia_size);
 		if (status < 0) {
 			if (status != -ENOSPC)
 				mlog_errno(status);
@@ -1080,12 +1049,21 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
 		if (!clusters)
 			break;

-		ret = ocfs2_extend_file(inode, NULL, newsize, count);
+		ret = ocfs2_extend_allocation(inode, clusters);
 		if (ret < 0) {
 			if (ret != -ENOSPC)
 				mlog_errno(ret);
 			goto out;
 		}
+
+		/* Fill any holes which would've been created by this
+		 * write. If we're O_APPEND, this will wind up
+		 * (correctly) being a noop. */
+		ret = ocfs2_zero_extend(inode, (u64) newsize - count);
+		if (ret < 0) {
+			mlog_errno(ret);
+			goto out;
+		}
 		break;
 	}

@@ -1168,22 +1146,6 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
 		ocfs2_iocb_set_rw_locked(iocb);
 	}

-	/*
-	 * We're fine letting folks race truncates and extending
-	 * writes with read across the cluster, just like they can
-	 * locally. Hence no rw_lock during read.
-	 *
-	 * Take and drop the meta data lock to update inode fields
-	 * like i_size. This allows the checks down below
-	 * generic_file_aio_read() a chance of actually working.
-	 */
-	ret = ocfs2_meta_lock(inode, NULL, NULL, 0);
-	if (ret < 0) {
-		mlog_errno(ret);
-		goto bail;
-	}
-	ocfs2_meta_unlock(inode, 0);
-
 	ret = generic_file_aio_read(iocb, buf, count, iocb->ki_pos);
 	if (ret == -EINVAL)
 		mlog(ML_ERROR, "generic_file_aio_read returned -EINVAL\n");
diff --git a/trunk/fs/ocfs2/journal.c b/trunk/fs/ocfs2/journal.c
index eebc3cfa6be8..6a610ae53583 100644
--- a/trunk/fs/ocfs2/journal.c
+++ b/trunk/fs/ocfs2/journal.c
@@ -117,7 +117,7 @@ struct ocfs2_journal_handle *ocfs2_alloc_handle(struct ocfs2_super *osb)
 {
 	struct ocfs2_journal_handle *retval = NULL;

-	retval = kcalloc(1, sizeof(*retval), GFP_NOFS);
+	retval = kcalloc(1, sizeof(*retval), GFP_KERNEL);
 	if (!retval) {
 		mlog(ML_ERROR, "Failed to allocate memory for journal "
 		     "handle!\n");
@@ -870,11 +870,9 @@ static int ocfs2_force_read_journal(struct inode *inode)
 		if (p_blocks > CONCURRENT_JOURNAL_FILL)
 			p_blocks = CONCURRENT_JOURNAL_FILL;

-		/* We are reading journal data which should not
-		 * be put in the uptodate cache */
 		status = ocfs2_read_blocks(OCFS2_SB(inode->i_sb),
 					   p_blkno, p_blocks, bhs, 0,
-					   NULL);
+					   inode);
 		if (status < 0) {
 			mlog_errno(status);
 			goto bail;
@@ -984,7 +982,7 @@ static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
 {
 	struct ocfs2_la_recovery_item *item;

-	item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS);
+	item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_KERNEL);
 	if (!item) {
 		/* Though we wish to avoid it, we are in fact safe in
 		 * skipping local alloc cleanup as fsck.ocfs2 is more
diff --git a/trunk/fs/ocfs2/uptodate.c b/trunk/fs/ocfs2/uptodate.c
index b8a00a793326..04a684dfdd96 100644
--- a/trunk/fs/ocfs2/uptodate.c
+++ b/trunk/fs/ocfs2/uptodate.c
@@ -337,7 +337,7 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi,
 	     (unsigned long long)oi->ip_blkno,
 	     (unsigned long long)block, expand_tree);

-	new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS);
+	new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_KERNEL);
 	if (!new) {
 		mlog_errno(-ENOMEM);
 		return;
@@ -349,7 +349,7 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi,
 		 * has no way of tracking that.
 		 */
 		for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) {
 			tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep,
-						   GFP_NOFS);
+						   GFP_KERNEL);
 			if (!tree[i]) {
 				mlog_errno(-ENOMEM);
 				goto out_free;
diff --git a/trunk/fs/ocfs2/vote.c b/trunk/fs/ocfs2/vote.c
index ee42765a8553..53049a204197 100644
--- a/trunk/fs/ocfs2/vote.c
+++ b/trunk/fs/ocfs2/vote.c
@@ -586,7 +586,7 @@ static struct ocfs2_net_wait_ctxt *ocfs2_new_net_wait_ctxt(unsigned int response
 {
 	struct ocfs2_net_wait_ctxt *w;

-	w = kcalloc(1, sizeof(*w), GFP_NOFS);
+	w = kcalloc(1, sizeof(*w), GFP_KERNEL);
 	if (!w) {
 		mlog_errno(-ENOMEM);
 		goto bail;
@@ -749,7 +749,7 @@ static struct ocfs2_vote_msg * ocfs2_new_vote_request(struct ocfs2_super *osb,

 	BUG_ON(!ocfs2_is_valid_vote_request(type));

-	request = kcalloc(1, sizeof(*request), GFP_NOFS);
+	request = kcalloc(1, sizeof(*request), GFP_KERNEL);
 	if (!request) {
 		mlog_errno(-ENOMEM);
 	} else {
@@ -1129,7 +1129,7 @@ static int ocfs2_handle_vote_message(struct o2net_msg *msg,
 	struct ocfs2_super *osb = data;
 	struct ocfs2_vote_work *work;

-	work = kmalloc(sizeof(struct ocfs2_vote_work), GFP_NOFS);
+	work = kmalloc(sizeof(struct ocfs2_vote_work), GFP_KERNEL);
 	if (!work) {
 		status = -ENOMEM;
 		mlog_errno(status);
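[Editor's note: several ocfs2 hunks above switch allocations between GFP_NOFS and GFP_KERNEL. The practical difference: under memory pressure a GFP_KERNEL allocation may recurse into filesystem writeback to reclaim pages, which can deadlock if the caller already holds filesystem locks, while GFP_NOFS forbids that recursion. A hedged kernel-style sketch, with an invented context structure and helper:]

    #include <linux/slab.h>

    struct my_fs_ctx {
            int pending;
    };

    static struct my_fs_ctx *alloc_ctx_under_fs_locks(void)
    {
            /* Called with cluster/inode locks held: reclaim must not
             * re-enter the filesystem, hence GFP_NOFS, not GFP_KERNEL. */
            return kmalloc(sizeof(struct my_fs_ctx), GFP_NOFS);
    }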
diff --git a/trunk/include/asm-arm/spinlock.h b/trunk/include/asm-arm/spinlock.h
index 406ca97a8ab2..43ad4e55878c 100644
--- a/trunk/include/asm-arm/spinlock.h
+++ b/trunk/include/asm-arm/spinlock.h
@@ -142,9 +142,6 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 	: "cc");
 }

-/* write_can_lock - would write_trylock() succeed? */
-#define __raw_write_can_lock(x)	((x)->lock == 0x80000000)
-
 /*
  * Read locks are a bit more hairy:
  *  - Exclusively load the lock value.
@@ -201,7 +198,4 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)

 #define __raw_read_trylock(lock) generic__raw_read_trylock(lock)

-/* read_can_lock - would read_trylock() succeed? */
-#define __raw_read_can_lock(x)	((x)->lock < 0x80000000)
-
 #endif /* __ASM_SPINLOCK_H */
diff --git a/trunk/include/linux/mmc/mmc.h b/trunk/include/linux/mmc/mmc.h
index 03a14a30c46a..bdc556d88498 100644
--- a/trunk/include/linux/mmc/mmc.h
+++ b/trunk/include/linux/mmc/mmc.h
@@ -69,7 +69,6 @@ struct mmc_data {
 	unsigned int		timeout_ns;	/* data timeout (in ns, max 80ms) */
 	unsigned int		timeout_clks;	/* data timeout (in clocks) */
 	unsigned int		blksz_bits;	/* data block size */
-	unsigned int		blksz;		/* data block size */
 	unsigned int		blocks;		/* number of blocks */
 	unsigned int		error;		/* data error */
 	unsigned int		flags;
diff --git a/trunk/include/net/sctp/command.h b/trunk/include/net/sctp/command.h
index 807d6f1ef4b5..34a1a09e5aef 100644
--- a/trunk/include/net/sctp/command.h
+++ b/trunk/include/net/sctp/command.h
@@ -99,7 +99,6 @@ typedef enum {
 	SCTP_CMD_DEL_NON_PRIMARY, /* Removes non-primary peer transports. */
 	SCTP_CMD_T3_RTX_TIMERS_STOP, /* Stops T3-rtx pending timers */
 	SCTP_CMD_FORCE_PRIM_RETRAN,  /* Forces retrans. over primary path. */
-	SCTP_CMD_SET_SK_ERR,	/* Set sk_err */
 	SCTP_CMD_LAST
 } sctp_verb_t;

diff --git a/trunk/include/net/sctp/sctp.h b/trunk/include/net/sctp/sctp.h
index aa6033ca7cd8..e673b2c984e9 100644
--- a/trunk/include/net/sctp/sctp.h
+++ b/trunk/include/net/sctp/sctp.h
@@ -461,12 +461,12 @@ static inline int sctp_frag_point(const struct sctp_sock *sp, int pmtu)
  * there is room for a param header too.
 */
 #define sctp_walk_params(pos, chunk, member)\
-_sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
+_sctp_walk_params((pos), (chunk), WORD_ROUND(ntohs((chunk)->chunk_hdr.length)), member)

 #define _sctp_walk_params(pos, chunk, end, member)\
 for (pos.v = chunk->member;\
      pos.v <= (void *)chunk + end - sizeof(sctp_paramhdr_t) &&\
-     pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
+     pos.v <= (void *)chunk + end - WORD_ROUND(ntohs(pos.p->length)) &&\
      ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
      pos.v += WORD_ROUND(ntohs(pos.p->length)))

@@ -477,7 +477,7 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
 for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
 	    sizeof(sctp_chunkhdr_t));\
      (void *)err <= (void *)chunk_hdr + end - sizeof(sctp_errhdr_t) &&\
-     (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
+     (void *)err <= (void *)chunk_hdr + end - WORD_ROUND(ntohs(err->length)) &&\
      ntohs(err->length) >= sizeof(sctp_errhdr_t); \
      err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length))))
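[Editor's note: the sctp_walk_params()/_sctp_walk_errors() hunks above differ in whether the bounds check uses the raw parameter length or the WORD_ROUND()-padded length that the cursor actually advances by. A user-space rendition of the same TLV walk is below; struct paramhdr and walk_params() are illustrative, not kernel code.]

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>

    #define WORD_ROUND(s) (((s) + 3) & ~3U)

    struct paramhdr {
            uint16_t type;
            uint16_t length;        /* network byte order, includes header */
    };

    static void walk_params(const uint8_t *buf, size_t len)
    {
            const uint8_t *pos = buf;
            const uint8_t *end = buf + len;

            while (pos + sizeof(struct paramhdr) <= end) {
                    struct paramhdr ph;
                    size_t plen;

                    memcpy(&ph, pos, sizeof(ph));
                    plen = ntohs(ph.length);

                    /* Reject runts, and any parameter whose padded length
                     * would overrun the buffer: the same checks the kernel
                     * macro performs before advancing. */
                    if (plen < sizeof(struct paramhdr) ||
                        WORD_ROUND(plen) > (size_t)(end - pos))
                            break;

                    printf("param type %u, length %zu\n", ntohs(ph.type), plen);
                    pos += WORD_ROUND(plen);
            }
    }

    int main(void)
    {
            /* One 6-byte parameter (padded to 8) followed by garbage. */
            uint8_t buf[12] = { 0x00, 0x01, 0x00, 0x06, 'h', 'i', 0, 0,
                                0xff, 0xff, 0xff, 0xff };
            walk_params(buf, sizeof(buf));
            return 0;
    }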
diff --git a/trunk/net/802/tr.c b/trunk/net/802/tr.c
index e9dc803f2fe0..afd8385c0c9c 100644
--- a/trunk/net/802/tr.c
+++ b/trunk/net/802/tr.c
@@ -643,5 +643,6 @@ static int __init rif_init(void)

 module_init(rif_init);

+EXPORT_SYMBOL(tr_source_route);
 EXPORT_SYMBOL(tr_type_trans);
 EXPORT_SYMBOL(alloc_trdev);
diff --git a/trunk/net/bridge/netfilter/ebt_log.c b/trunk/net/bridge/netfilter/ebt_log.c
index 466ed3440b74..d159c92cca84 100644
--- a/trunk/net/bridge/netfilter/ebt_log.c
+++ b/trunk/net/bridge/netfilter/ebt_log.c
@@ -168,7 +168,7 @@ static void ebt_log(const struct sk_buff *skb, unsigned int hooknr,

 	if (info->bitmask & EBT_LOG_NFLOG)
 		nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
-			      "%s", info->prefix);
+			      info->prefix);
 	else
 		ebt_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
 			       info->prefix);
diff --git a/trunk/net/ipv4/netfilter/arp_tables.c b/trunk/net/ipv4/netfilter/arp_tables.c
index d0d19192026d..c2d92f99a2b8 100644
--- a/trunk/net/ipv4/netfilter/arp_tables.c
+++ b/trunk/net/ipv4/netfilter/arp_tables.c
@@ -948,7 +948,7 @@ static int do_add_counters(void __user *user, unsigned int len)

 	write_lock_bh(&t->lock);
 	private = t->private;
-	if (private->number != tmp.num_counters) {
+	if (private->number != paddc->num_counters) {
 		ret = -EINVAL;
 		goto unlock_up_free;
 	}
diff --git a/trunk/net/ipv4/netfilter/ip_nat_proto_gre.c b/trunk/net/ipv4/netfilter/ip_nat_proto_gre.c
index 96ceabaec402..6c4899d8046a 100644
--- a/trunk/net/ipv4/netfilter/ip_nat_proto_gre.c
+++ b/trunk/net/ipv4/netfilter/ip_nat_proto_gre.c
@@ -49,15 +49,15 @@ gre_in_range(const struct ip_conntrack_tuple *tuple,
 	     const union ip_conntrack_manip_proto *min,
 	     const union ip_conntrack_manip_proto *max)
 {
-	__be16 key;
+	u_int32_t key;

 	if (maniptype == IP_NAT_MANIP_SRC)
 		key = tuple->src.u.gre.key;
 	else
 		key = tuple->dst.u.gre.key;

-	return ntohs(key) >= ntohs(min->gre.key)
-	       && ntohs(key) <= ntohs(max->gre.key);
+	return ntohl(key) >= ntohl(min->gre.key)
+	       && ntohl(key) <= ntohl(max->gre.key);
 }

 /* generate unique tuple ...
 */
@@ -81,14 +81,14 @@ gre_unique_tuple(struct ip_conntrack_tuple *tuple,
 		min = 1;
 		range_size = 0xffff;
 	} else {
-		min = ntohs(range->min.gre.key);
-		range_size = ntohs(range->max.gre.key) - min + 1;
+		min = ntohl(range->min.gre.key);
+		range_size = ntohl(range->max.gre.key) - min + 1;
 	}

 	DEBUGP("min = %u, range_size = %u\n", min, range_size);

 	for (i = 0; i < range_size; i++, key++) {
-		*keyptr = htons(min + key % range_size);
+		*keyptr = htonl(min + key % range_size);

 		if (!ip_nat_used_tuple(tuple, conntrack))
 			return 1;
 	}
diff --git a/trunk/net/ipv4/netfilter/ipt_LOG.c b/trunk/net/ipv4/netfilter/ipt_LOG.c
index b98f7b08b084..39fd4c2a2386 100644
--- a/trunk/net/ipv4/netfilter/ipt_LOG.c
+++ b/trunk/net/ipv4/netfilter/ipt_LOG.c
@@ -428,7 +428,7 @@ ipt_log_target(struct sk_buff **pskb,

 	if (loginfo->logflags & IPT_LOG_NFLOG)
 		nf_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
-			      "%s", loginfo->prefix);
+			      loginfo->prefix);
 	else
 		ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
 			       loginfo->prefix);
diff --git a/trunk/net/ipv4/netfilter/ipt_recent.c b/trunk/net/ipv4/netfilter/ipt_recent.c
index b847ee409efb..143843285702 100644
--- a/trunk/net/ipv4/netfilter/ipt_recent.c
+++ b/trunk/net/ipv4/netfilter/ipt_recent.c
@@ -821,7 +821,6 @@ checkentry(const char *tablename,
 	/* Create our proc 'status' entry. */
 	curr_table->status_proc = create_proc_entry(curr_table->name, ip_list_perms, proc_net_ipt_recent);
 	if (!curr_table->status_proc) {
-		vfree(hold);
 		printk(KERN_INFO RECENT_NAME ": checkentry: unable to allocate for /proc entry.\n");
 		/* Destroy the created table */
 		spin_lock_bh(&recent_lock);
@@ -846,6 +845,7 @@ checkentry(const char *tablename,
 		spin_unlock_bh(&recent_lock);
 		vfree(curr_table->time_info);
 		vfree(curr_table->hash_table);
+		vfree(hold);
 		vfree(curr_table->table);
 		vfree(curr_table);
 		return 0;
diff --git a/trunk/net/ipv4/tcp_input.c b/trunk/net/ipv4/tcp_input.c
index 4a538bc1683d..9f0cca4c4fae 100644
--- a/trunk/net/ipv4/tcp_input.c
+++ b/trunk/net/ipv4/tcp_input.c
@@ -1662,8 +1662,6 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
 			if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 				tp->lost_out += tcp_skb_pcount(skb);
-				if (IsReno(tp))
-					tcp_remove_reno_sacks(sk, tp, tcp_skb_pcount(skb) + 1);

 				/* clear xmit_retrans hint */
 				if (tp->retransmit_skb_hint &&
diff --git a/trunk/net/ipv6/netfilter/ip6_tables.c b/trunk/net/ipv6/netfilter/ip6_tables.c
index 2e72f89a7019..0a673038344f 100644
--- a/trunk/net/ipv6/netfilter/ip6_tables.c
+++ b/trunk/net/ipv6/netfilter/ip6_tables.c
@@ -1103,7 +1103,7 @@ do_add_counters(void __user *user, unsigned int len)

 	write_lock_bh(&t->lock);
 	private = t->private;
-	if (private->number != tmp.num_counters) {
+	if (private->number != paddc->num_counters) {
 		ret = -EINVAL;
 		goto unlock_up_free;
 	}
diff --git a/trunk/net/ipv6/netfilter/ip6t_LOG.c b/trunk/net/ipv6/netfilter/ip6t_LOG.c
index 73c6300109d6..a96c0de14b00 100644
--- a/trunk/net/ipv6/netfilter/ip6t_LOG.c
+++ b/trunk/net/ipv6/netfilter/ip6t_LOG.c
@@ -439,7 +439,7 @@ ip6t_log_target(struct sk_buff **pskb,

 	if (loginfo->logflags & IP6T_LOG_NFLOG)
 		nf_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
-			      "%s", loginfo->prefix);
+			      loginfo->prefix);
 	else
 		ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
 				loginfo->prefix);
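[Editor's note: the ebt_log/ipt_LOG/ip6t_LOG hunks above change whether the user-supplied prefix is passed to nf_log_packet() behind a fixed "%s" or directly in the format-string position. The general hazard with the latter shape, in miniature:]

    #include <stdio.h>

    int main(void)
    {
            const char prefix[] = "user-supplied %x %x %n text";

            /* Unsafe shape: printf(prefix) would interpret the '%'
             * directives embedded in the data.
             * Safe shape: the data rides behind a fixed format string. */
            printf("%s\n", prefix);
            return 0;
    }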
diff --git a/trunk/net/ipv6/netfilter/ip6t_eui64.c b/trunk/net/ipv6/netfilter/ip6t_eui64.c
index 4f6b84c8f4ab..94dbdb8b458d 100644
--- a/trunk/net/ipv6/netfilter/ip6t_eui64.c
+++ b/trunk/net/ipv6/netfilter/ip6t_eui64.c
@@ -40,7 +40,7 @@ match(const struct sk_buff *skb,

 	memset(eui64, 0, sizeof(eui64));

-	if (eth_hdr(skb)->h_proto == htons(ETH_P_IPV6)) {
+	if (eth_hdr(skb)->h_proto == ntohs(ETH_P_IPV6)) {
 		if (skb->nh.ipv6h->version == 0x6) {
 			memcpy(eui64, eth_hdr(skb)->h_source, 3);
 			memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3);
diff --git a/trunk/net/ipx/af_ipx.c b/trunk/net/ipx/af_ipx.c
index 811d998725bc..2dbf134d5266 100644
--- a/trunk/net/ipx/af_ipx.c
+++ b/trunk/net/ipx/af_ipx.c
@@ -944,9 +944,9 @@ static int ipxitf_create_internal(struct ipx_interface_definition *idef)
 	return rc;
 }

-static __be16 ipx_map_frame_type(unsigned char type)
+static int ipx_map_frame_type(unsigned char type)
 {
-	__be16 rc = 0;
+	int rc = 0;

 	switch (type) {
 	case IPX_FRAME_ETHERII: rc = htons(ETH_P_IPX);   break;
diff --git a/trunk/net/ipx/ipx_route.c b/trunk/net/ipx/ipx_route.c
index a394c6fe19a2..67774448efd9 100644
--- a/trunk/net/ipx/ipx_route.c
+++ b/trunk/net/ipx/ipx_route.c
@@ -119,7 +119,7 @@ static int ipxrtr_create(struct ipx_route_definition *rd)
 	return rc;
 }

-static int ipxrtr_delete(__u32 net)
+static int ipxrtr_delete(long net)
 {
 	struct ipx_route *r, *tmp;
 	int rc;
diff --git a/trunk/net/netfilter/nfnetlink_log.c b/trunk/net/netfilter/nfnetlink_log.c
index 61cdda4e5d3b..c60273cad778 100644
--- a/trunk/net/netfilter/nfnetlink_log.c
+++ b/trunk/net/netfilter/nfnetlink_log.c
@@ -321,7 +321,7 @@ static int
 nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags)
 {
 	spin_lock_bh(&inst->lock);
-	inst->flags = flags;
+	inst->flags = ntohs(flags);
 	spin_unlock_bh(&inst->lock);

 	return 0;
@@ -902,7 +902,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 		if (nfula[NFULA_CFG_FLAGS-1]) {
 			u_int16_t flags =
 				*(u_int16_t *)NFA_DATA(nfula[NFULA_CFG_FLAGS-1]);
-			nfulnl_set_flags(inst, ntohs(flags));
+			nfulnl_set_flags(inst, ntohl(flags));
 		}

 out_put:
diff --git a/trunk/net/sched/sch_generic.c b/trunk/net/sched/sch_generic.c
index 138ea92ed268..31eb83717c26 100644
--- a/trunk/net/sched/sch_generic.c
+++ b/trunk/net/sched/sch_generic.c
@@ -193,10 +193,8 @@ static void dev_watchdog(unsigned long arg)
 		    netif_running(dev) &&
 		    netif_carrier_ok(dev)) {
 			if (netif_queue_stopped(dev) &&
-			    time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {
-
-				printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
-				       dev->name);
+			    (jiffies - dev->trans_start) > dev->watchdog_timeo) {
+				printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name);
 				dev->tx_timeout(dev);
 			}
 			if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
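[Editor's note: the dev_watchdog() hunk above swaps a time_after() test for an explicit jiffies subtraction. Both of those particular forms tolerate counter wrap; the kernel idiom exists because a naive ordered comparison of raw timestamps does not. A small demonstration, with after() standing in for time_after():]

    #include <stdio.h>

    typedef unsigned long jiffies_t;

    /* Same windowed comparison the kernel's time_after(a, b) performs. */
    static int after(jiffies_t a, jiffies_t b)
    {
            return (long)(b - a) < 0;
    }

    int main(void)
    {
            jiffies_t trans_start = (jiffies_t)-2;  /* counter about to wrap */
            jiffies_t now = 3;                      /* five ticks later, post-wrap */

            printf("naive now > trans_start: %d\n", now > trans_start);      /* 0, fooled by wrap */
            printf("windowed after():       %d\n", after(now, trans_start)); /* 1, correct */
            return 0;
    }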
diff --git a/trunk/net/sctp/input.c b/trunk/net/sctp/input.c
index 1662f9cc869e..d117ebc75cf8 100644
--- a/trunk/net/sctp/input.c
+++ b/trunk/net/sctp/input.c
@@ -73,8 +73,6 @@ static struct sctp_association *__sctp_lookup_association(
 					const union sctp_addr *peer,
 					struct sctp_transport **pt);

-static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
-
 /* Calculate the SCTP checksum of an SCTP packet.
 */
 static inline int sctp_rcv_checksum(struct sk_buff *skb)
@@ -188,6 +186,7 @@ int sctp_rcv(struct sk_buff *skb)
 	 */
 	if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) {
+		sock_put(sk);
 		if (asoc) {
 			sctp_association_put(asoc);
 			asoc = NULL;
@@ -198,6 +197,7 @@ int sctp_rcv(struct sk_buff *skb)
 		sk = sctp_get_ctl_sock();
 		ep = sctp_sk(sk)->ep;
 		sctp_endpoint_hold(ep);
+		sock_hold(sk);
 		rcvr = &ep->base;
 	}

@@ -253,18 +253,25 @@ int sctp_rcv(struct sk_buff *skb)
 	 */
 	sctp_bh_lock_sock(sk);

+	/* It is possible that the association could have moved to a different
+	 * socket if it is peeled off. If so, update the sk.
+	 */
+	if (sk != rcvr->sk) {
+		sctp_bh_lock_sock(rcvr->sk);
+		sctp_bh_unlock_sock(sk);
+		sk = rcvr->sk;
+	}
+
 	if (sock_owned_by_user(sk))
-		sctp_add_backlog(sk, skb);
+		sk_add_backlog(sk, skb);
 	else
-		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
+		sctp_backlog_rcv(sk, skb);

+	/* Release the sock and the sock ref we took in the lookup calls.
+	 * The asoc/ep ref will be released in sctp_backlog_rcv.
+	 */
 	sctp_bh_unlock_sock(sk);
-
-	/* Release the asoc/ep ref we took in the lookup calls. */
-	if (asoc)
-		sctp_association_put(asoc);
-	else
-		sctp_endpoint_put(ep);
+	sock_put(sk);

 	return 0;

@@ -273,7 +280,8 @@ int sctp_rcv(struct sk_buff *skb)
 	return 0;

 discard_release:
-	/* Release the asoc/ep ref we took in the lookup calls. */
+	/* Release any structures we may be holding. */
+	sock_put(sk);
 	if (asoc)
 		sctp_association_put(asoc);
 	else
@@ -282,87 +290,56 @@ int sctp_rcv(struct sk_buff *skb)
 	goto discard_it;
 }

-/* Process the backlog queue of the socket.  Every skb on
- * the backlog holds a ref on an association or endpoint.
- * We hold this ref throughout the state machine to make
- * sure that the structure we need is still around.
+/* Handle second half of inbound skb processing.  If the sock was busy,
+ * we may have need to delay processing until later when the sock is
+ * released (on the backlog).   If not busy, we call this routine
+ * directly from the bottom half.
  */
 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
-	struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
+	struct sctp_inq *inqueue = NULL;
 	struct sctp_ep_common *rcvr = NULL;
-	int backloged = 0;

 	rcvr = chunk->rcvr;
-
-	/* If the rcvr is dead then the association or endpoint
-	 * has been deleted and we can safely drop the chunk
-	 * and refs that we are holding.
-	 */
-	if (rcvr->dead) {
-		sctp_chunk_free(chunk);
-		goto done;
-	}
-
-	if (unlikely(rcvr->sk != sk)) {
-		/* In this case, the association moved from one socket to
-		 * another.  We are currently sitting on the backlog of the
-		 * old socket, so we need to move.
-		 * However, since we are here in the process context we
-		 * need to take make sure that the user doesn't own
-		 * the new socket when we process the packet.
-		 * If the new socket is user-owned, queue the chunk to the
-		 * backlog of the new socket without dropping any refs.
-		 * Otherwise, we can safely push the chunk on the inqueue.
-		 */
-
-		sk = rcvr->sk;
-		sctp_bh_lock_sock(sk);
-
-		if (sock_owned_by_user(sk)) {
-			sk_add_backlog(sk, skb);
-			backloged = 1;
-		} else
-			sctp_inq_push(inqueue, chunk);
-
-		sctp_bh_unlock_sock(sk);
-
-		/* If the chunk was backloged again, don't drop refs */
-		if (backloged)
-			return 0;
-	} else {
-		sctp_inq_push(inqueue, chunk);
-	}
-
-done:
-	/* Release the refs we took in sctp_add_backlog */
-	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
-		sctp_association_put(sctp_assoc(rcvr));
-	else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
-		sctp_endpoint_put(sctp_ep(rcvr));
-	else
-		BUG();
-
+	BUG_TRAP(rcvr->sk == sk);
+
+	if (rcvr->dead) {
+		sctp_chunk_free(chunk);
+	} else {
+		inqueue = &chunk->rcvr->inqueue;
+		sctp_inq_push(inqueue, chunk);
+	}
+
+	/* Release the asoc/ep ref we took in the lookup calls in sctp_rcv. */
+	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+		sctp_association_put(sctp_assoc(rcvr));
+	else
+		sctp_endpoint_put(sctp_ep(rcvr));
+
 	return 0;
 }

-static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
+void sctp_backlog_migrate(struct sctp_association *assoc,
+			  struct sock *oldsk, struct sock *newsk)
 {
-	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
-	struct sctp_ep_common *rcvr = chunk->rcvr;
-
-	/* Hold the assoc/ep while hanging on the backlog queue.
-	 * This way, we know structures we need will not disappear from us
-	 */
-	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
-		sctp_association_hold(sctp_assoc(rcvr));
-	else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
-		sctp_endpoint_hold(sctp_ep(rcvr));
-	else
-		BUG();
+	struct sk_buff *skb;
+	struct sctp_chunk *chunk;

-	sk_add_backlog(sk, skb);
+	skb = oldsk->sk_backlog.head;
+	oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL;
+	while (skb != NULL) {
+		struct sk_buff *next = skb->next;
+
+		chunk = SCTP_INPUT_CB(skb)->chunk;
+		skb->next = NULL;
+		if (&assoc->base == chunk->rcvr)
+			sk_add_backlog(newsk, skb);
+		else
+			sk_add_backlog(oldsk, skb);
+		skb = next;
+	}
 }

 /* Handle icmp frag needed error.
 */
@@ -435,7 +412,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
 	union sctp_addr daddr;
 	struct sctp_af *af;
 	struct sock *sk = NULL;
-	struct sctp_association *asoc;
+	struct sctp_association *asoc = NULL;
 	struct sctp_transport *transport = NULL;

 	*app = NULL; *tpp = NULL;
@@ -476,6 +453,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
 	return sk;

 out:
+	sock_put(sk);
 	if (asoc)
 		sctp_association_put(asoc);
 	return NULL;
@@ -485,6 +463,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
 void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
 {
 	sctp_bh_unlock_sock(sk);
+	sock_put(sk);
 	if (asoc)
 		sctp_association_put(asoc);
 }
@@ -511,7 +490,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
 	int type = skb->h.icmph->type;
 	int code = skb->h.icmph->code;
 	struct sock *sk;
-	struct sctp_association *asoc = NULL;
+	struct sctp_association *asoc;
 	struct sctp_transport *transport;
 	struct inet_sock *inet;
 	char *saveip, *savesctp;
@@ -737,6 +716,7 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l

 hit:
 	sctp_endpoint_hold(ep);
+	sock_hold(epb->sk);
 	read_unlock(&head->lock);
 	return ep;
 }
@@ -838,6 +818,7 @@ static struct sctp_association *__sctp_lookup_association(
 hit:
 	*pt = transport;
 	sctp_association_hold(asoc);
+	sock_hold(epb->sk);
 	read_unlock(&head->lock);
 	return asoc;
 }
@@ -865,6 +846,7 @@ int sctp_has_association(const union sctp_addr *laddr,
 	struct sctp_transport *transport;

 	if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) {
+		sock_put(asoc->base.sk);
 		sctp_association_put(asoc);
 		return 1;
 	}
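[Editor's note: the sctp_rcv()/sctp_backlog_rcv() rework above revolves around the socket backlog: packets arriving while a process holds the socket lock are parked on sk->sk_backlog and replayed through the backlog-receive callback when the lock is released. The fragment below sketches that generic receive-path shape with the same 2.6-era calls the hunks use; deliver_or_backlog() and its process callback are invented names.]

    #include <net/sock.h>

    static void deliver_or_backlog(struct sock *sk, struct sk_buff *skb,
                                   int (*process)(struct sock *, struct sk_buff *))
    {
            bh_lock_sock(sk);
            if (sock_owned_by_user(sk))
                    sk_add_backlog(sk, skb);  /* replayed at release_sock() */
            else
                    process(sk, skb);         /* fast path, still in BH context */
            bh_unlock_sock(sk);
    }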
diff --git a/trunk/net/sctp/sm_sideeffect.c b/trunk/net/sctp/sm_sideeffect.c
index c5beb2ad7ef7..8d1dc24bab4c 100644
--- a/trunk/net/sctp/sm_sideeffect.c
+++ b/trunk/net/sctp/sm_sideeffect.c
@@ -498,6 +498,10 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 			SCTP_STATE(SCTP_STATE_CLOSED));

+	/* Set sk_err to ECONNRESET on a 1-1 style socket. */
+	if (!sctp_style(asoc->base.sk, UDP))
+		asoc->base.sk->sk_err = ECONNRESET;
+
 	/* SEND_FAILED sent later when cleaning up the association. */
 	asoc->outqueue.error = error;
 	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
@@ -834,15 +838,6 @@ static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
 	return;
 }

-/* Helper function to set sk_err on a 1-1 style socket. */
-static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
-{
-	struct sock *sk = asoc->base.sk;
-
-	if (!sctp_style(sk, UDP))
-		sk->sk_err = error;
-}
-
 /* These three macros allow us to pull the debugging code out of the
  * main flow of sctp_do_sm() to keep attention focused on the real
  * functionality there.
@@ -1463,9 +1458,6 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 			local_cork = 0;
 			asoc->peer.retran_path = t;
 			break;
-		case SCTP_CMD_SET_SK_ERR:
-			sctp_cmd_set_sk_err(asoc, cmd->obj.error);
-			break;
 		default:
 			printk(KERN_WARNING "Impossible command: %u, %p\n",
 			       cmd->verb, cmd->obj.ptr);
diff --git a/trunk/net/sctp/sm_statefuns.c b/trunk/net/sctp/sm_statefuns.c
index 8bc279219a72..8cdba51ec076 100644
--- a/trunk/net/sctp/sm_statefuns.c
+++ b/trunk/net/sctp/sm_statefuns.c
@@ -93,7 +93,7 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
 static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);

 static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
-				     __u16 error, int sk_err,
+				     __u16 error,
 				     const struct sctp_association *asoc,
 				     struct sctp_transport *transport);

@@ -448,7 +448,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
 	__u32 init_tag;
 	struct sctp_chunk *err_chunk;
 	struct sctp_packet *packet;
-	__u16 error;
+	sctp_disposition_t ret;

 	if (!sctp_vtag_verify(chunk, asoc))
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -480,9 +480,11 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
 			goto nomem;

 		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
-		return sctp_stop_t1_and_abort(commands, SCTP_ERROR_INV_PARAM,
-					      ECONNREFUSED, asoc,
-					      chunk->transport);
+		sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+				SCTP_STATE(SCTP_STATE_CLOSED));
+		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+		sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
+		return SCTP_DISPOSITION_DELETE_TCB;
 	}

 	/* Verify the INIT chunk before processing it. */
@@ -509,16 +511,27 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
 			sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
 					SCTP_PACKET(packet));
 			SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
-			error = SCTP_ERROR_INV_PARAM;
+			sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+					SCTP_STATE(SCTP_STATE_CLOSED));
+			sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
+					SCTP_NULL());
+			return SCTP_DISPOSITION_CONSUME;
 		} else {
-			error = SCTP_ERROR_NO_RESOURCE;
+			sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+					SCTP_STATE(SCTP_STATE_CLOSED));
+			sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
+					SCTP_NULL());
+			return SCTP_DISPOSITION_NOMEM;
 		}
 	} else {
-		sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
-		error = SCTP_ERROR_INV_PARAM;
+		ret = sctp_sf_tabort_8_4_8(ep, asoc, type, arg,
+					   commands);
+		sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+				SCTP_STATE(SCTP_STATE_CLOSED));
+		sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
+				SCTP_NULL());
+		return ret;
 	}
-	return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED,
-				      asoc, chunk->transport);
 }

 /* Tag the variable length parameters.  Note that we never
@@ -873,8 +886,6 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
 	struct sctp_transport *transport = (struct sctp_transport *) arg;

 	if (asoc->overall_error_count >= asoc->max_retrans) {
-		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-				SCTP_ERROR(ETIMEDOUT));
 		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB.
 		 */
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -1019,12 +1030,6 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
 						  commands);

 	hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
-	/* Make sure that the length of the parameter is what we expect */
-	if (ntohs(hbinfo->param_hdr.length) !=
-				    sizeof(sctp_sender_hb_info_t)) {
-		return SCTP_DISPOSITION_DISCARD;
-	}
-
 	from_addr = hbinfo->daddr;
 	link = sctp_assoc_lookup_paddr(asoc, &from_addr);
@@ -2121,8 +2126,6 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
 	int attempts = asoc->init_err_counter + 1;

 	if (attempts > asoc->max_init_attempts) {
-		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-				SCTP_ERROR(ETIMEDOUT));
 		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 				SCTP_U32(SCTP_ERROR_STALE_COOKIE));
 		return SCTP_DISPOSITION_DELETE_TCB;
@@ -2259,7 +2262,6 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
 	if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
 		error = ((sctp_errhdr_t *)chunk->skb->data)->cause;

-	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
 	/* ASSOC_FAILED will DELETE_TCB. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(error));
 	SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -2304,8 +2306,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
 	if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
 		error = ((sctp_errhdr_t *)chunk->skb->data)->cause;

-	return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED, asoc,
-				      chunk->transport);
+	return sctp_stop_t1_and_abort(commands, error, asoc, chunk->transport);
 }

 /*
@@ -2317,8 +2318,7 @@ sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(const struct sctp_endpoint *ep
 					void *arg,
 					sctp_cmd_seq_t *commands)
 {
-	return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR,
-				      ENOPROTOOPT, asoc,
+	return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR, asoc,
 				      (struct sctp_transport *)arg);
 }

@@ -2343,7 +2343,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep,
  * This is common code called by several sctp_sf_*_abort() functions above.
  */
 static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
-				     __u16 error, int sk_err,
+				     __u16 error,
 				     const struct sctp_association *asoc,
 				     struct sctp_transport *transport)
 {
@@ -2353,7 +2353,6 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
 	SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
 	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
 			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
-	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err));
 	/* CMD_INIT_FAILED will DELETE_TCB. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 			SCTP_U32(error));
@@ -3337,8 +3336,6 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
 		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
 				SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
 		sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
-		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-				SCTP_ERROR(ECONNABORTED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_ASCONF_ACK));
 		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -3365,8 +3362,6 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
 		 * processing the rest of the chunks in the packet.
 		 */
 		sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
-		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-				SCTP_ERROR(ECONNABORTED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_ASCONF_ACK));
 		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -3719,13 +3714,9 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
 	if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
 				SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
-		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-				SCTP_ERROR(ECONNREFUSED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 				SCTP_U32(SCTP_ERROR_PROTO_VIOLATION));
 	} else {
-		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-				SCTP_ERROR(ECONNABORTED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_PROTO_VIOLATION));
 		SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
@@ -4043,8 +4034,6 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(
 	 * TCB.  This is a departure from our typical NOMEM handling.
 	 */

-	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-			SCTP_ERROR(ECONNABORTED));
 	/* Delete the established association. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 			SCTP_U32(SCTP_ERROR_USER_ABORT));
@@ -4186,8 +4175,6 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
 	 * TCB.  This is a departure from our typical NOMEM handling.
 	 */

-	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-			SCTP_ERROR(ECONNREFUSED));
 	/* Delete the established association. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 			SCTP_U32(SCTP_ERROR_USER_ABORT));
@@ -4556,8 +4543,6 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
 	struct sctp_transport *transport = arg;

 	if (asoc->overall_error_count >= asoc->max_retrans) {
-		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-				SCTP_ERROR(ETIMEDOUT));
 		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -4677,8 +4662,6 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
 		SCTP_DEBUG_PRINTK("Giving up on INIT, attempts: %d"
 				  " max_init_attempts: %d\n",
 				  attempts, asoc->max_init_attempts);
-		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-				SCTP_ERROR(ETIMEDOUT));
 		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
 		return SCTP_DISPOSITION_DELETE_TCB;
@@ -4728,8 +4711,6 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep

 		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
 	} else {
-		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-				SCTP_ERROR(ETIMEDOUT));
 		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
 		return SCTP_DISPOSITION_DELETE_TCB;
@@ -4761,8 +4742,6 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
 	SCTP_DEBUG_PRINTK("Timer T2 expired.\n");

 	if (asoc->overall_error_count >= asoc->max_retrans) {
-		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-				SCTP_ERROR(ETIMEDOUT));
 		/* Note:  CMD_ASSOC_FAILED calls CMD_DELETE_TCB.
 		 */
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -4838,8 +4817,6 @@ sctp_disposition_t sctp_sf_t4_timer_expire(
 	if (asoc->overall_error_count >= asoc->max_retrans) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
 				SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
-		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-				SCTP_ERROR(ETIMEDOUT));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
 		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -4893,8 +4870,6 @@ sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep,
 		goto nomem;

 	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
-	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-			SCTP_ERROR(ETIMEDOUT));
 	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 			SCTP_U32(SCTP_ERROR_NO_ERROR));

@@ -5334,8 +5309,6 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 		 * processing the rest of the chunks in the packet.
 		 */
 		sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
-		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-				SCTP_ERROR(ECONNABORTED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_DATA));
 		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
diff --git a/trunk/net/sctp/socket.c b/trunk/net/sctp/socket.c
index 174d4d35e951..b6e4b89539b3 100644
--- a/trunk/net/sctp/socket.c
+++ b/trunk/net/sctp/socket.c
@@ -1057,7 +1057,6 @@ static int __sctp_connect(struct sock* sk,
 	inet_sk(sk)->dport = htons(asoc->peer.port);
 	af = sctp_get_af_specific(to.sa.sa_family);
 	af->to_sk_daddr(&to, sk);
-	sk->sk_err = 0;

 	timeo = sock_sndtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK);
 	err = sctp_wait_for_connect(asoc, &timeo);
@@ -1229,7 +1228,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)

 	ep = sctp_sk(sk)->ep;

-	/* Walk all associations on an endpoint. */
+	/* Walk all associations on a socket, not on an endpoint. */
 	list_for_each_safe(pos, temp, &ep->asocs) {
 		asoc = list_entry(pos, struct sctp_association, asocs);

@@ -1242,13 +1241,13 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 		if (sctp_state(asoc, CLOSED)) {
 			sctp_unhash_established(asoc);
 			sctp_association_free(asoc);
-			continue;
-		}
-	}
-
-	if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)
-		sctp_primitive_ABORT(asoc, NULL);
-	else
+		} else if (sock_flag(sk, SOCK_LINGER) &&
+			   !sk->sk_lingertime)
+			sctp_primitive_ABORT(asoc, NULL);
+		else
+			sctp_primitive_SHUTDOWN(asoc, NULL);
+	} else
 		sctp_primitive_SHUTDOWN(asoc, NULL);
 	}

@@ -5318,7 +5317,6 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
 		 */
 		sctp_release_sock(sk);
 		current_timeo = schedule_timeout(current_timeo);
-		BUG_ON(sk != asoc->base.sk);
 		sctp_lock_sock(sk);

 		*timeo_p = current_timeo;
@@ -5606,14 +5604,12 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	 */
 	newsp->type = type;

-	/* Mark the new socket "in-use" by the user so that any packets
-	 * that may arrive on the association after we've moved it are
-	 * queued to the backlog.  This prevents a potential race between
-	 * backlog processing on the old socket and new-packet processing
-	 * on the new socket.
-	 */
-	sctp_lock_sock(newsk);
+	spin_lock_bh(&oldsk->sk_lock.slock);
+	/* Migrate the backlog from oldsk to newsk. */
+	sctp_backlog_migrate(assoc, oldsk, newsk);
+	/* Migrate the association to the new socket. */
 	sctp_assoc_migrate(assoc, newsk);
+	spin_unlock_bh(&oldsk->sk_lock.slock);

 	/* If the association on the newsk is already closed before accept()
 	 * is called, set RCV_SHUTDOWN flag.
 	 */
@@ -5622,7 +5618,6 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 		newsk->sk_shutdown |= RCV_SHUTDOWN;

 	newsk->sk_state = SCTP_SS_ESTABLISHED;
-	sctp_release_sock(newsk);
 }

 /* This proto struct describes the ULP interface for SCTP. */
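[Editor's note: the sm_sideeffect.c hunk above records association failure by writing ECONNRESET into sk_err on one-to-one style sockets. From user space such a pending error is typically read, and cleared, via SO_ERROR, or else surfaces as the errno of the next socket call. A minimal sketch follows; a plain TCP socket stands in for an SCTP one here.]

    #include <stdio.h>
    #include <sys/socket.h>

    static int pending_error(int fd)
    {
            int err = 0;
            socklen_t len = sizeof(err);

            if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0)
                    return -1;
            return err;     /* e.g. ECONNRESET after an aborted association */
    }

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_STREAM, 0);

            printf("pending error: %d\n", pending_error(fd));
            return 0;
    }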