diff --git a/[refs] b/[refs]
index 23f267efa81c..f22ea77f5d81 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 973ed7c49a5c2271a10ce54ac06bba5617fca3a8
+refs/heads/master: 2302827c95fe0f441025acd5133e532d2eef322b
diff --git a/trunk/Documentation/crypto/async-tx-api.txt b/trunk/Documentation/crypto/async-tx-api.txt
deleted file mode 100644
index c1e9545c59bd..000000000000
--- a/trunk/Documentation/crypto/async-tx-api.txt
+++ /dev/null
@@ -1,219 +0,0 @@
-		 Asynchronous Transfers/Transforms API
-
-1 INTRODUCTION
-
-2 GENEALOGY
-
-3 USAGE
-3.1 General format of the API
-3.2 Supported operations
-3.3 Descriptor management
-3.4 When does the operation execute?
-3.5 When does the operation complete?
-3.6 Constraints
-3.7 Example
-
-4 DRIVER DEVELOPER NOTES
-4.1 Conformance points
-4.2 "My application needs finer control of hardware channels"
-
-5 SOURCE
-
----
-
-1 INTRODUCTION
-
-The async_tx API provides methods for describing a chain of asynchronous
-bulk memory transfers/transforms with support for inter-transactional
-dependencies.  It is implemented as a dmaengine client that smooths over
-the details of different hardware offload engine implementations.  Code
-that is written to the API can optimize for asynchronous operation and
-the API will fit the chain of operations to the available offload
-resources.
-
-2 GENEALOGY
-
-The API was initially designed to offload the memory copy and
-xor-parity-calculations of the md-raid5 driver using the offload engines
-present in the Intel(R) Xscale series of I/O processors.  It also built
-on the 'dmaengine' layer developed for offloading memory copies in the
-network stack using Intel(R) I/OAT engines.  The following design
-features surfaced as a result:
-1/ implicit synchronous path: users of the API do not need to know if
-   the platform they are running on has offload capabilities.  The
-   operation will be offloaded when an engine is available and carried out
-   in software otherwise.
-2/ cross channel dependency chains: the API allows a chain of dependent
-   operations to be submitted, like xor->copy->xor in the raid5 case.  The
-   API automatically handles cases where the transition from one operation
-   to another implies a hardware channel switch.
-3/ dmaengine extensions to support multiple clients and operation types
-   beyond 'memcpy'
-
-3 USAGE
-
-3.1 General format of the API:
-struct dma_async_tx_descriptor *
-async_<operation>(<op specific parameters>,
-		  enum async_tx_flags flags,
-		  struct dma_async_tx_descriptor *dependency,
-		  dma_async_tx_callback callback_routine,
-		  void *callback_parameter);
-
-3.2 Supported operations:
-memcpy       - memory copy between a source and a destination buffer
-memset       - fill a destination buffer with a byte value
-xor          - xor a series of source buffers and write the result to a
-               destination buffer
-xor_zero_sum - xor a series of source buffers and set a flag if the
-               result is zero.  The implementation attempts to prevent
-               writes to memory
-
-3.3 Descriptor management:
-The return value is non-NULL and points to a 'descriptor' when the operation
-has been queued to execute asynchronously.  Descriptors are recycled
-resources, under control of the offload engine driver, to be reused as
-operations complete.  When an application needs to submit a chain of
-operations it must guarantee that the descriptor is not automatically recycled
-before the dependency is submitted.  This requires that all descriptors be
-acknowledged by the application before the offload engine driver is allowed to
-recycle (or free) the descriptor.  A descriptor can be acked by one of the
-following methods:
-1/ setting the ASYNC_TX_ACK flag if no child operations are to be submitted
-2/ setting the ASYNC_TX_DEP_ACK flag to acknowledge the parent
-   descriptor of a new operation.
-3/ calling async_tx_ack() on the descriptor.
-
-3.4 When does the operation execute?
-Operations do not immediately issue after return from the
-async_<operation> call.  Offload engine drivers batch operations to
-improve performance by reducing the number of mmio cycles needed to
-manage the channel.  Once a driver-specific threshold is met the driver
-automatically issues pending operations.  An application can force this
-event by calling async_tx_issue_pending_all().  This operates on all
-channels since the application has no knowledge of channel to operation
-mapping.
-
-3.5 When does the operation complete?
-There are two methods for an application to learn about the completion
-of an operation.
-1/ Call dma_wait_for_async_tx().  This call causes the CPU to spin while
-   it polls for the completion of the operation.  It handles dependency
-   chains and issuing pending operations.
-2/ Specify a completion callback.  The callback routine runs in tasklet
-   context if the offload engine driver supports interrupts, or it is
-   called in application context if the operation is carried out
-   synchronously in software.  The callback can be set in the call to
-   async_<operation>, or when the application needs to submit a chain of
-   unknown length it can use the async_trigger_callback() routine to set a
-   completion interrupt/callback at the end of the chain.
-
-3.6 Constraints:
-1/ Calls to async_<operation> are not permitted in IRQ context.  Other
-   contexts are permitted provided constraint #2 is not violated.
-2/ Completion callback routines cannot submit new operations.  This
-   results in recursion in the synchronous case and spin_locks being
-   acquired twice in the asynchronous case.
-
-3.7 Example:
-Perform a xor->copy->xor operation where each operation depends on the
-result from the previous operation:
-
-void complete_xor_copy_xor(void *param)
-{
-	printk("complete\n");
-}
-
-void run_xor_copy_xor(struct page **xor_srcs,
-		      int xor_src_cnt,
-		      struct page *xor_dest,
-		      size_t xor_len,
-		      struct page *copy_src,
-		      struct page *copy_dest,
-		      size_t copy_len)
-{
-	struct dma_async_tx_descriptor *tx;
-
-	tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
-		       ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL);
-	tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len,
-			  ASYNC_TX_DEP_ACK, tx, NULL, NULL);
-	tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
-		       ASYNC_TX_XOR_DROP_DST | ASYNC_TX_DEP_ACK | ASYNC_TX_ACK,
-		       tx, complete_xor_copy_xor, NULL);
-
-	async_tx_issue_pending_all();
-}
-
-See include/linux/async_tx.h for more information on the flags.  See the
-ops_run_* and ops_complete_* routines in drivers/md/raid5.c for more
-implementation examples.
-
-4 DRIVER DEVELOPER NOTES
-
-4.1 Conformance points:
-There are a few conformance points required in dmaengine drivers to
-accommodate assumptions made by applications using the async_tx API:
-1/ Completion callbacks are expected to happen in tasklet context
-2/ dma_async_tx_descriptor fields are never manipulated in IRQ context
-3/ Use async_tx_run_dependencies() in the descriptor clean up path to
-   handle submission of dependent operations
-
-4.2 "My application needs finer control of hardware channels"
-This requirement seems to arise from cases where a DMA engine driver is
-trying to support device-to-memory DMA.  The dmaengine and async_tx
-implementations were designed for offloading memory-to-memory
-operations; however, there are some capabilities of the dmaengine layer
-that can be used for platform-specific channel management.
-Platform-specific constraints can be handled by registering the
-application as a 'dma_client' and implementing a 'dma_event_callback' to
-apply a filter to the available channels in the system.  Before showing
-how to implement a custom dma_event callback some background of
-dmaengine's client support is required.
-
-The following routines in dmaengine support multiple clients requesting
-use of a channel:
-- dma_async_client_register(struct dma_client *client)
-- dma_async_client_chan_request(struct dma_client *client)
-
-dma_async_client_register takes a pointer to an initialized dma_client
-structure.  It expects that the 'event_callback' and 'cap_mask' fields
-are already initialized.
-
-dma_async_client_chan_request triggers dmaengine to notify the client of
-all channels that satisfy the capability mask.  It is up to the client's
-event_callback routine to track how many channels the client needs and
-how many it is currently using.  The dma_event_callback routine returns a
-dma_state_client code to let dmaengine know the status of the
-allocation.
-
-Below is the example of how to extend this functionality for
-platform-specific filtering of the available channels beyond the
-standard capability mask:
-
-static enum dma_state_client
-my_dma_client_callback(struct dma_client *client,
-			struct dma_chan *chan, enum dma_state state)
-{
-	struct dma_device *dma_dev;
-	struct my_platform_specific_dma *plat_dma_dev;
-
-	dma_dev = chan->device;
-	plat_dma_dev = container_of(dma_dev,
-				    struct my_platform_specific_dma,
-				    dma_dev);
-
-	if (!plat_dma_dev->platform_specific_capability)
-		return DMA_DUP;
-
-	. . .
-}
-
-5 SOURCE
-include/linux/dmaengine.h: core header file for DMA drivers and clients
-drivers/dma/dmaengine.c: offload engine channel management routines
-drivers/dma/: location for offload engine drivers
-include/linux/async_tx.h: core header file for the async_tx api
-crypto/async_tx/async_tx.c: async_tx interface to dmaengine and common code
-crypto/async_tx/async_memcpy.c: copy offload
-crypto/async_tx/async_memset.c: memory fill offload
-crypto/async_tx/async_xor.c: xor and xor zero sum offload
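The deleted document describes, in sections 3.3 and 3.5, how a chain of unknown
length is terminated with async_trigger_callback(), but its only worked example
(3.7) is fixed-length.  A minimal sketch of the unknown-length case, assuming
async_trigger_callback() follows the section 3.1 general format with no
operation-specific parameters (its real prototype lives in
include/linux/async_tx.h):

#include <linux/async_tx.h>
#include <linux/completion.h>

static void chain_done(void *param)
{
	complete(param);	/* wake up the submitter */
}

/*
 * Copy cnt pages and signal 'done' when the whole chain completes.
 * ASYNC_TX_DEP_ACK acks each parent so its descriptor can be recycled
 * (section 3.3); the final ASYNC_TX_ACK marks the chain end since no
 * child operation follows.
 */
static void copy_pages(struct page **dst, struct page **src, int cnt,
		       size_t len, struct completion *done)
{
	struct dma_async_tx_descriptor *tx = NULL;
	int i;

	for (i = 0; i < cnt; i++)
		tx = async_memcpy(dst[i], src[i], 0, 0, len,
				  tx ? ASYNC_TX_DEP_ACK : 0, tx, NULL, NULL);

	async_trigger_callback(ASYNC_TX_ACK | ASYNC_TX_DEP_ACK, tx,
			       chain_done, done);
	async_tx_issue_pending_all();
}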
diff --git a/trunk/Makefile b/trunk/Makefile
index 4dac25301d5f..c265e41ec55a 100644
--- a/trunk/Makefile
+++ b/trunk/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 23
-EXTRAVERSION =-rc8
+EXTRAVERSION =-rc7
 NAME = Arr Matey! A Hairy Bilge Rat!
 
 # *DOCUMENTATION*
diff --git a/trunk/arch/arm/mach-ep93xx/core.c b/trunk/arch/arm/mach-ep93xx/core.c
index 70b2c7801110..851cc7158ca3 100644
--- a/trunk/arch/arm/mach-ep93xx/core.c
+++ b/trunk/arch/arm/mach-ep93xx/core.c
@@ -336,7 +336,7 @@ static int ep93xx_gpio_irq_type(unsigned int irq, unsigned int type)
 	if (line >= 0 && line < 16) {
 		gpio_line_config(line, GPIO_IN);
 	} else {
-		gpio_line_config(EP93XX_GPIO_LINE_F(line-16), GPIO_IN);
+		gpio_line_config(EP93XX_GPIO_LINE_F(line), GPIO_IN);
 	}
 
 	port = line >> 3;
diff --git a/trunk/arch/arm/mm/cache-l2x0.c b/trunk/arch/arm/mm/cache-l2x0.c
index 76b800a95191..b4e9b734e0bd 100644
--- a/trunk/arch/arm/mm/cache-l2x0.c
+++ b/trunk/arch/arm/mm/cache-l2x0.c
@@ -57,17 +57,7 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
 {
 	unsigned long addr;
 
-	if (start & (CACHE_LINE_SIZE - 1)) {
-		start &= ~(CACHE_LINE_SIZE - 1);
-		sync_writel(start, L2X0_CLEAN_INV_LINE_PA, 1);
-		start += CACHE_LINE_SIZE;
-	}
-
-	if (end & (CACHE_LINE_SIZE - 1)) {
-		end &= ~(CACHE_LINE_SIZE - 1);
-		sync_writel(end, L2X0_CLEAN_INV_LINE_PA, 1);
-	}
-
+	start &= ~(CACHE_LINE_SIZE - 1);
 	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
 		sync_writel(addr, L2X0_INV_LINE_PA, 1);
 	cache_sync();
diff --git a/trunk/arch/mips/kernel/i8259.c b/trunk/arch/mips/kernel/i8259.c
index 3a2d255361bc..b6c30800c667 100644
--- a/trunk/arch/mips/kernel/i8259.c
+++ b/trunk/arch/mips/kernel/i8259.c
@@ -177,7 +177,10 @@ void mask_and_ack_8259A(unsigned int irq)
 		outb(cached_master_mask, PIC_MASTER_IMR);
 		outb(0x60+irq,PIC_MASTER_CMD);	/* 'Specific EOI to master */
 	}
-	smtc_im_ack_irq(irq);
+#ifdef CONFIG_MIPS_MT_SMTC
+	if (irq_hwmask[irq] & ST0_IM)
+		set_c0_status(irq_hwmask[irq] & ST0_IM);
+#endif /* CONFIG_MIPS_MT_SMTC */
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 	return;
diff --git a/trunk/arch/mips/kernel/irq-msc01.c b/trunk/arch/mips/kernel/irq-msc01.c
index 1ecdd50bfc60..410868b5ea5f 100644
--- a/trunk/arch/mips/kernel/irq-msc01.c
+++ b/trunk/arch/mips/kernel/irq-msc01.c
@@ -52,8 +52,11 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
 	mask_msc_irq(irq);
 	if (!cpu_has_veic)
 		MSCIC_WRITE(MSC01_IC_EOI, 0);
+#ifdef CONFIG_MIPS_MT_SMTC
 	/* This actually needs to be a call into platform code */
-	smtc_im_ack_irq(irq);
+	if (irq_hwmask[irq] & ST0_IM)
+		set_c0_status(irq_hwmask[irq] & ST0_IM);
+#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /*
@@ -70,7 +73,10 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
 		MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
 		MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
 	}
-	smtc_im_ack_irq(irq);
+#ifdef CONFIG_MIPS_MT_SMTC
+	if (irq_hwmask[irq] & ST0_IM)
+		set_c0_status(irq_hwmask[irq] & ST0_IM);
+#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /*
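The three MIPS hunks above open-code the same two-line interrupt-mask ack under
#ifdef CONFIG_MIPS_MT_SMTC.  The helper they replace, smtc_im_ack_irq(), is
removed from include/asm-mips/irq.h by a later hunk in this same patch; for
reference, the inline being expanded is:

static inline void smtc_im_ack_irq(unsigned int irq)
{
	if (irq_hwmask[irq] & ST0_IM)
		set_c0_status(irq_hwmask[irq] & ST0_IM);
}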
diff --git a/trunk/arch/mips/kernel/irq.c b/trunk/arch/mips/kernel/irq.c
index a990aad2f049..aeded6c17de5 100644
--- a/trunk/arch/mips/kernel/irq.c
+++ b/trunk/arch/mips/kernel/irq.c
@@ -74,12 +74,20 @@ EXPORT_SYMBOL_GPL(free_irqno);
  */
 void ack_bad_irq(unsigned int irq)
 {
-	smtc_im_ack_irq(irq);
 	printk("unexpected IRQ # %d\n", irq);
 }
 
 atomic_t irq_err_count;
 
+#ifdef CONFIG_MIPS_MT_SMTC
+/*
+ * SMTC Kernel needs to manipulate low-level CPU interrupt mask
+ * in do_IRQ. These are passed in setup_irq_smtc() and stored
+ * in this table.
+ */
+unsigned long irq_hwmask[NR_IRQS];
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 /*
  * Generic, controller-independent functions:
  */
diff --git a/trunk/arch/mips/kernel/smtc.c b/trunk/arch/mips/kernel/smtc.c
index f09404377ef1..43826c16101d 100644
--- a/trunk/arch/mips/kernel/smtc.c
+++ b/trunk/arch/mips/kernel/smtc.c
@@ -25,11 +25,8 @@
 #include
 
 /*
- * SMTC Kernel needs to manipulate low-level CPU interrupt mask
- * in do_IRQ. These are passed in setup_irq_smtc() and stored
- * in this table.
+ * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
  */
-unsigned long irq_hwmask[NR_IRQS];
 
 #define LOCK_MT_PRA() \
 	local_irq_save(flags); \
diff --git a/trunk/arch/powerpc/kernel/process.c b/trunk/arch/powerpc/kernel/process.c
index 8a1b001d0b11..e477c9d0498b 100644
--- a/trunk/arch/powerpc/kernel/process.c
+++ b/trunk/arch/powerpc/kernel/process.c
@@ -605,13 +605,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	regs->ccr = 0;
 	regs->gpr[1] = sp;
 
-	/*
-	 * We have just cleared all the nonvolatile GPRs, so make
-	 * FULL_REGS(regs) return true.  This is necessary to allow
-	 * ptrace to examine the thread immediately after exec.
-	 */
-	regs->trap &= ~1UL;
-
 #ifdef CONFIG_PPC32
 	regs->mq = 0;
 	regs->nip = start;
diff --git a/trunk/crypto/async_tx/async_tx.c b/trunk/crypto/async_tx/async_tx.c
index bc18cbb8ea79..035007145e78 100644
--- a/trunk/crypto/async_tx/async_tx.c
+++ b/trunk/crypto/async_tx/async_tx.c
@@ -80,7 +80,6 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
 	enum dma_status status;
 	struct dma_async_tx_descriptor *iter;
-	struct dma_async_tx_descriptor *parent;
 
 	if (!tx)
 		return DMA_SUCCESS;
@@ -88,15 +87,8 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 	/* poll through the dependency chain, return when tx is complete */
 	do {
 		iter = tx;
-
-		/* find the root of the unsubmitted dependency chain */
-		while (iter->cookie == -EBUSY) {
-			parent = iter->parent;
-			if (parent && parent->cookie == -EBUSY)
-				iter = iter->parent;
-			else
-				break;
-		}
+		while (iter->cookie == -EBUSY)
+			iter = iter->parent;
 
 		status = dma_sync_wait(iter->chan, iter->cookie);
 	} while (status == DMA_IN_PROGRESS || (iter != tx));
diff --git a/trunk/drivers/acpi/processor_core.c b/trunk/drivers/acpi/processor_core.c
index 9f11dc296cdd..2afb3d2086b3 100644
--- a/trunk/drivers/acpi/processor_core.c
+++ b/trunk/drivers/acpi/processor_core.c
@@ -102,8 +102,6 @@ static struct acpi_driver acpi_processor_driver = {
 		.add = acpi_processor_add,
 		.remove = acpi_processor_remove,
 		.start = acpi_processor_start,
-		.suspend = acpi_processor_suspend,
-		.resume = acpi_processor_resume,
 		},
 };
diff --git a/trunk/drivers/acpi/processor_idle.c b/trunk/drivers/acpi/processor_idle.c
index f18261368e76..d9b8af763e1e 100644
--- a/trunk/drivers/acpi/processor_idle.c
+++ b/trunk/drivers/acpi/processor_idle.c
@@ -325,23 +325,6 @@ static void acpi_state_timer_broadcast(struct acpi_processor *pr,
 
 #endif
 
-/*
- * Suspend / resume control
- */
-static int acpi_idle_suspend;
-
-int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
-{
-	acpi_idle_suspend = 1;
-	return 0;
-}
-
-int acpi_processor_resume(struct acpi_device * device)
-{
-	acpi_idle_suspend = 0;
-	return 0;
-}
-
 static void acpi_processor_idle(void)
 {
 	struct acpi_processor *pr = NULL;
@@ -372,7 +355,7 @@ static void acpi_processor_idle(void)
 	}
 
 	cx = pr->power.state;
-	if (!cx || acpi_idle_suspend) {
+	if (!cx) {
 		if (pm_idle_save)
 			pm_idle_save();
 		else
diff --git a/trunk/drivers/acpi/sleep/Makefile b/trunk/drivers/acpi/sleep/Makefile
index ba9bd403d443..195a4f69c0f7 100644
--- a/trunk/drivers/acpi/sleep/Makefile
+++ b/trunk/drivers/acpi/sleep/Makefile
@@ -1,4 +1,4 @@
-obj-y					:= wakeup.o
+obj-y					:= poweroff.o wakeup.o
 
 obj-$(CONFIG_ACPI_SLEEP)		+= main.o
 obj-$(CONFIG_ACPI_SLEEP)		+= proc.o
diff --git a/trunk/drivers/acpi/sleep/main.c b/trunk/drivers/acpi/sleep/main.c
index 85633c585aab..c52ade816fb4 100644
--- a/trunk/drivers/acpi/sleep/main.c
+++ b/trunk/drivers/acpi/sleep/main.c
@@ -15,9 +15,6 @@
 #include
 #include
 #include
-
-#include
-
 #include
 #include
 #include "sleep.h"
@@ -60,27 +57,6 @@ static int acpi_pm_set_target(suspend_state_t pm_state)
 	return error;
 }
 
-int acpi_sleep_prepare(u32 acpi_state)
-{
-#ifdef CONFIG_ACPI_SLEEP
-	/* do we have a wakeup address for S2 and S3? */
-	if (acpi_state == ACPI_STATE_S3) {
-		if (!acpi_wakeup_address) {
-			return -EFAULT;
-		}
-		acpi_set_firmware_waking_vector((acpi_physical_address)
-				virt_to_phys((void *)
-				acpi_wakeup_address));
-
-	}
-	ACPI_FLUSH_CPU_CACHE();
-	acpi_enable_wakeup_device_prep(acpi_state);
-#endif
-	acpi_gpe_sleep_prepare(acpi_state);
-	acpi_enter_sleep_state_prep(acpi_state);
-	return 0;
-}
-
 /**
  *	acpi_pm_prepare - Do preliminary suspend work.
  *	@pm_state: ignored
@@ -374,20 +350,6 @@ int acpi_pm_device_sleep_state(struct device *dev, int wake, int *d_min_p)
 	return d_max;
 }
 
-static void acpi_power_off_prepare(void)
-{
-	/* Prepare to power off the system */
-	acpi_sleep_prepare(ACPI_STATE_S5);
-}
-
-static void acpi_power_off(void)
-{
-	/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
-	printk("%s called\n", __FUNCTION__);
-	local_irq_disable();
-	acpi_enter_sleep_state(ACPI_STATE_S5);
-}
-
 int __init acpi_sleep_init(void)
 {
 	acpi_status status;
@@ -401,17 +363,16 @@ int __init acpi_sleep_init(void)
 	if (acpi_disabled)
 		return 0;
 
-	sleep_states[ACPI_STATE_S0] = 1;
-	printk(KERN_INFO PREFIX "(supports S0");
-
 #ifdef CONFIG_SUSPEND
-	for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
+	printk(KERN_INFO PREFIX "(supports");
+	for (i = ACPI_STATE_S0; i < ACPI_STATE_S4; i++) {
 		status = acpi_get_sleep_type_data(i, &type_a, &type_b);
 		if (ACPI_SUCCESS(status)) {
 			sleep_states[i] = 1;
 			printk(" S%d", i);
 		}
 	}
+	printk(")\n");
 
 	pm_set_ops(&acpi_pm_ops);
 #endif
@@ -421,16 +382,10 @@ int __init acpi_sleep_init(void)
 	if (ACPI_SUCCESS(status)) {
 		hibernation_set_ops(&acpi_hibernation_ops);
 		sleep_states[ACPI_STATE_S4] = 1;
-		printk(" S4");
 	}
+#else
+	sleep_states[ACPI_STATE_S4] = 0;
 #endif
-	status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
-	if (ACPI_SUCCESS(status)) {
-		sleep_states[ACPI_STATE_S5] = 1;
-		printk(" S5");
-		pm_power_off_prepare = acpi_power_off_prepare;
-		pm_power_off = acpi_power_off;
-	}
-	printk(")\n");
+
 	return 0;
 }
diff --git a/trunk/drivers/acpi/sleep/poweroff.c b/trunk/drivers/acpi/sleep/poweroff.c
new file mode 100644
index 000000000000..39e40d56b034
--- /dev/null
+++ b/trunk/drivers/acpi/sleep/poweroff.c
@@ -0,0 +1,75 @@
+/*
+ * poweroff.c - ACPI handler for powering off the system.
+ *
+ * AKA S5, but it is independent of whether or not the kernel supports
+ * any other sleep support in the system.
+ *
+ * Copyright (c) 2005 Alexey Starikovskiy
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include "sleep.h"
+
+int acpi_sleep_prepare(u32 acpi_state)
+{
+#ifdef CONFIG_ACPI_SLEEP
+	/* do we have a wakeup address for S2 and S3? */
+	if (acpi_state == ACPI_STATE_S3) {
+		if (!acpi_wakeup_address) {
+			return -EFAULT;
+		}
+		acpi_set_firmware_waking_vector((acpi_physical_address)
+				virt_to_phys((void *)
+				acpi_wakeup_address));
+
+	}
+	ACPI_FLUSH_CPU_CACHE();
+	acpi_enable_wakeup_device_prep(acpi_state);
+#endif
+	acpi_gpe_sleep_prepare(acpi_state);
+	acpi_enter_sleep_state_prep(acpi_state);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+
+static void acpi_power_off_prepare(void)
+{
+	/* Prepare to power off the system */
+	acpi_sleep_prepare(ACPI_STATE_S5);
+}
+
+static void acpi_power_off(void)
+{
+	/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
+	printk("%s called\n", __FUNCTION__);
+	local_irq_disable();
+	/* Some SMP machines only can poweroff in boot CPU */
+	acpi_enter_sleep_state(ACPI_STATE_S5);
+}
+
+static int acpi_poweroff_init(void)
+{
+	if (!acpi_disabled) {
+		u8 type_a, type_b;
+		acpi_status status;
+
+		status =
+		    acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
+		if (ACPI_SUCCESS(status)) {
+			pm_power_off_prepare = acpi_power_off_prepare;
+			pm_power_off = acpi_power_off;
+		}
+	}
+	return 0;
+}
+
+late_initcall(acpi_poweroff_init);
+
+#endif /* CONFIG_PM */
diff --git a/trunk/drivers/acpi/video.c b/trunk/drivers/acpi/video.c
index d05891f16282..3c9bb85a6a93 100644
--- a/trunk/drivers/acpi/video.c
+++ b/trunk/drivers/acpi/video.c
@@ -417,6 +417,7 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
 	arg0.integer.value = level;
 	status = acpi_evaluate_object(device->dev->handle, "_BCM", &args, NULL);
 
+	printk(KERN_DEBUG "set_level status: %x\n", status);
 	return status;
 }
 
@@ -1753,7 +1754,7 @@ static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
 
 static int acpi_video_bus_start_devices(struct acpi_video_bus *video)
 {
-	return acpi_video_bus_DOS(video, 0, 0);
+	return acpi_video_bus_DOS(video, 1, 0);
 }
 
 static int acpi_video_bus_stop_devices(struct acpi_video_bus *video)
diff --git a/trunk/drivers/char/mspec.c b/trunk/drivers/char/mspec.c
index 04ac155d3a07..049a46cc9f87 100644
--- a/trunk/drivers/char/mspec.c
+++ b/trunk/drivers/char/mspec.c
@@ -155,22 +155,23 @@ mspec_open(struct vm_area_struct *vma)
  * mspec_close
  *
  * Called when unmapping a device mapping. Frees all mspec pages
- * belonging to all the vma's sharing this vma_data structure.
+ * belonging to the vma.
  */
 static void
 mspec_close(struct vm_area_struct *vma)
 {
 	struct vma_data *vdata;
-	int index, last_index;
+	int index, last_index, result;
 	unsigned long my_page;
 
 	vdata = vma->vm_private_data;
 
-	if (!atomic_dec_and_test(&vdata->refcnt))
-		return;
+	BUG_ON(vma->vm_start < vdata->vm_start || vma->vm_end > vdata->vm_end);
 
-	last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
-	for (index = 0; index < last_index; index++) {
+	spin_lock(&vdata->lock);
+	index = (vma->vm_start - vdata->vm_start) >> PAGE_SHIFT;
+	last_index = (vma->vm_end - vdata->vm_start) >> PAGE_SHIFT;
+	for (; index < last_index; index++) {
 		if (vdata->maddr[index] == 0)
 			continue;
 
 		/*
@@ -179,12 +180,20 @@ mspec_close(struct vm_area_struct *vma)
 		 */
 		my_page = vdata->maddr[index];
 		vdata->maddr[index] = 0;
-		if (!mspec_zero_block(my_page, PAGE_SIZE))
+		spin_unlock(&vdata->lock);
+		result = mspec_zero_block(my_page, PAGE_SIZE);
+		if (!result)
 			uncached_free_page(my_page);
 		else
 			printk(KERN_WARNING "mspec_close(): "
-			       "failed to zero page %ld\n", my_page);
+			       "failed to zero page %i\n",
+			       result);
+		spin_lock(&vdata->lock);
 	}
+	spin_unlock(&vdata->lock);
+
+	if (!atomic_dec_and_test(&vdata->refcnt))
+		return;
 
 	if (vdata->flags & VMD_VMALLOCED)
 		vfree(vdata);
@@ -192,6 +201,7 @@ mspec_close(struct vm_area_struct *vma)
 		kfree(vdata);
 }
 
+
 /*
  * mspec_nopfn
  *
diff --git a/trunk/drivers/infiniband/hw/mlx4/qp.c b/trunk/drivers/infiniband/hw/mlx4/qp.c
index 85c51bdc36f1..ba0428d872aa 100644
--- a/trunk/drivers/infiniband/hw/mlx4/qp.c
+++ b/trunk/drivers/infiniband/hw/mlx4/qp.c
@@ -1211,42 +1211,12 @@ static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
 	dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
 }
 
-static void set_mlx_icrc_seg(void *dseg)
-{
-	u32 *t = dseg;
-	struct mlx4_wqe_inline_seg *iseg = dseg;
-
-	t[1] = 0;
-
-	/*
-	 * Need a barrier here before writing the byte_count field to
-	 * make sure that all the data is visible before the
-	 * byte_count field is set.  Otherwise, if the segment begins
-	 * a new cacheline, the HCA prefetcher could grab the 64-byte
-	 * chunk and get a valid (!= * 0xffffffff) byte count but
-	 * stale data, and end up sending the wrong data.
-	 */
-	wmb();
-
-	iseg->byte_count = cpu_to_be32((1 << 31) | 4);
-}
-
-static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
+static void set_data_seg(struct mlx4_wqe_data_seg *dseg,
+			 struct ib_sge *sg)
 {
+	dseg->byte_count = cpu_to_be32(sg->length);
 	dseg->lkey = cpu_to_be32(sg->lkey);
 	dseg->addr = cpu_to_be64(sg->addr);
-
-	/*
-	 * Need a barrier here before writing the byte_count field to
-	 * make sure that all the data is visible before the
-	 * byte_count field is set.  Otherwise, if the segment begins
-	 * a new cacheline, the HCA prefetcher could grab the 64-byte
-	 * chunk and get a valid (!= * 0xffffffff) byte count but
-	 * stale data, and end up sending the wrong data.
-	 */
-	wmb();
-
-	dseg->byte_count = cpu_to_be32(sg->length);
 }
 
 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
@@ -1255,7 +1225,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 	void *wqe;
 	struct mlx4_wqe_ctrl_seg *ctrl;
-	struct mlx4_wqe_data_seg *dseg;
 	unsigned long flags;
 	int nreq;
 	int err = 0;
@@ -1355,27 +1324,22 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			break;
 		}
 
-		/*
-		 * Write data segments in reverse order, so as to
-		 * overwrite cacheline stamp last within each
-		 * cacheline.  This avoids issues with WQE
-		 * prefetching.
-		 */
+		for (i = 0; i < wr->num_sge; ++i) {
+			set_data_seg(wqe, wr->sg_list + i);
 
-		dseg = wqe;
-		dseg += wr->num_sge - 1;
-		size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);
+			wqe  += sizeof (struct mlx4_wqe_data_seg);
+			size += sizeof (struct mlx4_wqe_data_seg) / 16;
+		}
 
 		/* Add one more inline data segment for ICRC for MLX sends */
-		if (unlikely(qp->ibqp.qp_type == IB_QPT_SMI ||
-			     qp->ibqp.qp_type == IB_QPT_GSI)) {
-			set_mlx_icrc_seg(dseg + 1);
+		if (qp->ibqp.qp_type == IB_QPT_SMI || qp->ibqp.qp_type == IB_QPT_GSI) {
+			((struct mlx4_wqe_inline_seg *) wqe)->byte_count =
+				cpu_to_be32((1 << 31) | 4);
+			((u32 *) wqe)[1] = 0;
+			wqe  += sizeof (struct mlx4_wqe_data_seg);
 			size += sizeof (struct mlx4_wqe_data_seg) / 16;
 		}
 
-		for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
-			set_data_seg(dseg, wr->sg_list + i);
-
 		ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
 				    MLX4_WQE_CTRL_FENCE : 0) | size;
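The comments deleted from mlx4/qp.c explain why the newer code wrote byte_count
last: the HCA may prefetch a 64-byte chunk as soon as the count looks valid, so
every other field must be globally visible first.  The ordering, reassembled
from the removed lines (this is set_data_seg() as it stood before the revert,
not new logic):

static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey = cpu_to_be32(sg->lkey);
	dseg->addr = cpu_to_be64(sg->addr);

	/*
	 * Publish the data before the byte count that validates it, so
	 * the HCA prefetcher can never see a valid count paired with
	 * stale data.
	 */
	wmb();

	dseg->byte_count = cpu_to_be32(sg->length);
}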
diff --git a/trunk/drivers/input/mouse/appletouch.c b/trunk/drivers/input/mouse/appletouch.c
index a1804bfdbb8c..2bea1b2c631c 100644
--- a/trunk/drivers/input/mouse/appletouch.c
+++ b/trunk/drivers/input/mouse/appletouch.c
@@ -328,7 +328,6 @@ static void atp_complete(struct urb* urb)
 {
 	int x, y, x_z, y_z, x_f, y_f;
 	int retval, i, j;
-	int key;
 	struct atp *dev = urb->context;
 
 	switch (urb->status) {
@@ -469,7 +468,6 @@ static void atp_complete(struct urb* urb)
 			      ATP_XFACT, &x_z, &x_f);
 	y = atp_calculate_abs(dev->xy_acc + ATP_XSENSORS, ATP_YSENSORS,
 			      ATP_YFACT, &y_z, &y_f);
-	key = dev->data[dev->datalen - 1] & 1;
 
 	if (x && y) {
 		if (dev->x_old != -1) {
@@ -507,7 +505,7 @@ static void atp_complete(struct urb* urb)
 	   the first touch unless reinitialised. Do so if it's been idle for
 	   a while in order to avoid waking the kernel up several hundred
 	   times a second */
-	if (!key && atp_is_geyser_3(dev)) {
+	if (atp_is_geyser_3(dev)) {
 		dev->idlecount++;
 		if (dev->idlecount == 10) {
 			dev->valid = 0;
@@ -516,7 +514,7 @@ static void atp_complete(struct urb* urb)
 		}
 	}
 
-	input_report_key(dev->input, BTN_LEFT, key);
+	input_report_key(dev->input, BTN_LEFT, dev->data[dev->datalen - 1] & 1);
 	input_sync(dev->input);
 
 exit:
diff --git a/trunk/drivers/kvm/Kconfig b/trunk/drivers/kvm/Kconfig
index 0a419a0de603..7b64fd4aa2f3 100644
--- a/trunk/drivers/kvm/Kconfig
+++ b/trunk/drivers/kvm/Kconfig
@@ -6,8 +6,7 @@ menuconfig VIRTUALIZATION
 	depends on X86
 	default y
 	---help---
-	  Say Y here to get to see options for using your Linux host to run other
-	  operating systems inside virtual machines (guests).
+	  Say Y here to get to see options for virtualization guest drivers.
 	  This option alone does not add any kernel code.
 
 	  If you say N, all options in this submenu will be skipped and disabled.
diff --git a/trunk/drivers/lguest/lguest_asm.S b/trunk/drivers/lguest/lguest_asm.S
index 1ddcd5cd20f6..f182c6a36209 100644
--- a/trunk/drivers/lguest/lguest_asm.S
+++ b/trunk/drivers/lguest/lguest_asm.S
@@ -22,9 +22,8 @@
 	jmp lguest_init
 
 /*G:055 We create a macro which puts the assembler code between lgstart_ and
- * lgend_ markers.  These templates are put in the .text section: they can't be
- * discarded after boot as we may need to patch modules, too. */
-.text
+ * lgend_ markers.  These templates end up in the .init.text section, so they
+ * are discarded after boot. */
 #define LGUEST_PATCH(name, insns...)			\
 	lgstart_##name: insns; lgend_##name:;		\
 	.globl lgstart_##name; .globl lgend_##name
@@ -35,6 +34,7 @@ LGUEST_PATCH(popf, movl %eax, lguest_data+LGUEST_DATA_irq_enabled)
 LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
 /*:*/
 
+.text
 /* These demark the EIP range where host should never deliver interrupts. */
 .global lguest_noirq_start
 .global lguest_noirq_end
diff --git a/trunk/drivers/md/raid5.c b/trunk/drivers/md/raid5.c
index f96dea975fa5..4d63773ee73a 100644
--- a/trunk/drivers/md/raid5.c
+++ b/trunk/drivers/md/raid5.c
@@ -514,7 +514,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 	struct stripe_head *sh = stripe_head_ref;
 	struct bio *return_bi = NULL;
 	raid5_conf_t *conf = sh->raid_conf;
-	int i;
+	int i, more_to_read = 0;
 
 	pr_debug("%s: stripe %llu\n", __FUNCTION__,
 		(unsigned long long)sh->sector);
@@ -522,14 +522,16 @@ static void ops_complete_biofill(void *stripe_head_ref)
 	/* clear completed biofills */
 	for (i = sh->disks; i--; ) {
 		struct r5dev *dev = &sh->dev[i];
+		/* check if this stripe has new incoming reads */
+		if (dev->toread)
+			more_to_read++;
 
 		/* acknowledge completion of a biofill operation */
-		/* and check if we need to reply to a read request,
-		 * new R5_Wantfill requests are held off until
-		 * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)
-		 */
-		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
+		/* and check if we need to reply to a read request
+		 */
+		if (test_bit(R5_Wantfill, &dev->flags) && !dev->toread) {
 			struct bio *rbi, *rbi2;
+			clear_bit(R5_Wantfill, &dev->flags);
 
 			/* The access to dev->read is outside of the
 			 * spin_lock_irq(&conf->device_lock), but is protected
@@ -556,7 +558,8 @@ static void ops_complete_biofill(void *stripe_head_ref)
 
 	return_io(return_bi);
 
-	set_bit(STRIPE_HANDLE, &sh->state);
+	if (more_to_read)
+		set_bit(STRIPE_HANDLE, &sh->state);
 	release_stripe(sh);
 }
diff --git a/trunk/drivers/net/pcmcia/3c589_cs.c b/trunk/drivers/net/pcmcia/3c589_cs.c
index 503f2685fb73..c06cae3f0b56 100644
--- a/trunk/drivers/net/pcmcia/3c589_cs.c
+++ b/trunk/drivers/net/pcmcia/3c589_cs.c
@@ -116,7 +116,7 @@ struct el3_private {
 	spinlock_t lock;
 };
 
-static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" };
+static const char *if_names[] = { "auto", "10base2", "10baseT", "AUI" };
 
 /*====================================================================*/
 
diff --git a/trunk/drivers/net/r8169.c b/trunk/drivers/net/r8169.c
index c921ec32c232..b85ab4a8f2a3 100644
--- a/trunk/drivers/net/r8169.c
+++ b/trunk/drivers/net/r8169.c
@@ -1228,10 +1228,7 @@ static void rtl8169_hw_phy_config(struct net_device *dev)
 		return;
 	}
 
-	if ((tp->mac_version != RTL_GIGA_MAC_VER_02) &&
-	    (tp->mac_version != RTL_GIGA_MAC_VER_03))
-		return;
-
+	/* phy config for RTL8169s mac_version C chip */
 	mdio_write(ioaddr, 31, 0x0001);			//w 31 2 0 1
 	mdio_write(ioaddr, 21, 0x1000);			//w 21 15 0 1000
 	mdio_write(ioaddr, 24, 0x65c7);			//w 24 15 0 65c7
@@ -2570,15 +2567,6 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
 		    (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
 			netif_wake_queue(dev);
 		}
-		/*
-		 * 8168 hack: TxPoll requests are lost when the Tx packets are
-		 * too close. Let's kick an extra TxPoll request when a burst
-		 * of start_xmit activity is detected (if it is not detected,
-		 * it is slow enough). -- FR
-		 */
-		smp_rmb();
-		if (tp->cur_tx != dirty_tx)
-			RTL_W8(TxPoll, NPQ);
 	}
 }
diff --git a/trunk/drivers/net/sky2.c b/trunk/drivers/net/sky2.c
index 0792031a5cf9..eaffe551d1d8 100644
--- a/trunk/drivers/net/sky2.c
+++ b/trunk/drivers/net/sky2.c
@@ -338,16 +338,6 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 		if (!(hw->flags & SKY2_HW_GIGABIT)) {
 			/* enable automatic crossover */
 			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
-
-			if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
-			    hw->chip_rev == CHIP_REV_YU_FE2_A0) {
-				u16 spec;
-
-				/* Enable Class A driver for FE+ A0 */
-				spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2);
-				spec |= PHY_M_FESC_SEL_CL_A;
-				gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
-			}
 		} else {
 			/* disable energy detect */
 			ctrl &= ~PHY_M_PC_EN_DET_MSK;
@@ -826,8 +816,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
 	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
 	sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
 
-	/* On chips without ram buffer, pause is controled by MAC level */
-	if (sky2_read8(hw, B2_E_0) == 0) {
+	if (!(hw->flags & SKY2_HW_RAMBUFFER)) {
 		sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
 		sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
 
@@ -1282,7 +1271,7 @@ static int sky2_up(struct net_device *dev)
 	struct sky2_port *sky2 = netdev_priv(dev);
 	struct sky2_hw *hw = sky2->hw;
 	unsigned port = sky2->port;
-	u32 imask, ramsize;
+	u32 imask;
 	int cap, err = -ENOMEM;
 	struct net_device *otherdev = hw->dev[sky2->port^1];
 
@@ -1337,12 +1326,13 @@ static int sky2_up(struct net_device *dev)
 
 	sky2_mac_init(hw, port);
 
-	/* Register is number of 4K blocks on internal RAM buffer. */
-	ramsize = sky2_read8(hw, B2_E_0) * 4;
-	if (ramsize > 0) {
+	if (hw->flags & SKY2_HW_RAMBUFFER) {
+		/* Register is number of 4K blocks on internal RAM buffer. */
+		u32 ramsize = sky2_read8(hw, B2_E_0) * 4;
 		u32 rxspace;
 
-		pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
+		printk(KERN_DEBUG PFX "%s: ram buffer %dK\n", dev->name, ramsize);
+
 		if (ramsize < 16)
 			rxspace = ramsize / 2;
 		else
@@ -2005,7 +1995,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
 
 	synchronize_irq(hw->pdev->irq);
 
-	if (sky2_read8(hw, B2_E_0) == 0)
+	if (!(hw->flags & SKY2_HW_RAMBUFFER))
 		sky2_set_tx_stfwd(hw, port);
 
 	ctl = gma_read16(hw, port, GM_GP_CTRL);
@@ -2536,7 +2526,7 @@ static void sky2_watchdog(unsigned long arg)
 				++active;
 
 			/* For chips with Rx FIFO, check if stuck */
-			if ((hw->flags & SKY2_HW_FIFO_HANG_CHECK) &&
+			if ((hw->flags & SKY2_HW_RAMBUFFER) &&
 			     sky2_rx_hung(dev)) {
 				pr_info(PFX "%s: receiver hang detected\n",
 					dev->name);
@@ -2694,10 +2684,8 @@ static int __devinit sky2_init(struct sky2_hw *hw)
 	switch(hw->chip_id) {
 	case CHIP_ID_YUKON_XL:
 		hw->flags = SKY2_HW_GIGABIT
-			| SKY2_HW_NEWER_PHY;
-		if (hw->chip_rev < 3)
-			hw->flags |= SKY2_HW_FIFO_HANG_CHECK;
-
+			| SKY2_HW_NEWER_PHY
+			| SKY2_HW_RAMBUFFER;
 		break;
 
 	case CHIP_ID_YUKON_EC_U:
@@ -2723,10 +2711,11 @@ static int __devinit sky2_init(struct sky2_hw *hw)
 			dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
 			return -EOPNOTSUPP;
 		}
-		hw->flags = SKY2_HW_GIGABIT | SKY2_HW_FIFO_HANG_CHECK;
+		hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RAMBUFFER;
 		break;
 
 	case CHIP_ID_YUKON_FE:
+		hw->flags = SKY2_HW_RAMBUFFER;
 		break;
 
 	case CHIP_ID_YUKON_FE_P:
diff --git a/trunk/drivers/net/sky2.h b/trunk/drivers/net/sky2.h
index 8bc5c54e3efa..69cd98400fe6 100644
--- a/trunk/drivers/net/sky2.h
+++ b/trunk/drivers/net/sky2.h
@@ -2063,7 +2063,7 @@ struct sky2_hw {
 #define SKY2_HW_FIBRE_PHY	0x00000002
 #define SKY2_HW_GIGABIT		0x00000004
 #define SKY2_HW_NEWER_PHY	0x00000008
-#define SKY2_HW_FIFO_HANG_CHECK	0x00000010
+#define SKY2_HW_RAMBUFFER	0x00000010	/* chip has RAM FIFO */
 #define SKY2_HW_NEW_LE		0x00000020	/* new LSOv2 format */
 #define SKY2_HW_AUTO_TX_SUM	0x00000040	/* new IP decode for Tx */
 #define SKY2_HW_ADV_POWER_CTL	0x00000080	/* additional PHY power regs */
diff --git a/trunk/drivers/power/power_supply_sysfs.c b/trunk/drivers/power/power_supply_sysfs.c
index de3155b21285..c7c4574729b1 100644
--- a/trunk/drivers/power/power_supply_sysfs.c
+++ b/trunk/drivers/power/power_supply_sysfs.c
@@ -289,7 +289,6 @@ int power_supply_uevent(struct device *dev, char **envp, int num_envp,
 		if (ret)
 			goto out;
 	}
-	envp[i] = NULL;
 
 out:
 	free_page((unsigned long)prop_buf);
diff --git a/trunk/drivers/scsi/scsi_transport_spi.c b/trunk/drivers/scsi/scsi_transport_spi.c
index 6f56f8750635..4df21c92ff1e 100644
--- a/trunk/drivers/scsi/scsi_transport_spi.c
+++ b/trunk/drivers/scsi/scsi_transport_spi.c
@@ -787,10 +787,12 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
 	struct scsi_target *starget = sdev->sdev_target;
 	struct Scsi_Host *shost = sdev->host;
 	int len = sdev->inquiry_len;
+	int min_period = spi_min_period(starget);
+	int max_width = spi_max_width(starget);
 	/* first set us up for narrow async */
 	DV_SET(offset, 0);
 	DV_SET(width, 0);
-
+
 	if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS)
 	    != SPI_COMPARE_SUCCESS) {
 		starget_printk(KERN_ERR, starget, "Domain Validation Initial Inquiry Failed\n");
 		return;
 	}
 
+	if (!scsi_device_wide(sdev)) {
+		spi_max_width(starget) = 0;
+		max_width = 0;
+	}
+
 	/* test width */
-	if (i->f->set_width && spi_max_width(starget) &&
-	    scsi_device_wide(sdev)) {
+	if (i->f->set_width && max_width) {
 		i->f->set_width(starget, 1);
 
 		if (spi_dv_device_compare_inquiry(sdev, buffer,
@@ -809,6 +815,11 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
 		    != SPI_COMPARE_SUCCESS) {
 			starget_printk(KERN_ERR, starget, "Wide Transfers Fail\n");
 			i->f->set_width(starget, 0);
+			/* Make sure we don't force wide back on by asking
+			 * for a transfer period that requires it */
+			max_width = 0;
+			if (min_period < 10)
+				min_period = 10;
 		}
 	}
 
@@ -828,7 +839,8 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
 
 	/* now set up to the maximum */
 	DV_SET(offset, spi_max_offset(starget));
-	DV_SET(period, spi_min_period(starget));
+	DV_SET(period, min_period);
+
 	/* try QAS requests; this should be harmless to set if the
 	 * target supports it */
 	if (scsi_device_qas(sdev)) {
@@ -837,14 +849,14 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
 		DV_SET(qas, 0);
 	}
 
-	if (scsi_device_ius(sdev) && spi_min_period(starget) < 9) {
+	if (scsi_device_ius(sdev) && min_period < 9) {
 		/* This u320 (or u640). Set IU transfers */
 		DV_SET(iu, 1);
 		/* Then set the optional parameters */
 		DV_SET(rd_strm, 1);
 		DV_SET(wr_flow, 1);
 		DV_SET(rti, 1);
-		if (spi_min_period(starget) == 8)
+		if (min_period == 8)
 			DV_SET(pcomp_en, 1);
 	} else {
 		DV_SET(iu, 0);
@@ -862,6 +874,10 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
 	} else {
 		DV_SET(dt, 1);
 	}
+	/* set width last because it will pull all the other
+	 * parameters down to required values */
+	DV_SET(width, max_width);
+
 	/* Do the read only INQUIRY tests */
 	spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len,
 		       spi_dv_device_compare_inquiry);
diff --git a/trunk/drivers/w1/w1.c b/trunk/drivers/w1/w1.c
index a593f900eff4..8d7ab74170d5 100644
--- a/trunk/drivers/w1/w1.c
+++ b/trunk/drivers/w1/w1.c
@@ -431,7 +431,6 @@ static int w1_uevent(struct device *dev, char **envp, int num_envp,
 	err = add_uevent_var(envp, num_envp, &cur_index, buffer, buffer_size,
 			&cur_len, "W1_SLAVE_ID=%024LX",
 			(unsigned long long)sl->reg_num.id);
-	envp[cur_index] = NULL;
 	if (err)
 		return err;
 
diff --git a/trunk/fs/ufs/super.c b/trunk/fs/ufs/super.c
index 38eb0b7a1f3d..73402c5eeb8a 100644
--- a/trunk/fs/ufs/super.c
+++ b/trunk/fs/ufs/super.c
@@ -894,7 +894,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
 		goto again;
 	}
 
-	sbi->s_flags = flags;/*after that line some functions use s_flags*/
+
 	ufs_print_super_stuff(sb, usb1, usb2, usb3);
 
 	/*
@@ -1025,6 +1025,8 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
 			UFS_MOUNT_UFSTYPE_44BSD)
 			uspi->s_maxsymlinklen =
 			    fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen);
+
+	sbi->s_flags = flags;
 
 	inode = iget(sb, UFS_ROOTINO);
 	if (!inode || is_bad_inode(inode))
diff --git a/trunk/fs/xfs/xfs_filestream.c b/trunk/fs/xfs/xfs_filestream.c
index 36d8f6aa11af..16f8e175167d 100644
--- a/trunk/fs/xfs/xfs_filestream.c
+++ b/trunk/fs/xfs/xfs_filestream.c
@@ -350,10 +350,9 @@ _xfs_filestream_update_ag(
 /* xfs_fstrm_free_func(): callback for freeing cached stream items. */
 void
 xfs_fstrm_free_func(
-	unsigned long	ino,
-	void		*data)
+	xfs_ino_t	ino,
+	fstrm_item_t	*item)
 {
-	fstrm_item_t	*item = (fstrm_item_t *)data;
 	xfs_inode_t	*ip = item->ip;
 	int ref;
 
@@ -439,7 +438,7 @@ xfs_filestream_mount(
 	grp_count = 10;
 
 	err = xfs_mru_cache_create(&mp->m_filestream, lifetime, grp_count,
-				   xfs_fstrm_free_func);
+				   (xfs_mru_cache_free_func_t)xfs_fstrm_free_func);
 
 	return err;
 }
diff --git a/trunk/fs/xfs/xfs_log_recover.c b/trunk/fs/xfs/xfs_log_recover.c
index 7174991f4bef..dacb19739cc2 100644
--- a/trunk/fs/xfs/xfs_log_recover.c
+++ b/trunk/fs/xfs/xfs_log_recover.c
@@ -1920,9 +1920,9 @@ xlog_recover_do_reg_buffer(
 			stale_buf = 1;
 			break;
 		}
-		if (dip->di_core.di_mode)
+		if (be16_to_cpu(dip->di_core.di_mode))
 			mode_count++;
-		if (dip->di_core.di_gen)
+		if (be16_to_cpu(dip->di_core.di_gen))
 			gen_count++;
 	}
diff --git a/trunk/include/acpi/processor.h b/trunk/include/acpi/processor.h
index 99934a999e66..ec3ffdadb4d2 100644
--- a/trunk/include/acpi/processor.h
+++ b/trunk/include/acpi/processor.h
@@ -320,8 +320,6 @@ int acpi_processor_power_init(struct acpi_processor *pr,
 int acpi_processor_cst_has_changed(struct acpi_processor *pr);
 int acpi_processor_power_exit(struct acpi_processor *pr,
 			      struct acpi_device *device);
-int acpi_processor_suspend(struct acpi_device * device, pm_message_t state);
-int acpi_processor_resume(struct acpi_device * device);
 
 /* in processor_thermal.c */
 int acpi_processor_get_limit_info(struct acpi_processor *pr);
diff --git a/trunk/include/asm-mips/irq.h b/trunk/include/asm-mips/irq.h
index 2cb52cf8bd4e..97102ebc54b1 100644
--- a/trunk/include/asm-mips/irq.h
+++ b/trunk/include/asm-mips/irq.h
@@ -24,30 +24,7 @@ static inline int irq_canonicalize(int irq)
 #define irq_canonicalize(irq) (irq)	/* Sane hardware, sane code ... */
 #endif
 
-#ifdef CONFIG_MIPS_MT_SMTC
-
-struct irqaction;
-
-extern unsigned long irq_hwmask[];
-extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
-			  unsigned long hwmask);
-
-static inline void smtc_im_ack_irq(unsigned int irq)
-{
-	if (irq_hwmask[irq] & ST0_IM)
-		set_c0_status(irq_hwmask[irq] & ST0_IM);
-}
-
-#else
-
-static inline void smtc_im_ack_irq(unsigned int irq)
-{
-}
-
-#endif /* CONFIG_MIPS_MT_SMTC */
-
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
-
 /*
  * Clear interrupt mask handling "backstop" if irq_hwmask
  * entry so indicates. This implies that the ack() or end()
@@ -61,7 +38,6 @@ do {						\
 		~(irq_hwmask[irq] & 0x0000ff00));	\
 } while (0)
 #else
-
 #define __DO_IRQ_SMTC_HOOK(irq) do { } while (0)
 #endif
 
@@ -84,6 +60,14 @@ do {						\
 extern void arch_init_irq(void);
 extern void spurious_interrupt(void);
 
+#ifdef CONFIG_MIPS_MT_SMTC
+struct irqaction;
+
+extern unsigned long irq_hwmask[];
+extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
+			  unsigned long hwmask);
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 extern int allocate_irqno(void);
 extern void alloc_legacy_irqno(void);
 extern void free_irqno(unsigned int irq);
diff --git a/trunk/kernel/time/tick-broadcast.c b/trunk/kernel/time/tick-broadcast.c
index 0962e0577660..aab881c86a1a 100644
--- a/trunk/kernel/time/tick-broadcast.c
+++ b/trunk/kernel/time/tick-broadcast.c
@@ -382,8 +382,23 @@ static int tick_broadcast_set_event(ktime_t expires, int force)
 
 int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 {
+	int cpu = smp_processor_id();
+
+	/*
+	 * If the CPU is marked for broadcast, enforce oneshot
+	 * broadcast mode. The jinxed VAIO does not resume otherwise.
+	 * No idea why it ends up in a lower C State during resume
+	 * without notifying the clock events layer.
+	 */
+	if (cpu_isset(cpu, tick_broadcast_mask))
+		cpu_set(cpu, tick_broadcast_oneshot_mask);
+
 	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
-	return 0;
+
+	if(!cpus_empty(tick_broadcast_oneshot_mask))
+		tick_broadcast_set_event(ktime_get(), 1);
+
+	return cpu_isset(cpu, tick_broadcast_oneshot_mask);
 }
 
 /*
diff --git a/trunk/lib/Kconfig.debug b/trunk/lib/Kconfig.debug
index 495863a500cd..50a94eee4d92 100644
--- a/trunk/lib/Kconfig.debug
+++ b/trunk/lib/Kconfig.debug
@@ -284,7 +284,7 @@ config LOCKDEP
 	select KALLSYMS_ALL
 
 config LOCK_STAT
-	bool "Lock usage statistics"
+	bool "Lock usage statisitics"
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select LOCKDEP
 	select DEBUG_SPINLOCK
diff --git a/trunk/sound/core/memalloc.c b/trunk/sound/core/memalloc.c
index 9b5656d8bcca..f057430db0d0 100644
--- a/trunk/sound/core/memalloc.c
+++ b/trunk/sound/core/memalloc.c
@@ -27,7 +27,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -482,54 +481,53 @@ static void free_all_reserved_pages(void)
 #define SND_MEM_PROC_FILE	"driver/snd-page-alloc"
 static struct proc_dir_entry *snd_mem_proc;
 
-static int snd_mem_proc_read(struct seq_file *seq, void *offset)
+static int snd_mem_proc_read(char *page, char **start, off_t off,
+			     int count, int *eof, void *data)
 {
+	int len = 0;
 	long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
 	struct snd_mem_list *mem;
 	int devno;
 	static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" };
 
 	mutex_lock(&list_mutex);
-	seq_printf(seq, "pages : %li bytes (%li pages per %likB)\n",
-		   pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
+	len += snprintf(page + len, count - len,
+			"pages : %li bytes (%li pages per %likB)\n",
+			pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
 	devno = 0;
 	list_for_each_entry(mem, &mem_list_head, list) {
 		devno++;
-		seq_printf(seq, "buffer %d : ID %08x : type %s\n",
-			   devno, mem->id, types[mem->buffer.dev.type]);
-		seq_printf(seq, "  addr = 0x%lx, size = %d bytes\n",
-			   (unsigned long)mem->buffer.addr,
-			   (int)mem->buffer.bytes);
+		len += snprintf(page + len, count - len,
+				"buffer %d : ID %08x : type %s\n",
+				devno, mem->id, types[mem->buffer.dev.type]);
+		len += snprintf(page + len, count - len,
+				"  addr = 0x%lx, size = %d bytes\n",
+				(unsigned long)mem->buffer.addr, (int)mem->buffer.bytes);
 	}
 	mutex_unlock(&list_mutex);
-	return 0;
-}
-
-static int snd_mem_proc_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, snd_mem_proc_read, NULL);
+	return len;
 }
 
 /* FIXME: for pci only - other bus? */
 #ifdef CONFIG_PCI
 #define gettoken(bufp) strsep(bufp, " \t\n")
 
-static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
-				  size_t count, loff_t * ppos)
+static int snd_mem_proc_write(struct file *file, const char __user *buffer,
+			      unsigned long count, void *data)
 {
 	char buf[128];
 	char *token, *p;
 
-	if (count > sizeof(buf) - 1)
-		return -EINVAL;
+	if (count > ARRAY_SIZE(buf) - 1)
+		count = ARRAY_SIZE(buf) - 1;
 	if (copy_from_user(buf, buffer, count))
 		return -EFAULT;
-	buf[count] = '\0';
+	buf[ARRAY_SIZE(buf) - 1] = '\0';
 
 	p = buf;
 	token = gettoken(&p);
 	if (! token || *token == '#')
-		return count;
+		return (int)count;
 	if (strcmp(token, "add") == 0) {
 		char *endp;
 		int vendor, device, size, buffers;
@@ -550,7 +548,7 @@ static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
 		    (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
 		    buffers > 4) {
 			printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
-			return count;
+			return (int)count;
 		}
 		vendor &= 0xffff;
 		device &= 0xffff;
@@ -562,7 +560,7 @@ static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
 			if (pci_set_dma_mask(pci, mask) < 0 ||
 			    pci_set_consistent_dma_mask(pci, mask) < 0) {
 				printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
-				return count;
+				return (int)count;
 			}
 		}
 		for (i = 0; i < buffers; i++) {
@@ -572,7 +570,7 @@ static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
 					size, &dmab) < 0) {
 				printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
 				pci_dev_put(pci);
-				return count;
+				return (int)count;
 			}
 			snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
 		}
@@ -598,21 +596,9 @@ static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
 			free_all_reserved_pages();
 		else
 			printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
-	return count;
+	return (int)count;
 }
 #endif /* CONFIG_PCI */
-
-static const struct file_operations snd_mem_proc_fops = {
-	.owner = THIS_MODULE,
-	.open = snd_mem_proc_open,
-	.read = seq_read,
-#ifdef CONFIG_PCI
-	.write = snd_mem_proc_write,
-#endif
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
 #endif /* CONFIG_PROC_FS */
 
 /*
@@ -623,8 +609,12 @@ static int __init snd_mem_init(void)
 {
 #ifdef CONFIG_PROC_FS
 	snd_mem_proc = create_proc_entry(SND_MEM_PROC_FILE, 0644, NULL);
-	if (snd_mem_proc)
-		snd_mem_proc->proc_fops = &snd_mem_proc_fops;
+	if (snd_mem_proc) {
+		snd_mem_proc->read_proc = snd_mem_proc_read;
+#ifdef CONFIG_PCI
+		snd_mem_proc->write_proc = snd_mem_proc_write;
+#endif
+	}
 #endif
 	return 0;
 }
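For contrast with the read_proc interface restored above: the final hunk
removes the seq_file idiom, under which the proc core handles buffer sizing and
offsets, so the show routine keeps no len/count arithmetic at all.  A condensed
sketch assembled from the deleted lines (the seq_printf body is elided):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int snd_mem_proc_show(struct seq_file *seq, void *offset)
{
	seq_printf(seq, "pages : ...\n");	/* real body elided */
	return 0;
}

static int snd_mem_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, snd_mem_proc_show, NULL);
}

static const struct file_operations snd_mem_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= snd_mem_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};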