diff --git a/[refs] b/[refs]
index c0ac3b99b037..0b36a91ad7b3 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 2cde4afacad1d66a129ad8787c01ce122382559a
+refs/heads/master: 4b2fe7e2a79727104e549a89a32b4aae26521861
diff --git a/trunk/Documentation/crypto/async-tx-api.txt b/trunk/Documentation/crypto/async-tx-api.txt
deleted file mode 100644
index c1e9545c59bd..000000000000
--- a/trunk/Documentation/crypto/async-tx-api.txt
+++ /dev/null
@@ -1,219 +0,0 @@
-                 Asynchronous Transfers/Transforms API
-
-1 INTRODUCTION
-
-2 GENEALOGY
-
-3 USAGE
-3.1 General format of the API
-3.2 Supported operations
-3.3 Descriptor management
-3.4 When does the operation execute?
-3.5 When does the operation complete?
-3.6 Constraints
-3.7 Example
-
-4 DRIVER DEVELOPER NOTES
-4.1 Conformance points
-4.2 "My application needs finer control of hardware channels"
-
-5 SOURCE
-
----
-
-1 INTRODUCTION
-
-The async_tx API provides methods for describing a chain of asynchronous
-bulk memory transfers/transforms with support for inter-transactional
-dependencies. It is implemented as a dmaengine client that smooths over
-the details of different hardware offload engine implementations. Code
-that is written to the API can optimize for asynchronous operation and
-the API will fit the chain of operations to the available offload
-resources.
-
-2 GENEALOGY
-
-The API was initially designed to offload the memory copy and
-xor-parity-calculations of the md-raid5 driver using the offload engines
-present in the Intel(R) Xscale series of I/O processors. It also built
-on the 'dmaengine' layer developed for offloading memory copies in the
-network stack using Intel(R) I/OAT engines. The following design
-features surfaced as a result:
-1/ implicit synchronous path: users of the API do not need to know if
-   the platform they are running on has offload capabilities. The
-   operation will be offloaded when an engine is available and carried out
-   in software otherwise.
-2/ cross channel dependency chains: the API allows a chain of dependent
-   operations to be submitted, like xor->copy->xor in the raid5 case. The
-   API automatically handles cases where the transition from one operation
-   to another implies a hardware channel switch.
-3/ dmaengine extensions to support multiple clients and operation types
-   beyond 'memcpy'
-
-3 USAGE
-
-3.1 General format of the API:
-struct dma_async_tx_descriptor *
-async_<operation>(<op specific parameters>,
-                  enum async_tx_flags flags,
-                  struct dma_async_tx_descriptor *dependency,
-                  dma_async_tx_callback callback_routine,
-                  void *callback_parameter);
-
-3.2 Supported operations:
-memcpy       - memory copy between a source and a destination buffer
-memset       - fill a destination buffer with a byte value
-xor          - xor a series of source buffers and write the result to a
-               destination buffer
-xor_zero_sum - xor a series of source buffers and set a flag if the
-               result is zero. The implementation attempts to prevent
-               writes to memory
-
-3.3 Descriptor management:
-The return value is non-NULL and points to a 'descriptor' when the operation
-has been queued to execute asynchronously. Descriptors are recycled
-resources, under control of the offload engine driver, to be reused as
-operations complete. When an application needs to submit a chain of
-operations it must guarantee that the descriptor is not automatically recycled
-before the dependency is submitted. This requires that all descriptors be
-acknowledged by the application before the offload engine driver is allowed to
-recycle (or free) the descriptor.
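The prototype in section 3.1 above reads more concretely with a real operation
plugged in. Below is a minimal sketch, assuming only the async_memcpy()
argument order already shown in the section 3.7 example (destination page,
source page, offsets, length, then the common flags/dependency/callback/
parameter tail); the helper name submit_copy and its arguments are
illustrative, not part of the original documentation.

#include <linux/async_tx.h>

/*
 * Illustrative sketch: 'dst' and 'src' are pages the caller owns, 'len'
 * is the number of bytes to copy. Op-specific parameters come first,
 * followed by the common tail described in section 3.1.
 */
static struct dma_async_tx_descriptor *submit_copy(struct page *dst,
						   struct page *src,
						   size_t len)
{
	/*
	 * ASYNC_TX_ACK acknowledges the descriptor up front (section 3.3)
	 * because no child operation will be chained to it, so the offload
	 * engine driver may recycle it as soon as the copy completes.
	 */
	return async_memcpy(dst, src, 0, 0, len,
			    ASYNC_TX_ACK, NULL, NULL, NULL);
}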
-A descriptor can be acked by one of the following methods:
-1/ setting the ASYNC_TX_ACK flag if no child operations are to be submitted
-2/ setting the ASYNC_TX_DEP_ACK flag to acknowledge the parent
-   descriptor of a new operation.
-3/ calling async_tx_ack() on the descriptor.
-
-3.4 When does the operation execute?
-Operations do not immediately issue after return from the
-async_<operation> call. Offload engine drivers batch operations to
-improve performance by reducing the number of mmio cycles needed to
-manage the channel. Once a driver-specific threshold is met the driver
-automatically issues pending operations. An application can force this
-event by calling async_tx_issue_pending_all(). This operates on all
-channels since the application has no knowledge of channel to operation
-mapping.
-
-3.5 When does the operation complete?
-There are two methods for an application to learn about the completion
-of an operation.
-1/ Call dma_wait_for_async_tx(). This call causes the CPU to spin while
-   it polls for the completion of the operation. It handles dependency
-   chains and issuing pending operations.
-2/ Specify a completion callback. The callback routine runs in tasklet
-   context if the offload engine driver supports interrupts, or it is
-   called in application context if the operation is carried out
-   synchronously in software. The callback can be set in the call to
-   async_<operation>, or when the application needs to submit a chain of
-   unknown length it can use the async_trigger_callback() routine to set a
-   completion interrupt/callback at the end of the chain.
-
-3.6 Constraints:
-1/ Calls to async_<operation> are not permitted in IRQ context. Other
-   contexts are permitted provided constraint #2 is not violated.
-2/ Completion callback routines cannot submit new operations. This
-   results in recursion in the synchronous case and spin_locks being
-   acquired twice in the asynchronous case.
-
-3.7 Example:
-Perform a xor->copy->xor operation where each operation depends on the
-result from the previous operation:
-
-void complete_xor_copy_xor(void *param)
-{
-	printk("complete\n");
-}
-
-int run_xor_copy_xor(struct page **xor_srcs,
-		     int xor_src_cnt,
-		     struct page *xor_dest,
-		     size_t xor_len,
-		     struct page *copy_src,
-		     struct page *copy_dest,
-		     size_t copy_len)
-{
-	struct dma_async_tx_descriptor *tx;
-
-	tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
-		       ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL);
-	tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len,
-			  ASYNC_TX_DEP_ACK, tx, NULL, NULL);
-	tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
-		       ASYNC_TX_XOR_DROP_DST | ASYNC_TX_DEP_ACK | ASYNC_TX_ACK,
-		       tx, complete_xor_copy_xor, NULL);
-
-	async_tx_issue_pending_all();
-}
-
-See include/linux/async_tx.h for more information on the flags. See the
-ops_run_* and ops_complete_* routines in drivers/md/raid5.c for more
-implementation examples.
-
-4 DRIVER DEVELOPMENT NOTES
-4.1 Conformance points:
-There are a few conformance points required in dmaengine drivers to
-accommodate assumptions made by applications using the async_tx API:
-1/ Completion callbacks are expected to happen in tasklet context
-2/ dma_async_tx_descriptor fields are never manipulated in IRQ context
-3/ Use async_tx_run_dependencies() in the descriptor clean up path to
-   handle submission of dependent operations
-
-4.2 "My application needs finer control of hardware channels"
-This requirement seems to arise from cases where a DMA engine driver is
-trying to support device-to-memory DMA.
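To complement the xor->copy->xor example in section 3.7, the sketch below
shows the submit/complete pattern of sections 3.3-3.5 for a chain whose
length is not known in advance. It assumes async_trigger_callback() takes
only the common parameter tail of section 3.1 (flags, dependency, callback,
callback parameter); the helper finish_chain and its arguments are
illustrative, not part of the original documentation.

#include <linux/async_tx.h>

/*
 * Illustrative sketch: 'tx' is the last descriptor returned by a chain of
 * async_<operation> calls, 'done' is the completion callback to run once
 * the whole chain has finished.
 */
static void finish_chain(struct dma_async_tx_descriptor *tx,
			 dma_async_tx_callback done, void *done_param)
{
	/*
	 * ASYNC_TX_DEP_ACK acknowledges the parent 'tx' (section 3.3);
	 * ASYNC_TX_ACK acknowledges the trigger descriptor itself because
	 * nothing further will be chained to it.
	 */
	tx = async_trigger_callback(ASYNC_TX_ACK | ASYNC_TX_DEP_ACK,
				    tx, done, done_param);

	/* Force submission on all channels (section 3.4). */
	async_tx_issue_pending_all();

	/* A polling alternative (section 3.5): dma_wait_for_async_tx(tx). */
}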
The dmaengine and async_tx -implementations were designed for offloading memory-to-memory -operations; however, there are some capabilities of the dmaengine layer -that can be used for platform-specific channel management. -Platform-specific constraints can be handled by registering the -application as a 'dma_client' and implementing a 'dma_event_callback' to -apply a filter to the available channels in the system. Before showing -how to implement a custom dma_event callback some background of -dmaengine's client support is required. - -The following routines in dmaengine support multiple clients requesting -use of a channel: -- dma_async_client_register(struct dma_client *client) -- dma_async_client_chan_request(struct dma_client *client) - -dma_async_client_register takes a pointer to an initialized dma_client -structure. It expects that the 'event_callback' and 'cap_mask' fields -are already initialized. - -dma_async_client_chan_request triggers dmaengine to notify the client of -all channels that satisfy the capability mask. It is up to the client's -event_callback routine to track how many channels the client needs and -how many it is currently using. The dma_event_callback routine returns a -dma_state_client code to let dmaengine know the status of the -allocation. - -Below is the example of how to extend this functionality for -platform-specific filtering of the available channels beyond the -standard capability mask: - -static enum dma_state_client -my_dma_client_callback(struct dma_client *client, - struct dma_chan *chan, enum dma_state state) -{ - struct dma_device *dma_dev; - struct my_platform_specific_dma *plat_dma_dev; - - dma_dev = chan->device; - plat_dma_dev = container_of(dma_dev, - struct my_platform_specific_dma, - dma_dev); - - if (!plat_dma_dev->platform_specific_capability) - return DMA_DUP; - - . . . -} - -5 SOURCE -include/linux/dmaengine.h: core header file for DMA drivers and clients -drivers/dma/dmaengine.c: offload engine channel management routines -drivers/dma/: location for offload engine drivers -include/linux/async_tx.h: core header file for the async_tx api -crypto/async_tx/async_tx.c: async_tx interface to dmaengine and common code -crypto/async_tx/async_memcpy.c: copy offload -crypto/async_tx/async_memset.c: memory fill offload -crypto/async_tx/async_xor.c: xor and xor zero sum offload diff --git a/trunk/Documentation/devices.txt b/trunk/Documentation/devices.txt index 6c46730c631a..8de132a02ba9 100644 --- a/trunk/Documentation/devices.txt +++ b/trunk/Documentation/devices.txt @@ -94,8 +94,6 @@ Your cooperation is appreciated. 9 = /dev/urandom Faster, less secure random number gen. 10 = /dev/aio Asynchronous I/O notification interface 11 = /dev/kmsg Writes to this come out as printk's - 12 = /dev/oldmem Used by crashdump kernels to access - the memory of the kernel that crashed. 1 block RAM disk 0 = /dev/ram0 First RAM disk diff --git a/trunk/Documentation/lguest/lguest.c b/trunk/Documentation/lguest/lguest.c index 73c5f1f3d5d2..f7918401a007 100644 --- a/trunk/Documentation/lguest/lguest.c +++ b/trunk/Documentation/lguest/lguest.c @@ -882,7 +882,7 @@ static u32 handle_block_output(int fd, const struct iovec *iov, * of the block file (possibly extending it). */ if (off + len > device_len) { /* Trim it back to the correct length */ - ftruncate64(dev->fd, device_len); + ftruncate(dev->fd, device_len); /* Die, bad Guest, die. 
*/ errx(1, "Write past end %llu+%u", off, len); } diff --git a/trunk/Documentation/lockstat.txt b/trunk/Documentation/lockstat.txt deleted file mode 100644 index 4ba4664ce5c3..000000000000 --- a/trunk/Documentation/lockstat.txt +++ /dev/null @@ -1,120 +0,0 @@ - -LOCK STATISTICS - -- WHAT - -As the name suggests, it provides statistics on locks. - -- WHY - -Because things like lock contention can severely impact performance. - -- HOW - -Lockdep already has hooks in the lock functions and maps lock instances to -lock classes. We build on that. The graph below shows the relation between -the lock functions and the various hooks therein. - - __acquire - | - lock _____ - | \ - | __contended - | | - | - | _______/ - |/ - | - __acquired - | - . - - . - | - __release - | - unlock - -lock, unlock - the regular lock functions -__* - the hooks -<> - states - -With these hooks we provide the following statistics: - - con-bounces - number of lock contention that involved x-cpu data - contentions - number of lock acquisitions that had to wait - wait time min - shortest (non-0) time we ever had to wait for a lock - max - longest time we ever had to wait for a lock - total - total time we spend waiting on this lock - acq-bounces - number of lock acquisitions that involved x-cpu data - acquisitions - number of times we took the lock - hold time min - shortest (non-0) time we ever held the lock - max - longest time we ever held the lock - total - total time this lock was held - -From these number various other statistics can be derived, such as: - - hold time average = hold time total / acquisitions - -These numbers are gathered per lock class, per read/write state (when -applicable). - -It also tracks 4 contention points per class. A contention point is a call site -that had to wait on lock acquisition. - - - USAGE - -Look at the current lock statistics: - -( line numbers not part of actual output, done for clarity in the explanation - below ) - -# less /proc/lock_stat - -01 lock_stat version 0.2 -02 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -03 class name con-bounces contentions waittime-min waittime-max waittime-total acq-bounces acquisitions holdtime-min holdtime-max holdtime-total -04 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -05 -06 &inode->i_data.tree_lock-W: 15 21657 0.18 1093295.30 11547131054.85 58 10415 0.16 87.51 6387.60 -07 &inode->i_data.tree_lock-R: 0 0 0.00 0.00 0.00 23302 231198 0.25 8.45 98023.38 -08 -------------------------- -09 &inode->i_data.tree_lock 0 [] add_to_page_cache+0x5f/0x190 -10 -11 ............................................................................................................................................................................................... -12 -13 dcache_lock: 1037 1161 0.38 45.32 774.51 6611 243371 0.15 306.48 77387.24 -14 ----------- -15 dcache_lock 180 [] sys_getcwd+0x11e/0x230 -16 dcache_lock 165 [] d_alloc+0x15a/0x210 -17 dcache_lock 33 [] _atomic_dec_and_lock+0x4d/0x70 -18 dcache_lock 1 [] shrink_dcache_parent+0x18/0x130 - -This excerpt shows the first two lock class statistics. Line 01 shows the -output version - each time the format changes this will be updated. Line 02-04 -show the header with column descriptions. 
Lines 05-10 and 13-18 show the actual -statistics. These statistics come in two parts; the actual stats separated by a -short separator (line 08, 14) from the contention points. - -The first lock (05-10) is a read/write lock, and shows two lines above the -short separator. The contention points don't match the column descriptors, -they have two: contentions and [] symbol. - - -View the top contending locks: - -# grep : /proc/lock_stat | head - &inode->i_data.tree_lock-W: 15 21657 0.18 1093295.30 11547131054.85 58 10415 0.16 87.51 6387.60 - &inode->i_data.tree_lock-R: 0 0 0.00 0.00 0.00 23302 231198 0.25 8.45 98023.38 - dcache_lock: 1037 1161 0.38 45.32 774.51 6611 243371 0.15 306.48 77387.24 - &inode->i_mutex: 161 286 18446744073709 62882.54 1244614.55 3653 20598 18446744073709 62318.60 1693822.74 - &zone->lru_lock: 94 94 0.53 7.33 92.10 4366 32690 0.29 59.81 16350.06 - &inode->i_data.i_mmap_lock: 79 79 0.40 3.77 53.03 11779 87755 0.28 116.93 29898.44 - &q->__queue_lock: 48 50 0.52 31.62 86.31 774 13131 0.17 113.08 12277.52 - &rq->rq_lock_key: 43 47 0.74 68.50 170.63 3706 33929 0.22 107.99 17460.62 - &rq->rq_lock_key#2: 39 46 0.75 6.68 49.03 2979 32292 0.17 125.17 17137.63 - tasklist_lock-W: 15 15 1.45 10.87 32.70 1201 7390 0.58 62.55 13648.47 - -Clear the statistics: - -# echo 0 > /proc/lock_stat diff --git a/trunk/Documentation/sysrq.txt b/trunk/Documentation/sysrq.txt index 10c8f6922ef4..ef19142896ca 100644 --- a/trunk/Documentation/sysrq.txt +++ b/trunk/Documentation/sysrq.txt @@ -43,7 +43,7 @@ On x86 - You press the key combo 'ALT-SysRq-'. Note - Some keyboards may not have a key labeled 'SysRq'. The 'SysRq' key is also known as the 'Print Screen' key. Also some keyboards cannot handle so many keys being pressed at the same time, so you might - have better luck with "press Alt", "press SysRq", "release SysRq", + have better luck with "press Alt", "press SysRq", "release Alt", "press ", release everything. On SPARC - You press 'ALT-STOP-', I believe. diff --git a/trunk/Makefile b/trunk/Makefile index 4635a64da36c..c265e41ec55a 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 23 -EXTRAVERSION = +EXTRAVERSION =-rc7 NAME = Arr Matey! A Hairy Bilge Rat! # *DOCUMENTATION* diff --git a/trunk/arch/arm/kernel/bios32.c b/trunk/arch/arm/kernel/bios32.c index a2dd930d11ef..240c448ec31c 100644 --- a/trunk/arch/arm/kernel/bios32.c +++ b/trunk/arch/arm/kernel/bios32.c @@ -338,7 +338,7 @@ pbus_assign_bus_resources(struct pci_bus *bus, struct pci_sys_data *root) * pcibios_fixup_bus - Called after each bus is probed, * but before its children are examined. */ -void pcibios_fixup_bus(struct pci_bus *bus) +void __devinit pcibios_fixup_bus(struct pci_bus *bus) { struct pci_sys_data *root = bus->sysdata; struct pci_dev *dev; @@ -419,7 +419,7 @@ void pcibios_fixup_bus(struct pci_bus *bus) /* * Convert from Linux-centric to bus-centric addresses for bridge devices. 
*/ -void +void __devinit pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, struct resource *res) { diff --git a/trunk/arch/arm/mach-s3c2440/mach-osiris.c b/trunk/arch/arm/mach-s3c2440/mach-osiris.c index c326983f4a8f..0ba7e9060c7b 100644 --- a/trunk/arch/arm/mach-s3c2440/mach-osiris.c +++ b/trunk/arch/arm/mach-s3c2440/mach-osiris.c @@ -276,21 +276,7 @@ static unsigned char pm_osiris_ctrl0; static int osiris_pm_suspend(struct sys_device *sd, pm_message_t state) { - unsigned int tmp; - pm_osiris_ctrl0 = __raw_readb(OSIRIS_VA_CTRL0); - tmp = pm_osiris_ctrl0 & ~OSIRIS_CTRL0_NANDSEL; - - /* ensure correct NAND slot is selected on resume */ - if ((pm_osiris_ctrl0 & OSIRIS_CTRL0_BOOT_INT) == 0) - tmp |= 2; - - __raw_writeb(tmp, OSIRIS_VA_CTRL0); - - /* ensure that an nRESET is not generated on resume. */ - s3c2410_gpio_setpin(S3C2410_GPA21, 1); - s3c2410_gpio_cfgpin(S3C2410_GPA21, S3C2410_GPA21_OUT); - return 0; } @@ -299,10 +285,6 @@ static int osiris_pm_resume(struct sys_device *sd) if (pm_osiris_ctrl0 & OSIRIS_CTRL0_FIX8) __raw_writeb(OSIRIS_CTRL1_FIX8, OSIRIS_VA_CTRL1); - __raw_writeb(pm_osiris_ctrl0, OSIRIS_VA_CTRL0); - - s3c2410_gpio_cfgpin(S3C2410_GPA21, S3C2410_GPA21_nRSTOUT); - return 0; } diff --git a/trunk/arch/blackfin/kernel/bfin_gpio.c b/trunk/arch/blackfin/kernel/bfin_gpio.c index 5d488ef965ce..bafcfa52142b 100644 --- a/trunk/arch/blackfin/kernel/bfin_gpio.c +++ b/trunk/arch/blackfin/kernel/bfin_gpio.c @@ -84,7 +84,6 @@ #include #include #include -#include #include #ifdef BF533_FAMILY @@ -116,11 +115,7 @@ static struct gpio_port_t *gpio_bankb[gpio_bank(MAX_BLACKFIN_GPIOS)] = { }; #endif -static unsigned short reserved_gpio_map[gpio_bank(MAX_BLACKFIN_GPIOS)]; -static unsigned short reserved_peri_map[gpio_bank(MAX_BLACKFIN_GPIOS + 16)]; -char *str_ident = NULL; - -#define RESOURCE_LABEL_SIZE 16 +static unsigned short reserved_map[gpio_bank(MAX_BLACKFIN_GPIOS)]; #ifdef CONFIG_PM static unsigned short wakeup_map[gpio_bank(MAX_BLACKFIN_GPIOS)]; @@ -148,100 +143,22 @@ inline int check_gpio(unsigned short gpio) return 0; } -static void set_label(unsigned short ident, const char *label) -{ - - if (label && str_ident) { - strncpy(str_ident + ident * RESOURCE_LABEL_SIZE, label, - RESOURCE_LABEL_SIZE); - str_ident[ident * RESOURCE_LABEL_SIZE + - RESOURCE_LABEL_SIZE - 1] = 0; - } -} - -static char *get_label(unsigned short ident) -{ - if (!str_ident) - return "UNKNOWN"; - - return (str_ident[ident * RESOURCE_LABEL_SIZE] ? - (str_ident + ident * RESOURCE_LABEL_SIZE) : "UNKNOWN"); -} - -static int cmp_label(unsigned short ident, const char *label) -{ - if (label && str_ident) - return strncmp(str_ident + ident * RESOURCE_LABEL_SIZE, - label, strlen(label)); - else - return -EINVAL; -} - #ifdef BF537_FAMILY static void port_setup(unsigned short gpio, unsigned short usage) { - if (!check_gpio(gpio)) { - if (usage == GPIO_USAGE) { - *port_fer[gpio_bank(gpio)] &= ~gpio_bit(gpio); - } else - *port_fer[gpio_bank(gpio)] |= gpio_bit(gpio); - SSYNC(); - } + if (usage == GPIO_USAGE) { + if (*port_fer[gpio_bank(gpio)] & gpio_bit(gpio)) + printk(KERN_WARNING "bfin-gpio: Possible Conflict with Peripheral " + "usage and GPIO %d detected!\n", gpio); + *port_fer[gpio_bank(gpio)] &= ~gpio_bit(gpio); + } else + *port_fer[gpio_bank(gpio)] |= gpio_bit(gpio); + SSYNC(); } #else # define port_setup(...) 
do { } while (0) #endif -#ifdef BF537_FAMILY - -#define PMUX_LUT_RES 0 -#define PMUX_LUT_OFFSET 1 -#define PMUX_LUT_ENTRIES 41 -#define PMUX_LUT_SIZE 2 - -static unsigned short port_mux_lut[PMUX_LUT_ENTRIES][PMUX_LUT_SIZE] = { - {P_PPI0_D13, 11}, {P_PPI0_D14, 11}, {P_PPI0_D15, 11}, - {P_SPORT1_TFS, 11}, {P_SPORT1_TSCLK, 11}, {P_SPORT1_DTPRI, 11}, - {P_PPI0_D10, 10}, {P_PPI0_D11, 10}, {P_PPI0_D12, 10}, - {P_SPORT1_RSCLK, 10}, {P_SPORT1_RFS, 10}, {P_SPORT1_DRPRI, 10}, - {P_PPI0_D8, 9}, {P_PPI0_D9, 9}, {P_SPORT1_DRSEC, 9}, - {P_SPORT1_DTSEC, 9}, {P_TMR2, 8}, {P_PPI0_FS3, 8}, {P_TMR3, 7}, - {P_SPI0_SSEL4, 7}, {P_TMR4, 6}, {P_SPI0_SSEL5, 6}, {P_TMR5, 5}, - {P_SPI0_SSEL6, 5}, {P_UART1_RX, 4}, {P_UART1_TX, 4}, {P_TMR6, 4}, - {P_TMR7, 4}, {P_UART0_RX, 3}, {P_UART0_TX, 3}, {P_DMAR0, 3}, - {P_DMAR1, 3}, {P_SPORT0_DTSEC, 1}, {P_SPORT0_DRSEC, 1}, - {P_CAN0_RX, 1}, {P_CAN0_TX, 1}, {P_SPI0_SSEL7, 1}, - {P_SPORT0_TFS, 0}, {P_SPORT0_DTPRI, 0}, {P_SPI0_SSEL2, 0}, - {P_SPI0_SSEL3, 0} -}; - -static void portmux_setup(unsigned short per, unsigned short function) -{ - u16 y, muxreg, offset; - - for (y = 0; y < PMUX_LUT_ENTRIES; y++) { - if (port_mux_lut[y][PMUX_LUT_RES] == per) { - - /* SET PORTMUX REG */ - - offset = port_mux_lut[y][PMUX_LUT_OFFSET]; - muxreg = bfin_read_PORT_MUX(); - - if (offset != 1) { - muxreg &= ~(1 << offset); - } else { - muxreg &= ~(3 << 1); - } - - muxreg |= (function << offset); - bfin_write_PORT_MUX(muxreg); - } - } -} - -#else -# define portmux_setup(...) do { } while (0) -#endif static void default_gpio(unsigned short gpio) { @@ -262,15 +179,22 @@ static void default_gpio(unsigned short gpio) static int __init bfin_gpio_init(void) { - - str_ident = kzalloc(RESOURCE_LABEL_SIZE * 256, GFP_KERNEL); - if (!str_ident) - return -ENOMEM; + int i; printk(KERN_INFO "Blackfin GPIO Controller\n"); - return 0; + for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) + reserved_map[gpio_bank(i)] = 0; + +#if defined(BF537_FAMILY) && (defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)) +# if defined(CONFIG_BFIN_MAC_RMII) + reserved_map[gpio_bank(PORT_H)] = 0xC373; +# else + reserved_map[gpio_bank(PORT_H)] = 0xFFFF; +# endif +#endif + return 0; } arch_initcall(bfin_gpio_init); @@ -299,7 +223,7 @@ arch_initcall(bfin_gpio_init); void set_gpio_ ## name(unsigned short gpio, unsigned short arg) \ { \ unsigned long flags; \ - BUG_ON(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio))); \ + BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); \ local_irq_save(flags); \ if (arg) \ gpio_bankb[gpio_bank(gpio)]->name |= gpio_bit(gpio); \ @@ -319,7 +243,7 @@ SET_GPIO(both) #define SET_GPIO_SC(name) \ void set_gpio_ ## name(unsigned short gpio, unsigned short arg) \ { \ - BUG_ON(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio))); \ + BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); \ if (arg) \ gpio_bankb[gpio_bank(gpio)]->name ## _set = gpio_bit(gpio); \ else \ @@ -334,7 +258,7 @@ SET_GPIO_SC(maskb) void set_gpio_data(unsigned short gpio, unsigned short arg) { unsigned long flags; - BUG_ON(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio))); + BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); local_irq_save(flags); if (arg) gpio_bankb[gpio_bank(gpio)]->data_set = gpio_bit(gpio); @@ -353,7 +277,7 @@ SET_GPIO_SC(data) void set_gpio_toggle(unsigned short gpio) { unsigned long flags; - BUG_ON(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio))); + BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); local_irq_save(flags); 
gpio_bankb[gpio_bank(gpio)]->toggle = gpio_bit(gpio); bfin_read_CHIPID(); @@ -362,7 +286,7 @@ void set_gpio_toggle(unsigned short gpio) #else void set_gpio_toggle(unsigned short gpio) { - BUG_ON(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio))); + BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); gpio_bankb[gpio_bank(gpio)]->toggle = gpio_bit(gpio); } #endif @@ -426,7 +350,7 @@ unsigned short get_gpio_data(unsigned short gpio) { unsigned long flags; unsigned short ret; - BUG_ON(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio))); + BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); local_irq_save(flags); ret = 0x01 & (gpio_bankb[gpio_bank(gpio)]->data >> gpio_sub_n(gpio)); bfin_read_CHIPID(); @@ -570,14 +494,13 @@ u32 gpio_pm_setup(void) gpio_bank_saved[bank].dir = gpio_bankb[bank]->dir; gpio_bank_saved[bank].edge = gpio_bankb[bank]->edge; gpio_bank_saved[bank].both = gpio_bankb[bank]->both; - gpio_bank_saved[bank].reserved = - reserved_gpio_map[bank]; + gpio_bank_saved[bank].reserved = reserved_map[bank]; gpio = i; while (mask) { if (mask & 1) { - reserved_gpio_map[gpio_bank(gpio)] |= + reserved_map[gpio_bank(gpio)] |= gpio_bit(gpio); bfin_gpio_wakeup_type(gpio, wakeup_flags_map[gpio]); @@ -617,8 +540,7 @@ void gpio_pm_restore(void) gpio_bankb[bank]->edge = gpio_bank_saved[bank].edge; gpio_bankb[bank]->both = gpio_bank_saved[bank].both; - reserved_gpio_map[bank] = - gpio_bank_saved[bank].reserved; + reserved_map[bank] = gpio_bank_saved[bank].reserved; } @@ -628,141 +550,6 @@ void gpio_pm_restore(void) #endif - - - -int peripheral_request(unsigned short per, const char *label) -{ - unsigned long flags; - unsigned short ident = P_IDENT(per); - - /* - * Don't cares are pins with only one dedicated function - */ - - if (per & P_DONTCARE) - return 0; - - if (!(per & P_DEFINED)) - return -ENODEV; - - local_irq_save(flags); - - if (!check_gpio(ident)) { - - if (unlikely(reserved_gpio_map[gpio_bank(ident)] & gpio_bit(ident))) { - printk(KERN_ERR - "%s: Peripheral %d is already reserved as GPIO by %s !\n", - __FUNCTION__, ident, get_label(ident)); - dump_stack(); - local_irq_restore(flags); - return -EBUSY; - } - - } - - if (unlikely(reserved_peri_map[gpio_bank(ident)] & gpio_bit(ident))) { - - /* - * Pin functions like AMC address strobes my - * be requested and used by several drivers - */ - - if (!(per & P_MAYSHARE)) { - - /* - * Allow that the identical pin function can - * be requested from the same driver twice - */ - - if (cmp_label(ident, label) == 0) - goto anyway; - - printk(KERN_ERR - "%s: Peripheral %d function %d is already" - "reserved by %s !\n", - __FUNCTION__, ident, P_FUNCT2MUX(per), - get_label(ident)); - dump_stack(); - local_irq_restore(flags); - return -EBUSY; - } - - } - -anyway: - - - portmux_setup(per, P_FUNCT2MUX(per)); - - port_setup(ident, PERIPHERAL_USAGE); - - reserved_peri_map[gpio_bank(ident)] |= gpio_bit(ident); - local_irq_restore(flags); - set_label(ident, label); - - return 0; -} -EXPORT_SYMBOL(peripheral_request); - -int peripheral_request_list(unsigned short per[], const char *label) -{ - u16 cnt; - int ret; - - for (cnt = 0; per[cnt] != 0; cnt++) { - ret = peripheral_request(per[cnt], label); - if (ret < 0) - return ret; - } - - return 0; -} -EXPORT_SYMBOL(peripheral_request_list); - -void peripheral_free(unsigned short per) -{ - unsigned long flags; - unsigned short ident = P_IDENT(per); - - if (per & P_DONTCARE) - return; - - if (!(per & P_DEFINED)) - return; - - if (check_gpio(ident) < 0) - return; - - local_irq_save(flags); - - 
if (unlikely(!(reserved_peri_map[gpio_bank(ident)] - & gpio_bit(ident)))) { - local_irq_restore(flags); - return; - } - - if (!(per & P_MAYSHARE)) { - port_setup(ident, GPIO_USAGE); - } - - reserved_peri_map[gpio_bank(ident)] &= ~gpio_bit(ident); - - local_irq_restore(flags); -} -EXPORT_SYMBOL(peripheral_free); - -void peripheral_free_list(unsigned short per[]) -{ - u16 cnt; - - for (cnt = 0; per[cnt] != 0; cnt++) { - peripheral_free(per[cnt]); - } - -} -EXPORT_SYMBOL(peripheral_free_list); - /*********************************************************** * * FUNCTIONS: Blackfin GPIO Driver @@ -787,13 +574,13 @@ int gpio_request(unsigned short gpio, const char *label) local_irq_save(flags); - if (unlikely(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio))) { + if (unlikely(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))) { printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved!\n", gpio); dump_stack(); local_irq_restore(flags); return -EBUSY; } - reserved_gpio_map[gpio_bank(gpio)] |= gpio_bit(gpio); + reserved_map[gpio_bank(gpio)] |= gpio_bit(gpio); local_irq_restore(flags); @@ -812,7 +599,7 @@ void gpio_free(unsigned short gpio) local_irq_save(flags); - if (unlikely(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)))) { + if (unlikely(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio)))) { printk(KERN_ERR "bfin-gpio: GPIO %d wasn't reserved!\n", gpio); dump_stack(); local_irq_restore(flags); @@ -821,7 +608,7 @@ void gpio_free(unsigned short gpio) default_gpio(gpio); - reserved_gpio_map[gpio_bank(gpio)] &= ~gpio_bit(gpio); + reserved_map[gpio_bank(gpio)] &= ~gpio_bit(gpio); local_irq_restore(flags); } @@ -831,7 +618,7 @@ void gpio_direction_input(unsigned short gpio) { unsigned long flags; - BUG_ON(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio))); + BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); local_irq_save(flags); gpio_bankb[gpio_bank(gpio)]->dir &= ~gpio_bit(gpio); @@ -844,7 +631,7 @@ void gpio_direction_output(unsigned short gpio) { unsigned long flags; - BUG_ON(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio))); + BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); local_irq_save(flags); gpio_bankb[gpio_bank(gpio)]->inen &= ~gpio_bit(gpio); diff --git a/trunk/arch/blackfin/mach-common/entry.S b/trunk/arch/blackfin/mach-common/entry.S index 960458808344..d61bba98fb54 100644 --- a/trunk/arch/blackfin/mach-common/entry.S +++ b/trunk/arch/blackfin/mach-common/entry.S @@ -815,7 +815,7 @@ _extable: ALIGN ENTRY(_sys_call_table) - .long _sys_restart_syscall /* 0 */ + .long _sys_ni_syscall /* 0 - old "setup()" system call*/ .long _sys_exit .long _sys_fork .long _sys_read @@ -978,13 +978,13 @@ ENTRY(_sys_call_table) .long _sys_sched_get_priority_min /* 160 */ .long _sys_sched_rr_get_interval .long _sys_nanosleep - .long _sys_mremap + .long _sys_ni_syscall /* sys_mremap */ .long _sys_setresuid /* setresuid16 */ .long _sys_getresuid /* getresuid16 */ /* 165 */ .long _sys_ni_syscall /* for vm86 */ .long _sys_ni_syscall /* old "query_module" */ .long _sys_ni_syscall /* sys_poll */ - .long _sys_nfsservctl + .long _sys_ni_syscall /* sys_nfsservctl */ .long _sys_setresgid /* setresgid16 */ /* 170 */ .long _sys_getresgid /* getresgid16 */ .long _sys_prctl @@ -1040,7 +1040,7 @@ ENTRY(_sys_call_table) .long _sys_ni_syscall /* reserved for TUX */ .long _sys_ni_syscall .long _sys_gettid - .long _sys_readahead /* 225 */ + .long _sys_ni_syscall /* 225 */ /* sys_readahead */ .long _sys_setxattr .long _sys_lsetxattr .long _sys_fsetxattr @@ -1157,21 +1157,6 @@ 
ENTRY(_sys_call_table) .long _sys_shmctl .long _sys_shmdt /* 340 */ .long _sys_shmget - .long _sys_splice - .long _sys_sync_file_range - .long _sys_tee - .long _sys_vmsplice /* 345 */ - .long _sys_epoll_pwait - .long _sys_utimensat - .long _sys_signalfd - .long _sys_timerfd - .long _sys_eventfd /* 350 */ - .long _sys_pread64 - .long _sys_pwrite64 - .long _sys_fadvise64 - .long _sys_set_robust_list - .long _sys_get_robust_list /* 355 */ - .long _sys_fallocate .rept NR_syscalls-(.-_sys_call_table)/4 .long _sys_ni_syscall .endr diff --git a/trunk/arch/i386/boot/memory.c b/trunk/arch/i386/boot/memory.c index 378353956b5d..1a2e62db8bed 100644 --- a/trunk/arch/i386/boot/memory.c +++ b/trunk/arch/i386/boot/memory.c @@ -20,7 +20,6 @@ static int detect_memory_e820(void) { - int count = 0; u32 next = 0; u32 size, id; u8 err; @@ -28,33 +27,20 @@ static int detect_memory_e820(void) do { size = sizeof(struct e820entry); - - /* Important: %edx is clobbered by some BIOSes, - so it must be either used for the error output - or explicitly marked clobbered. */ + id = SMAP; asm("int $0x15; setc %0" - : "=d" (err), "+b" (next), "=a" (id), "+c" (size), + : "=am" (err), "+b" (next), "+d" (id), "+c" (size), "=m" (*desc) - : "D" (desc), "d" (SMAP), "a" (0xe820)); - - /* Some BIOSes stop returning SMAP in the middle of - the search loop. We don't know exactly how the BIOS - screwed up the map at that point, we might have a - partial map, the full map, or complete garbage, so - just return failure. */ - if (id != SMAP) { - count = 0; - break; - } + : "D" (desc), "a" (0xe820)); - if (err) + if (err || id != SMAP) break; - count++; + boot_params.e820_entries++; desc++; - } while (next && count < E820MAX); + } while (next && boot_params.e820_entries < E820MAX); - return boot_params.e820_entries = count; + return boot_params.e820_entries; } static int detect_memory_e801(void) @@ -103,16 +89,11 @@ static int detect_memory_88(void) int detect_memory(void) { - int err = -1; - if (detect_memory_e820() > 0) - err = 0; + return 0; if (!detect_memory_e801()) - err = 0; - - if (!detect_memory_88()) - err = 0; + return 0; - return err; + return detect_memory_88(); } diff --git a/trunk/arch/i386/kernel/cpu/cpufreq/longhaul.c b/trunk/arch/i386/kernel/cpu/cpufreq/longhaul.c index f0cce3c2dc3a..ef8f0bc3fc71 100644 --- a/trunk/arch/i386/kernel/cpu/cpufreq/longhaul.c +++ b/trunk/arch/i386/kernel/cpu/cpufreq/longhaul.c @@ -76,7 +76,6 @@ static unsigned int longhaul_index; /* Module parameters */ static int scale_voltage; static int disable_acpi_c3; -static int revid_errata; #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longhaul", msg) @@ -169,10 +168,7 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index, rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); /* Setup new frequency */ - if (!revid_errata) - longhaul.bits.RevisionKey = longhaul.bits.RevisionID; - else - longhaul.bits.RevisionKey = 0; + longhaul.bits.RevisionKey = longhaul.bits.RevisionID; longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf; longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4; /* Setup new voltage */ @@ -276,7 +272,7 @@ static void longhaul_setstate(unsigned int table_index) dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", fsb, mult/10, mult%10, print_speed(speed/1000)); -retry_loop: + preempt_disable(); local_irq_save(flags); @@ -348,47 +344,6 @@ static void longhaul_setstate(unsigned int table_index) preempt_enable(); freqs.new = calc_speed(longhaul_get_cpu_mult()); - /* Check if requested frequency is set. 
*/ - if (unlikely(freqs.new != speed)) { - printk(KERN_INFO PFX "Failed to set requested frequency!\n"); - /* Revision ID = 1 but processor is expecting revision key - * equal to 0. Jumpers at the bottom of processor will change - * multiplier and FSB, but will not change bits in Longhaul - * MSR nor enable voltage scaling. */ - if (!revid_errata) { - printk(KERN_INFO PFX "Enabling \"Ignore Revision ID\" " - "option.\n"); - revid_errata = 1; - msleep(200); - goto retry_loop; - } - /* Why ACPI C3 sometimes doesn't work is a mystery for me. - * But it does happen. Processor is entering ACPI C3 state, - * but it doesn't change frequency. I tried poking various - * bits in northbridge registers, but without success. */ - if (longhaul_flags & USE_ACPI_C3) { - printk(KERN_INFO PFX "Disabling ACPI C3 support.\n"); - longhaul_flags &= ~USE_ACPI_C3; - if (revid_errata) { - printk(KERN_INFO PFX "Disabling \"Ignore " - "Revision ID\" option.\n"); - revid_errata = 0; - } - msleep(200); - goto retry_loop; - } - /* This shouldn't happen. Longhaul ver. 2 was reported not - * working on processors without voltage scaling, but with - * RevID = 1. RevID errata will make things right. Just - * to be 100% sure. */ - if (longhaul_version == TYPE_LONGHAUL_V2) { - printk(KERN_INFO PFX "Switching to Longhaul ver. 1\n"); - longhaul_version = TYPE_LONGHAUL_V1; - msleep(200); - goto retry_loop; - } - } - /* Report true CPU frequency */ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); if (!bm_timeout) @@ -1001,20 +956,11 @@ static void __exit longhaul_exit(void) kfree(longhaul_table); } -/* Even if BIOS is exporting ACPI C3 state, and it is used - * with success when CPU is idle, this state doesn't - * trigger frequency transition in some cases. */ module_param (disable_acpi_c3, int, 0644); MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support"); -/* Change CPU voltage with frequency. Very usefull to save - * power, but most VIA C3 processors aren't supporting it. */ + module_param (scale_voltage, int, 0644); MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor"); -/* Force revision key to 0 for processors which doesn't - * support voltage scaling, but are introducing itself as - * such. 
*/ -module_param(revid_errata, int, 0644); -MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID"); MODULE_AUTHOR ("Dave Jones "); MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors."); diff --git a/trunk/arch/i386/xen/mmu.c b/trunk/arch/i386/xen/mmu.c index 874db0cd1d2a..4ae038aa6c24 100644 --- a/trunk/arch/i386/xen/mmu.c +++ b/trunk/arch/i386/xen/mmu.c @@ -559,9 +559,6 @@ void xen_exit_mmap(struct mm_struct *mm) put_cpu(); spin_lock(&mm->page_table_lock); - - /* pgd may not be pinned in the error exit path of execve */ - if (PagePinned(virt_to_page(mm->pgd))) - xen_pgd_unpin(mm->pgd); + xen_pgd_unpin(mm->pgd); spin_unlock(&mm->page_table_lock); } diff --git a/trunk/arch/mips/au1000/common/pci.c b/trunk/arch/mips/au1000/common/pci.c index 9be99a68932a..6c25e6c09f78 100644 --- a/trunk/arch/mips/au1000/common/pci.c +++ b/trunk/arch/mips/au1000/common/pci.c @@ -74,7 +74,6 @@ static int __init au1x_pci_setup(void) printk(KERN_ERR "Unable to ioremap pci space\n"); return 1; } - au1x_controller.io_map_base = virt_io_addr; #ifdef CONFIG_DMA_NONCOHERENT { diff --git a/trunk/arch/mips/au1000/mtx-1/board_setup.c b/trunk/arch/mips/au1000/mtx-1/board_setup.c index 2c460c116570..7bc5af8917da 100644 --- a/trunk/arch/mips/au1000/mtx-1/board_setup.c +++ b/trunk/arch/mips/au1000/mtx-1/board_setup.c @@ -54,11 +54,11 @@ void board_reset (void) void __init board_setup(void) { -#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) +#ifdef CONFIG_USB_OHCI // enable USB power switch au_writel( au_readl(GPIO2_DIR) | 0x10, GPIO2_DIR ); au_writel( 0x100000, GPIO2_OUTPUT ); -#endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */ +#endif // defined (CONFIG_USB_OHCI) #ifdef CONFIG_PCI #if defined(__MIPSEB__) diff --git a/trunk/arch/mips/au1000/pb1000/board_setup.c b/trunk/arch/mips/au1000/pb1000/board_setup.c index 0aed89114bfc..824cfafaff92 100644 --- a/trunk/arch/mips/au1000/pb1000/board_setup.c +++ b/trunk/arch/mips/au1000/pb1000/board_setup.c @@ -54,7 +54,7 @@ void __init board_setup(void) au_writel(0, SYS_PINSTATERD); udelay(100); -#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) +#ifdef CONFIG_USB_OHCI /* zero and disable FREQ2 */ sys_freqctrl = au_readl(SYS_FREQCTRL0); sys_freqctrl &= ~0xFFF00000; @@ -102,7 +102,7 @@ void __init board_setup(void) /* * Route 48MHz FREQ2 into USB Host and/or Device */ -#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) +#ifdef CONFIG_USB_OHCI sys_clksrc |= ((4<<12) | (0<<11) | (0<<10)); #endif au_writel(sys_clksrc, SYS_CLKSRC); @@ -116,7 +116,7 @@ void __init board_setup(void) au_writel(pin_func, SYS_PINFUNC); au_writel(0x2800, SYS_TRIOUTCLR); au_writel(0x0030, SYS_OUTPUTCLR); -#endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */ +#endif // defined (CONFIG_USB_OHCI) // make gpio 15 an input (for interrupt line) pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x100); diff --git a/trunk/arch/mips/au1000/pb1100/board_setup.c b/trunk/arch/mips/au1000/pb1100/board_setup.c index 259ca05860c3..6bc1f8e1b608 100644 --- a/trunk/arch/mips/au1000/pb1100/board_setup.c +++ b/trunk/arch/mips/au1000/pb1100/board_setup.c @@ -54,7 +54,7 @@ void __init board_setup(void) au_writel(0, SYS_PININPUTEN); udelay(100); -#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) +#ifdef CONFIG_USB_OHCI { u32 pin_func, sys_freqctrl, sys_clksrc; @@ -98,7 +98,7 @@ void __init board_setup(void) pin_func |= 0x8000; au_writel(pin_func, SYS_PINFUNC); } -#endif /* 
defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */ +#endif // defined (CONFIG_USB_OHCI) /* Enable sys bus clock divider when IDLE state or no bus activity. */ au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL); diff --git a/trunk/arch/mips/au1000/pb1500/board_setup.c b/trunk/arch/mips/au1000/pb1500/board_setup.c index a2d850db8902..c9b655616fb3 100644 --- a/trunk/arch/mips/au1000/pb1500/board_setup.c +++ b/trunk/arch/mips/au1000/pb1500/board_setup.c @@ -56,7 +56,7 @@ void __init board_setup(void) au_writel(0, SYS_PINSTATERD); udelay(100); -#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) +#ifdef CONFIG_USB_OHCI /* GPIO201 is input for PCMCIA card detect */ /* GPIO203 is input for PCMCIA interrupt request */ @@ -85,7 +85,7 @@ void __init board_setup(void) /* * Route 48MHz FREQ2 into USB Host and/or Device */ -#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) +#ifdef CONFIG_USB_OHCI sys_clksrc |= ((4<<12) | (0<<11) | (0<<10)); #endif au_writel(sys_clksrc, SYS_CLKSRC); @@ -95,7 +95,7 @@ void __init board_setup(void) // 2nd USB port is USB host pin_func |= 0x8000; au_writel(pin_func, SYS_PINFUNC); -#endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */ +#endif // defined (CONFIG_USB_OHCI) diff --git a/trunk/arch/mips/kernel/i8259.c b/trunk/arch/mips/kernel/i8259.c index 3a2d255361bc..b6c30800c667 100644 --- a/trunk/arch/mips/kernel/i8259.c +++ b/trunk/arch/mips/kernel/i8259.c @@ -177,7 +177,10 @@ void mask_and_ack_8259A(unsigned int irq) outb(cached_master_mask, PIC_MASTER_IMR); outb(0x60+irq,PIC_MASTER_CMD); /* 'Specific EOI to master */ } - smtc_im_ack_irq(irq); +#ifdef CONFIG_MIPS_MT_SMTC + if (irq_hwmask[irq] & ST0_IM) + set_c0_status(irq_hwmask[irq] & ST0_IM); +#endif /* CONFIG_MIPS_MT_SMTC */ spin_unlock_irqrestore(&i8259A_lock, flags); return; diff --git a/trunk/arch/mips/kernel/irq-msc01.c b/trunk/arch/mips/kernel/irq-msc01.c index 1ecdd50bfc60..410868b5ea5f 100644 --- a/trunk/arch/mips/kernel/irq-msc01.c +++ b/trunk/arch/mips/kernel/irq-msc01.c @@ -52,8 +52,11 @@ static void level_mask_and_ack_msc_irq(unsigned int irq) mask_msc_irq(irq); if (!cpu_has_veic) MSCIC_WRITE(MSC01_IC_EOI, 0); +#ifdef CONFIG_MIPS_MT_SMTC /* This actually needs to be a call into platform code */ - smtc_im_ack_irq(irq); + if (irq_hwmask[irq] & ST0_IM) + set_c0_status(irq_hwmask[irq] & ST0_IM); +#endif /* CONFIG_MIPS_MT_SMTC */ } /* @@ -70,7 +73,10 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq) MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT); MSCIC_WRITE(MSC01_IC_SUP+irq*8, r); } - smtc_im_ack_irq(irq); +#ifdef CONFIG_MIPS_MT_SMTC + if (irq_hwmask[irq] & ST0_IM) + set_c0_status(irq_hwmask[irq] & ST0_IM); +#endif /* CONFIG_MIPS_MT_SMTC */ } /* diff --git a/trunk/arch/mips/kernel/irq.c b/trunk/arch/mips/kernel/irq.c index a990aad2f049..aeded6c17de5 100644 --- a/trunk/arch/mips/kernel/irq.c +++ b/trunk/arch/mips/kernel/irq.c @@ -74,12 +74,20 @@ EXPORT_SYMBOL_GPL(free_irqno); */ void ack_bad_irq(unsigned int irq) { - smtc_im_ack_irq(irq); printk("unexpected IRQ # %d\n", irq); } atomic_t irq_err_count; +#ifdef CONFIG_MIPS_MT_SMTC +/* + * SMTC Kernel needs to manipulate low-level CPU interrupt mask + * in do_IRQ. These are passed in setup_irq_smtc() and stored + * in this table. 
+ */ +unsigned long irq_hwmask[NR_IRQS]; +#endif /* CONFIG_MIPS_MT_SMTC */ + /* * Generic, controller-independent functions: */ diff --git a/trunk/arch/mips/kernel/scall64-o32.S b/trunk/arch/mips/kernel/scall64-o32.S index dd68afce7da5..b3ed731a24c6 100644 --- a/trunk/arch/mips/kernel/scall64-o32.S +++ b/trunk/arch/mips/kernel/scall64-o32.S @@ -525,5 +525,5 @@ sys_call_table: PTR compat_sys_signalfd PTR compat_sys_timerfd PTR sys_eventfd - PTR sys32_fallocate /* 4320 */ + PTR sys_fallocate /* 4320 */ .size sys_call_table,.-sys_call_table diff --git a/trunk/arch/mips/kernel/smtc.c b/trunk/arch/mips/kernel/smtc.c index f09404377ef1..43826c16101d 100644 --- a/trunk/arch/mips/kernel/smtc.c +++ b/trunk/arch/mips/kernel/smtc.c @@ -25,11 +25,8 @@ #include /* - * SMTC Kernel needs to manipulate low-level CPU interrupt mask - * in do_IRQ. These are passed in setup_irq_smtc() and stored - * in this table. + * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. */ -unsigned long irq_hwmask[NR_IRQS]; #define LOCK_MT_PRA() \ local_irq_save(flags); \ diff --git a/trunk/arch/mips/kernel/vmlinux.lds.S b/trunk/arch/mips/kernel/vmlinux.lds.S index 087ab997487d..60bbaecde187 100644 --- a/trunk/arch/mips/kernel/vmlinux.lds.S +++ b/trunk/arch/mips/kernel/vmlinux.lds.S @@ -45,8 +45,6 @@ SECTIONS __dbe_table : { *(__dbe_table) } __stop___dbe_table = .; - NOTES - RODATA /* writeable */ diff --git a/trunk/arch/mips/mm/pg-r4k.c b/trunk/arch/mips/mm/pg-r4k.c index e47e9e9486bf..dc795be62807 100644 --- a/trunk/arch/mips/mm/pg-r4k.c +++ b/trunk/arch/mips/mm/pg-r4k.c @@ -209,7 +209,7 @@ static inline void build_cdex_p(void) } if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) - build_insn_word(0x8c200000); /* lw $zero, ($at) */ + build_insn_word(0x3c01a000); /* lui $at, 0xa000 */ mi.c_format.opcode = cache_op; mi.c_format.rs = 4; /* $a0 */ diff --git a/trunk/arch/mips/pci/ops-mace.c b/trunk/arch/mips/pci/ops-mace.c index fe5451449304..8008e31c5e81 100644 --- a/trunk/arch/mips/pci/ops-mace.c +++ b/trunk/arch/mips/pci/ops-mace.c @@ -29,20 +29,22 @@ * 4 N/C */ -static inline int mkaddr(struct pci_bus *bus, unsigned int devfn, - unsigned int reg) -{ - return ((bus->number & 0xff) << 16) | - ((devfn & 0xff) << 8) | - (reg & 0xfc); -} +#define chkslot(_bus,_devfn) \ +do { \ + if ((_bus)->number > 0 || PCI_SLOT (_devfn) < 1 \ + || PCI_SLOT (_devfn) > 3) \ + return PCIBIOS_DEVICE_NOT_FOUND; \ +} while (0) +#define mkaddr(_devfn, _reg) \ +((((_devfn) & 0xffUL) << 8) | ((_reg) & 0xfcUL)) static int mace_pci_read_config(struct pci_bus *bus, unsigned int devfn, int reg, int size, u32 *val) { - mace->pci.config_addr = mkaddr(bus, devfn, reg); + chkslot(bus, devfn); + mace->pci.config_addr = mkaddr(devfn, reg); switch (size) { case 1: *val = mace->pci.config_data.b[(reg & 3) ^ 3]; @@ -64,7 +66,8 @@ static int mace_pci_write_config(struct pci_bus *bus, unsigned int devfn, int reg, int size, u32 val) { - mace->pci.config_addr = mkaddr(bus, devfn, reg); + chkslot(bus, devfn); + mace->pci.config_addr = mkaddr(devfn, reg); switch (size) { case 1: mace->pci.config_data.b[(reg & 3) ^ 3] = val; diff --git a/trunk/arch/mips/sgi-ip32/ip32-platform.c b/trunk/arch/mips/sgi-ip32/ip32-platform.c index 7309e48d163d..ba3697ee7ff6 100644 --- a/trunk/arch/mips/sgi-ip32/ip32-platform.c +++ b/trunk/arch/mips/sgi-ip32/ip32-platform.c @@ -41,8 +41,8 @@ static struct platform_device uart8250_device = { static int __init uart8250_init(void) { - uart8250_data[0].membase = (void __iomem *) &mace->isa.serial1; - 
uart8250_data[1].membase = (void __iomem *) &mace->isa.serial1; + uart8250_data[0].iobase = (unsigned long) &mace->isa.serial1; + uart8250_data[1].iobase = (unsigned long) &mace->isa.serial1; return platform_device_register(&uart8250_device); } diff --git a/trunk/arch/powerpc/boot/dts/mpc8349emitx.dts b/trunk/arch/powerpc/boot/dts/mpc8349emitx.dts index 44c065a6b5e7..502f47c01797 100644 --- a/trunk/arch/powerpc/boot/dts/mpc8349emitx.dts +++ b/trunk/arch/powerpc/boot/dts/mpc8349emitx.dts @@ -99,7 +99,6 @@ #size-cells = <0>; interrupt-parent = < &ipic >; interrupts = <26 8>; - dr_mode = "peripheral"; phy_type = "ulpi"; }; diff --git a/trunk/arch/powerpc/kernel/process.c b/trunk/arch/powerpc/kernel/process.c index 8a1b001d0b11..e477c9d0498b 100644 --- a/trunk/arch/powerpc/kernel/process.c +++ b/trunk/arch/powerpc/kernel/process.c @@ -605,13 +605,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) regs->ccr = 0; regs->gpr[1] = sp; - /* - * We have just cleared all the nonvolatile GPRs, so make - * FULL_REGS(regs) return true. This is necessary to allow - * ptrace to examine the thread immediately after exec. - */ - regs->trap &= ~1UL; - #ifdef CONFIG_PPC32 regs->mq = 0; regs->nip = start; diff --git a/trunk/arch/powerpc/platforms/83xx/usb.c b/trunk/arch/powerpc/platforms/83xx/usb.c index eafe7605cdac..e7fdf013cd39 100644 --- a/trunk/arch/powerpc/platforms/83xx/usb.c +++ b/trunk/arch/powerpc/platforms/83xx/usb.c @@ -76,14 +76,14 @@ int mpc834x_usb_cfg(void) if (port0_is_dr) printk(KERN_WARNING "834x USB port0 can't be used by both DR and MPH!\n"); - sicrl &= ~MPC834X_SICRL_USB0; + sicrl |= MPC834X_SICRL_USB0; } prop = of_get_property(np, "port1", NULL); if (prop) { if (port1_is_dr) printk(KERN_WARNING "834x USB port1 can't be used by both DR and MPH!\n"); - sicrl &= ~MPC834X_SICRL_USB1; + sicrl |= MPC834X_SICRL_USB1; } of_node_put(np); } diff --git a/trunk/arch/powerpc/platforms/cell/spufs/file.c b/trunk/arch/powerpc/platforms/cell/spufs/file.c index 7de4e919687b..4100ddc52f02 100644 --- a/trunk/arch/powerpc/platforms/cell/spufs/file.c +++ b/trunk/arch/powerpc/platforms/cell/spufs/file.c @@ -2177,8 +2177,8 @@ struct tree_descr spufs_dir_contents[] = { { "mbox_stat", &spufs_mbox_stat_fops, 0444, }, { "ibox_stat", &spufs_ibox_stat_fops, 0444, }, { "wbox_stat", &spufs_wbox_stat_fops, 0444, }, - { "signal1", &spufs_signal1_fops, 0666, }, - { "signal2", &spufs_signal2_fops, 0666, }, + { "signal1", &spufs_signal1_nosched_fops, 0222, }, + { "signal2", &spufs_signal2_nosched_fops, 0222, }, { "signal1_type", &spufs_signal1_type, 0666, }, { "signal2_type", &spufs_signal2_type, 0666, }, { "cntl", &spufs_cntl_fops, 0666, }, diff --git a/trunk/arch/powerpc/platforms/pseries/xics.c b/trunk/arch/powerpc/platforms/pseries/xics.c index f0b5ff17d860..5bd90a7eb763 100644 --- a/trunk/arch/powerpc/platforms/pseries/xics.c +++ b/trunk/arch/powerpc/platforms/pseries/xics.c @@ -419,7 +419,7 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) * For the moment only implement delivery to all cpus or one cpu. 
* Get current irq_server for the given irq */ - irq_server = get_irq_server(virq, 1); + irq_server = get_irq_server(irq, 1); if (irq_server == -1) { char cpulist[128]; cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); diff --git a/trunk/arch/powerpc/sysdev/commproc.c b/trunk/arch/powerpc/sysdev/commproc.c index dd5417aec1b4..4f67b89ba1d0 100644 --- a/trunk/arch/powerpc/sysdev/commproc.c +++ b/trunk/arch/powerpc/sysdev/commproc.c @@ -395,4 +395,4 @@ uint cpm_dpram_phys(u8* addr) { return (dpram_pbase + (uint)(addr - dpram_vbase)); } -EXPORT_SYMBOL(cpm_dpram_phys); +EXPORT_SYMBOL(cpm_dpram_addr); diff --git a/trunk/arch/ppc/8xx_io/commproc.c b/trunk/arch/ppc/8xx_io/commproc.c index 9da880be4dc0..7088428e1fe2 100644 --- a/trunk/arch/ppc/8xx_io/commproc.c +++ b/trunk/arch/ppc/8xx_io/commproc.c @@ -459,7 +459,7 @@ EXPORT_SYMBOL(cpm_dpdump); void *cpm_dpram_addr(unsigned long offset) { - return (void *)(dpram_vbase + offset); + return ((immap_t *)IMAP_ADDR)->im_cpm.cp_dpmem + offset; } EXPORT_SYMBOL(cpm_dpram_addr); diff --git a/trunk/arch/sparc/kernel/ebus.c b/trunk/arch/sparc/kernel/ebus.c index d850785b2080..e2d02fd13f35 100644 --- a/trunk/arch/sparc/kernel/ebus.c +++ b/trunk/arch/sparc/kernel/ebus.c @@ -156,8 +156,6 @@ void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *d dev->prom_node = dp; regs = of_get_property(dp, "reg", &len); - if (!regs) - len = 0; if (len % sizeof(struct linux_prom_registers)) { prom_printf("UGH: proplen for %s was %d, need multiple of %d\n", dev->prom_node->name, len, diff --git a/trunk/arch/sparc64/kernel/binfmt_aout32.c b/trunk/arch/sparc64/kernel/binfmt_aout32.c index d208cc7804f2..f205fc7cbcd0 100644 --- a/trunk/arch/sparc64/kernel/binfmt_aout32.c +++ b/trunk/arch/sparc64/kernel/binfmt_aout32.c @@ -177,7 +177,7 @@ static u32 __user *create_aout32_tables(char __user *p, struct linux_binprm *bpr get_user(c,p++); } while (c); } - put_user(0,argv); + put_user(NULL,argv); current->mm->arg_end = current->mm->env_start = (unsigned long) p; while (envc-->0) { char c; @@ -186,7 +186,7 @@ static u32 __user *create_aout32_tables(char __user *p, struct linux_binprm *bpr get_user(c,p++); } while (c); } - put_user(0,envp); + put_user(NULL,envp); current->mm->env_end = (unsigned long) p; return sp; } diff --git a/trunk/arch/sparc64/kernel/ebus.c b/trunk/arch/sparc64/kernel/ebus.c index 04ab81cb4f48..bc9ae36f7a43 100644 --- a/trunk/arch/sparc64/kernel/ebus.c +++ b/trunk/arch/sparc64/kernel/ebus.c @@ -375,10 +375,7 @@ static void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_de dev->num_addrs = 0; dev->num_irqs = 0; } else { - const int *regs = of_get_property(dp, "reg", &len); - - if (!regs) - len = 0; + (void) of_get_property(dp, "reg", &len); dev->num_addrs = len / sizeof(struct linux_prom_registers); for (i = 0; i < dev->num_addrs; i++) diff --git a/trunk/arch/sparc64/kernel/pci_common.c b/trunk/arch/sparc64/kernel/pci_common.c index c76bfbb7da08..2f61c4b12596 100644 --- a/trunk/arch/sparc64/kernel/pci_common.c +++ b/trunk/arch/sparc64/kernel/pci_common.c @@ -264,7 +264,7 @@ static int sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, unsigned int func = PCI_FUNC(devfn); unsigned long ret; - if (!bus && devfn == 0x00) + if (bus_dev == pbm->pci_bus && devfn == 0x00) return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where, size, value); if (config_out_of_range(pbm, bus, devfn, where)) { @@ -300,7 +300,7 @@ static int sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, unsigned int func = 
PCI_FUNC(devfn); unsigned long ret; - if (!bus && devfn == 0x00) + if (bus_dev == pbm->pci_bus && devfn == 0x00) return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where, size, value); if (config_out_of_range(pbm, bus, devfn, where)) { diff --git a/trunk/arch/sparc64/kernel/prom.c b/trunk/arch/sparc64/kernel/prom.c index a246e962e5a7..0614dff63d7c 100644 --- a/trunk/arch/sparc64/kernel/prom.c +++ b/trunk/arch/sparc64/kernel/prom.c @@ -1046,8 +1046,7 @@ static void __init irq_trans_init(struct device_node *dp) if (!strcmp(dp->name, "fhc") && !strcmp(dp->parent->name, "central")) return central_irq_trans_init(dp); - if (!strcmp(dp->name, "virtual-devices") || - !strcmp(dp->name, "niu")) + if (!strcmp(dp->name, "virtual-devices")) return sun4v_vdev_irq_trans_init(dp); } diff --git a/trunk/arch/sparc64/kernel/smp.c b/trunk/arch/sparc64/kernel/smp.c index c73b7a48b036..b84c49e3697c 100644 --- a/trunk/arch/sparc64/kernel/smp.c +++ b/trunk/arch/sparc64/kernel/smp.c @@ -353,8 +353,6 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu) int timeout, ret; p = fork_idle(cpu); - if (IS_ERR(p)) - return PTR_ERR(p); callin_flag = 0; cpu_new_thread = task_thread_info(p); diff --git a/trunk/arch/sparc64/kernel/vio.c b/trunk/arch/sparc64/kernel/vio.c index 0c1ee619d814..1550ac5673da 100644 --- a/trunk/arch/sparc64/kernel/vio.c +++ b/trunk/arch/sparc64/kernel/vio.c @@ -292,7 +292,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp, } vdev->dp = dp; - printk(KERN_INFO "VIO: Adding device %s\n", vdev->dev.bus_id); + printk(KERN_ERR "VIO: Adding device %s\n", vdev->dev.bus_id); err = device_register(&vdev->dev); if (err) { @@ -342,33 +342,8 @@ static struct mdesc_notifier_client vio_device_notifier = { .node_name = "virtual-device-port", }; -/* We are only interested in domain service ports under the - * "domain-services" node. On control nodes there is another port - * under "openboot" that we should not mess with as aparently that is - * reserved exclusively for OBP use. - */ -static void vio_add_ds(struct mdesc_handle *hp, u64 node) -{ - int found; - u64 a; - - found = 0; - mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) { - u64 target = mdesc_arc_target(hp, a); - const char *name = mdesc_node_name(hp, target); - - if (!strcmp(name, "domain-services")) { - found = 1; - break; - } - } - - if (found) - (void) vio_create_one(hp, node, &root_vdev->dev); -} - static struct mdesc_notifier_client vio_ds_notifier = { - .add = vio_add_ds, + .add = vio_add, .remove = vio_remove, .node_name = "domain-services-port", }; diff --git a/trunk/arch/sparc64/lib/NGcopy_from_user.S b/trunk/arch/sparc64/lib/NGcopy_from_user.S index e7f433f71b42..2d93456f76dd 100644 --- a/trunk/arch/sparc64/lib/NGcopy_from_user.S +++ b/trunk/arch/sparc64/lib/NGcopy_from_user.S @@ -1,6 +1,6 @@ /* NGcopy_from_user.S: Niagara optimized copy from userspace. * - * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net) + * Copyright (C) 2006 David S. 
Miller (davem@davemloft.net) */ #define EX_LD(x) \ @@ -8,8 +8,8 @@ .section .fixup; \ .align 4; \ 99: wr %g0, ASI_AIUS, %asi;\ - ret; \ - restore %g0, 1, %o0; \ + retl; \ + mov 1, %o0; \ .section __ex_table,"a";\ .align 4; \ .word 98b, 99b; \ @@ -24,7 +24,7 @@ #define LOAD(type,addr,dest) type##a [addr] ASI_AIUS, dest #define LOAD_TWIN(addr_reg,dest0,dest1) \ ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_AIUS, dest0 -#define EX_RETVAL(x) %g0 +#define EX_RETVAL(x) 0 #ifdef __KERNEL__ #define PREAMBLE \ diff --git a/trunk/arch/sparc64/lib/NGcopy_to_user.S b/trunk/arch/sparc64/lib/NGcopy_to_user.S index 6ea01c5532a0..34112d5054ef 100644 --- a/trunk/arch/sparc64/lib/NGcopy_to_user.S +++ b/trunk/arch/sparc64/lib/NGcopy_to_user.S @@ -1,6 +1,6 @@ /* NGcopy_to_user.S: Niagara optimized copy to userspace. * - * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net) + * Copyright (C) 2006 David S. Miller (davem@davemloft.net) */ #define EX_ST(x) \ @@ -8,8 +8,8 @@ .section .fixup; \ .align 4; \ 99: wr %g0, ASI_AIUS, %asi;\ - ret; \ - restore %g0, 1, %o0; \ + retl; \ + mov 1, %o0; \ .section __ex_table,"a";\ .align 4; \ .word 98b, 99b; \ @@ -23,7 +23,7 @@ #define FUNC_NAME NGcopy_to_user #define STORE(type,src,addr) type##a src, [addr] ASI_AIUS #define STORE_ASI ASI_BLK_INIT_QUAD_LDD_AIUS -#define EX_RETVAL(x) %g0 +#define EX_RETVAL(x) 0 #ifdef __KERNEL__ /* Writing to %asi is _expensive_ so we hardcode it. diff --git a/trunk/arch/sparc64/lib/NGmemcpy.S b/trunk/arch/sparc64/lib/NGmemcpy.S index 96a14caf6966..66063a9a66b8 100644 --- a/trunk/arch/sparc64/lib/NGmemcpy.S +++ b/trunk/arch/sparc64/lib/NGmemcpy.S @@ -1,6 +1,6 @@ /* NGmemcpy.S: Niagara optimized memcpy. * - * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net) + * Copyright (C) 2006 David S. Miller (davem@davemloft.net) */ #ifdef __KERNEL__ @@ -16,12 +16,6 @@ wr %g0, ASI_PNF, %asi #endif -#ifdef __sparc_v9__ -#define SAVE_AMOUNT 128 -#else -#define SAVE_AMOUNT 64 -#endif - #ifndef STORE_ASI #define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P #endif @@ -56,11 +50,7 @@ #endif #ifndef STORE_INIT -#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA #define STORE_INIT(src,addr) stxa src, [addr] %asi -#else -#define STORE_INIT(src,addr) stx src, [addr + 0x00] -#endif #endif #ifndef FUNC_NAME @@ -83,19 +73,18 @@ .globl FUNC_NAME .type FUNC_NAME,#function -FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ - PREAMBLE - save %sp, -SAVE_AMOUNT, %sp - srlx %i2, 31, %g2 +FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ + srlx %o2, 31, %g2 cmp %g2, 0 tne %xcc, 5 - mov %i0, %o0 - cmp %i2, 0 + PREAMBLE + mov %o0, GLOBAL_SPARE + cmp %o2, 0 be,pn %XCC, 85f - or %o0, %i1, %i3 - cmp %i2, 16 + or %o0, %o1, %o3 + cmp %o2, 16 blu,a,pn %XCC, 80f - or %i3, %i2, %i3 + or %o3, %o2, %o3 /* 2 blocks (128 bytes) is the minimum we can do the block * copy with. We need to ensure that we'll iterate at least @@ -104,31 +93,31 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ * to (64 - 1) bytes from the length before we perform the * block copy loop. */ - cmp %i2, (2 * 64) + cmp %o2, (2 * 64) blu,pt %XCC, 70f - andcc %i3, 0x7, %g0 + andcc %o3, 0x7, %g0 /* %o0: dst - * %i1: src - * %i2: len (known to be >= 128) + * %o1: src + * %o2: len (known to be >= 128) * - * The block copy loops will use %i4/%i5,%g2/%g3 as + * The block copy loops will use %o4/%o5,%g2/%g3 as * temporaries while copying the data. */ - LOAD(prefetch, %i1, #one_read) + LOAD(prefetch, %o1, #one_read) wr %g0, STORE_ASI, %asi /* Align destination on 64-byte boundary. 
*/ - andcc %o0, (64 - 1), %i4 + andcc %o0, (64 - 1), %o4 be,pt %XCC, 2f - sub %i4, 64, %i4 - sub %g0, %i4, %i4 ! bytes to align dst - sub %i2, %i4, %i2 -1: subcc %i4, 1, %i4 - EX_LD(LOAD(ldub, %i1, %g1)) + sub %o4, 64, %o4 + sub %g0, %o4, %o4 ! bytes to align dst + sub %o2, %o4, %o2 +1: subcc %o4, 1, %o4 + EX_LD(LOAD(ldub, %o1, %g1)) EX_ST(STORE(stb, %g1, %o0)) - add %i1, 1, %i1 + add %o1, 1, %o1 bne,pt %XCC, 1b add %o0, 1, %o0 @@ -147,155 +136,111 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ * aligned store data at a time, this is easy to ensure. */ 2: - andcc %i1, (16 - 1), %i4 - andn %i2, (64 - 1), %g1 ! block copy loop iterator + andcc %o1, (16 - 1), %o4 + andn %o2, (64 - 1), %g1 ! block copy loop iterator + sub %o2, %g1, %o2 ! final sub-block copy bytes be,pt %XCC, 50f - sub %i2, %g1, %i2 ! final sub-block copy bytes - - cmp %i4, 8 - be,pt %XCC, 10f - sub %i1, %i4, %i1 + cmp %o4, 8 + be,a,pt %XCC, 10f + sub %o1, 0x8, %o1 /* Neither 8-byte nor 16-byte aligned, shift and mask. */ - and %i4, 0x7, GLOBAL_SPARE - sll GLOBAL_SPARE, 3, GLOBAL_SPARE - mov 64, %i5 - EX_LD(LOAD_TWIN(%i1, %g2, %g3)) - sub %i5, GLOBAL_SPARE, %i5 - mov 16, %o4 - mov 32, %o5 - mov 48, %o7 - mov 64, %i3 - - bg,pn %XCC, 9f - nop - -#define MIX_THREE_WORDS(WORD1, WORD2, WORD3, PRE_SHIFT, POST_SHIFT, TMP) \ - sllx WORD1, POST_SHIFT, WORD1; \ - srlx WORD2, PRE_SHIFT, TMP; \ - sllx WORD2, POST_SHIFT, WORD2; \ - or WORD1, TMP, WORD1; \ - srlx WORD3, PRE_SHIFT, TMP; \ - or WORD2, TMP, WORD2; - -8: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3)) - MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1) - LOAD(prefetch, %i1 + %i3, #one_read) - - EX_ST(STORE_INIT(%g2, %o0 + 0x00)) - EX_ST(STORE_INIT(%g3, %o0 + 0x08)) - - EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3)) - MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1) - - EX_ST(STORE_INIT(%o2, %o0 + 0x10)) - EX_ST(STORE_INIT(%o3, %o0 + 0x18)) - - EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) - MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1) - - EX_ST(STORE_INIT(%g2, %o0 + 0x20)) - EX_ST(STORE_INIT(%g3, %o0 + 0x28)) - - EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3)) - add %i1, 64, %i1 - MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1) - - EX_ST(STORE_INIT(%o2, %o0 + 0x30)) - EX_ST(STORE_INIT(%o3, %o0 + 0x38)) + mov %g1, %o4 + and %o1, 0x7, %g1 + sll %g1, 3, %g1 + mov 64, %o3 + andn %o1, 0x7, %o1 + EX_LD(LOAD(ldx, %o1, %g2)) + sub %o3, %g1, %o3 + sllx %g2, %g1, %g2 - subcc %g1, 64, %g1 - bne,pt %XCC, 8b +#define SWIVEL_ONE_DWORD(SRC, TMP1, TMP2, PRE_VAL, PRE_SHIFT, POST_SHIFT, DST)\ + EX_LD(LOAD(ldx, SRC, TMP1)); \ + srlx TMP1, PRE_SHIFT, TMP2; \ + or TMP2, PRE_VAL, TMP2; \ + EX_ST(STORE_INIT(TMP2, DST)); \ + sllx TMP1, POST_SHIFT, PRE_VAL; + +1: add %o1, 0x8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x00) + add %o1, 0x8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x08) + add %o1, 0x8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x10) + add %o1, 0x8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x18) + add %o1, 32, %o1 + LOAD(prefetch, %o1, #one_read) + sub %o1, 32 - 8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x20) + add %o1, 8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x28) + add %o1, 8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x30) + add %o1, 8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x38) + subcc %o4, 64, %o4 + bne,pt %XCC, 1b add %o0, 64, %o0 - ba,pt %XCC, 60f - add %i1, %i4, %i1 - -9: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3)) - MIX_THREE_WORDS(%g3, %o2, %o3, 
%i5, GLOBAL_SPARE, %o1) - LOAD(prefetch, %i1 + %i3, #one_read) - - EX_ST(STORE_INIT(%g3, %o0 + 0x00)) - EX_ST(STORE_INIT(%o2, %o0 + 0x08)) - - EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3)) - MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1) - - EX_ST(STORE_INIT(%o3, %o0 + 0x10)) - EX_ST(STORE_INIT(%g2, %o0 + 0x18)) - - EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) - MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1) - - EX_ST(STORE_INIT(%g3, %o0 + 0x20)) - EX_ST(STORE_INIT(%o2, %o0 + 0x28)) - - EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3)) - add %i1, 64, %i1 - MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1) - - EX_ST(STORE_INIT(%o3, %o0 + 0x30)) - EX_ST(STORE_INIT(%g2, %o0 + 0x38)) - - subcc %g1, 64, %g1 - bne,pt %XCC, 9b - add %o0, 64, %o0 +#undef SWIVEL_ONE_DWORD + srl %g1, 3, %g1 ba,pt %XCC, 60f - add %i1, %i4, %i1 + add %o1, %g1, %o1 10: /* Destination is 64-byte aligned, source was only 8-byte * aligned but it has been subtracted by 8 and we perform * one twin load ahead, then add 8 back into source when * we finish the loop. */ - EX_LD(LOAD_TWIN(%i1, %o4, %o5)) - mov 16, %o7 - mov 32, %g2 - mov 48, %g3 - mov 64, %o1 -1: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) - LOAD(prefetch, %i1 + %o1, #one_read) + EX_LD(LOAD_TWIN(%o1, %o4, %o5)) +1: add %o1, 16, %o1 + EX_LD(LOAD_TWIN(%o1, %g2, %g3)) + add %o1, 16 + 32, %o1 + LOAD(prefetch, %o1, #one_read) + sub %o1, 32, %o1 EX_ST(STORE_INIT(%o5, %o0 + 0x00)) ! initializes cache line - EX_ST(STORE_INIT(%o2, %o0 + 0x08)) - EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5)) - EX_ST(STORE_INIT(%o3, %o0 + 0x10)) + EX_ST(STORE_INIT(%g2, %o0 + 0x08)) + EX_LD(LOAD_TWIN(%o1, %o4, %o5)) + add %o1, 16, %o1 + EX_ST(STORE_INIT(%g3, %o0 + 0x10)) EX_ST(STORE_INIT(%o4, %o0 + 0x18)) - EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3)) + EX_LD(LOAD_TWIN(%o1, %g2, %g3)) + add %o1, 16, %o1 EX_ST(STORE_INIT(%o5, %o0 + 0x20)) - EX_ST(STORE_INIT(%o2, %o0 + 0x28)) - EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5)) - add %i1, 64, %i1 - EX_ST(STORE_INIT(%o3, %o0 + 0x30)) + EX_ST(STORE_INIT(%g2, %o0 + 0x28)) + EX_LD(LOAD_TWIN(%o1, %o4, %o5)) + EX_ST(STORE_INIT(%g3, %o0 + 0x30)) EX_ST(STORE_INIT(%o4, %o0 + 0x38)) subcc %g1, 64, %g1 bne,pt %XCC, 1b add %o0, 64, %o0 ba,pt %XCC, 60f - add %i1, 0x8, %i1 + add %o1, 0x8, %o1 50: /* Destination is 64-byte aligned, and source is 16-byte * aligned. */ - mov 16, %o7 - mov 32, %g2 - mov 48, %g3 - mov 64, %o1 -1: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5)) - EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) - LOAD(prefetch, %i1 + %o1, #one_read) +1: EX_LD(LOAD_TWIN(%o1, %o4, %o5)) + add %o1, 16, %o1 + EX_LD(LOAD_TWIN(%o1, %g2, %g3)) + add %o1, 16 + 32, %o1 + LOAD(prefetch, %o1, #one_read) + sub %o1, 32, %o1 EX_ST(STORE_INIT(%o4, %o0 + 0x00)) ! 
initializes cache line EX_ST(STORE_INIT(%o5, %o0 + 0x08)) - EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5)) - EX_ST(STORE_INIT(%o2, %o0 + 0x10)) - EX_ST(STORE_INIT(%o3, %o0 + 0x18)) - EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3)) - add %i1, 64, %i1 + EX_LD(LOAD_TWIN(%o1, %o4, %o5)) + add %o1, 16, %o1 + EX_ST(STORE_INIT(%g2, %o0 + 0x10)) + EX_ST(STORE_INIT(%g3, %o0 + 0x18)) + EX_LD(LOAD_TWIN(%o1, %g2, %g3)) + add %o1, 16, %o1 EX_ST(STORE_INIT(%o4, %o0 + 0x20)) EX_ST(STORE_INIT(%o5, %o0 + 0x28)) - EX_ST(STORE_INIT(%o2, %o0 + 0x30)) - EX_ST(STORE_INIT(%o3, %o0 + 0x38)) + EX_ST(STORE_INIT(%g2, %o0 + 0x30)) + EX_ST(STORE_INIT(%g3, %o0 + 0x38)) subcc %g1, 64, %g1 bne,pt %XCC, 1b add %o0, 64, %o0 @@ -304,47 +249,47 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ 60: membar #Sync - /* %i2 contains any final bytes still needed to be copied + /* %o2 contains any final bytes still needed to be copied * over. If anything is left, we copy it one byte at a time. */ - RESTORE_ASI(%i3) - brz,pt %i2, 85f - sub %o0, %i1, %i3 + RESTORE_ASI(%o3) + brz,pt %o2, 85f + sub %o0, %o1, %o3 ba,a,pt %XCC, 90f .align 64 70: /* 16 < len <= 64 */ bne,pn %XCC, 75f - sub %o0, %i1, %i3 + sub %o0, %o1, %o3 72: - andn %i2, 0xf, %i4 - and %i2, 0xf, %i2 -1: subcc %i4, 0x10, %i4 - EX_LD(LOAD(ldx, %i1, %o4)) - add %i1, 0x08, %i1 - EX_LD(LOAD(ldx, %i1, %g1)) - sub %i1, 0x08, %i1 - EX_ST(STORE(stx, %o4, %i1 + %i3)) - add %i1, 0x8, %i1 - EX_ST(STORE(stx, %g1, %i1 + %i3)) + andn %o2, 0xf, %o4 + and %o2, 0xf, %o2 +1: subcc %o4, 0x10, %o4 + EX_LD(LOAD(ldx, %o1, %o5)) + add %o1, 0x08, %o1 + EX_LD(LOAD(ldx, %o1, %g1)) + sub %o1, 0x08, %o1 + EX_ST(STORE(stx, %o5, %o1 + %o3)) + add %o1, 0x8, %o1 + EX_ST(STORE(stx, %g1, %o1 + %o3)) bgu,pt %XCC, 1b - add %i1, 0x8, %i1 -73: andcc %i2, 0x8, %g0 + add %o1, 0x8, %o1 +73: andcc %o2, 0x8, %g0 be,pt %XCC, 1f nop - sub %i2, 0x8, %i2 - EX_LD(LOAD(ldx, %i1, %o4)) - EX_ST(STORE(stx, %o4, %i1 + %i3)) - add %i1, 0x8, %i1 -1: andcc %i2, 0x4, %g0 + sub %o2, 0x8, %o2 + EX_LD(LOAD(ldx, %o1, %o5)) + EX_ST(STORE(stx, %o5, %o1 + %o3)) + add %o1, 0x8, %o1 +1: andcc %o2, 0x4, %g0 be,pt %XCC, 1f nop - sub %i2, 0x4, %i2 - EX_LD(LOAD(lduw, %i1, %i5)) - EX_ST(STORE(stw, %i5, %i1 + %i3)) - add %i1, 0x4, %i1 -1: cmp %i2, 0 + sub %o2, 0x4, %o2 + EX_LD(LOAD(lduw, %o1, %o5)) + EX_ST(STORE(stw, %o5, %o1 + %o3)) + add %o1, 0x4, %o1 +1: cmp %o2, 0 be,pt %XCC, 85f nop ba,pt %xcc, 90f @@ -355,71 +300,71 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ sub %g1, 0x8, %g1 be,pn %icc, 2f sub %g0, %g1, %g1 - sub %i2, %g1, %i2 + sub %o2, %g1, %o2 1: subcc %g1, 1, %g1 - EX_LD(LOAD(ldub, %i1, %i5)) - EX_ST(STORE(stb, %i5, %i1 + %i3)) + EX_LD(LOAD(ldub, %o1, %o5)) + EX_ST(STORE(stb, %o5, %o1 + %o3)) bgu,pt %icc, 1b - add %i1, 1, %i1 + add %o1, 1, %o1 -2: add %i1, %i3, %o0 - andcc %i1, 0x7, %g1 +2: add %o1, %o3, %o0 + andcc %o1, 0x7, %g1 bne,pt %icc, 8f sll %g1, 3, %g1 - cmp %i2, 16 + cmp %o2, 16 bgeu,pt %icc, 72b nop ba,a,pt %xcc, 73b -8: mov 64, %i3 - andn %i1, 0x7, %i1 - EX_LD(LOAD(ldx, %i1, %g2)) - sub %i3, %g1, %i3 - andn %i2, 0x7, %i4 +8: mov 64, %o3 + andn %o1, 0x7, %o1 + EX_LD(LOAD(ldx, %o1, %g2)) + sub %o3, %g1, %o3 + andn %o2, 0x7, %o4 sllx %g2, %g1, %g2 -1: add %i1, 0x8, %i1 - EX_LD(LOAD(ldx, %i1, %g3)) - subcc %i4, 0x8, %i4 - srlx %g3, %i3, %i5 - or %i5, %g2, %i5 - EX_ST(STORE(stx, %i5, %o0)) +1: add %o1, 0x8, %o1 + EX_LD(LOAD(ldx, %o1, %g3)) + subcc %o4, 0x8, %o4 + srlx %g3, %o3, %o5 + or %o5, %g2, %o5 + EX_ST(STORE(stx, %o5, %o0)) add %o0, 0x8, %o0 bgu,pt %icc, 1b sllx %g3, %g1, %g2 srl %g1, 3, %g1 - andcc %i2, 0x7, %i2 + andcc %o2, 0x7, %o2 
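The misaligned-source tail path here rebuilds each destination dword from two aligned loads using the sllx/srlx/or sequence visible above. Below is a rough user-space C equivalent of that shift-and-merge trick. It assumes big-endian byte order as on SPARC and a source offset that is non-zero (the aligned case takes a separate path, as in the assembly); all names are made up for the sketch:

#include <stdint.h>
#include <stdio.h>

/* dst receives ndwords dwords spliced from a source that is not 8-byte
 * aligned; the first aligned load deliberately starts before src, just
 * as the ldx after the andn does above. */
static void copy_unaligned_src(uint64_t *dst, const unsigned char *src,
                               size_t ndwords)
{
        unsigned off = (uintptr_t)src & 7;      /* must be non-zero here */
        const uint64_t *s = (const uint64_t *)(src - off);
        unsigned pre = off * 8;                 /* sllx shift count */
        unsigned post = 64 - pre;               /* srlx shift count */
        uint64_t cur = *s++;                    /* first aligned dword */
        size_t i;

        for (i = 0; i < ndwords; i++) {
                uint64_t next = *s++;
                dst[i] = (cur << pre) | (next >> post);
                cur = next;
        }
}

int main(void)
{
        uint64_t backing[8];
        unsigned char *buf = (unsigned char *)backing;
        uint64_t out[2];
        size_t i;

        for (i = 0; i < sizeof(backing); i++)
                buf[i] = (unsigned char)i;
        copy_unaligned_src(out, buf + 3, 2);    /* source offset of 3 bytes */
        printf("0x%016llx 0x%016llx\n",
               (unsigned long long)out[0], (unsigned long long)out[1]);
        return 0;
}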
be,pn %icc, 85f - add %i1, %g1, %i1 + add %o1, %g1, %o1 ba,pt %xcc, 90f - sub %o0, %i1, %i3 + sub %o0, %o1, %o3 .align 64 80: /* 0 < len <= 16 */ - andcc %i3, 0x3, %g0 + andcc %o3, 0x3, %g0 bne,pn %XCC, 90f - sub %o0, %i1, %i3 + sub %o0, %o1, %o3 1: - subcc %i2, 4, %i2 - EX_LD(LOAD(lduw, %i1, %g1)) - EX_ST(STORE(stw, %g1, %i1 + %i3)) + subcc %o2, 4, %o2 + EX_LD(LOAD(lduw, %o1, %g1)) + EX_ST(STORE(stw, %g1, %o1 + %o3)) bgu,pt %XCC, 1b - add %i1, 4, %i1 + add %o1, 4, %o1 -85: ret - restore EX_RETVAL(%i0), %g0, %o0 +85: retl + mov EX_RETVAL(GLOBAL_SPARE), %o0 .align 32 90: - subcc %i2, 1, %i2 - EX_LD(LOAD(ldub, %i1, %g1)) - EX_ST(STORE(stb, %g1, %i1 + %i3)) + subcc %o2, 1, %o2 + EX_LD(LOAD(ldub, %o1, %g1)) + EX_ST(STORE(stb, %g1, %o1 + %o3)) bgu,pt %XCC, 90b - add %i1, 1, %i1 - ret - restore EX_RETVAL(%i0), %g0, %o0 + add %o1, 1, %o1 + retl + mov EX_RETVAL(GLOBAL_SPARE), %o0 .size FUNC_NAME, .-FUNC_NAME diff --git a/trunk/arch/x86_64/vdso/voffset.h b/trunk/arch/x86_64/vdso/voffset.h index 4af67c79085f..5304204911f2 100644 --- a/trunk/arch/x86_64/vdso/voffset.h +++ b/trunk/arch/x86_64/vdso/voffset.h @@ -1 +1 @@ -#define VDSO_TEXT_OFFSET 0x600 +#define VDSO_TEXT_OFFSET 0x500 diff --git a/trunk/crypto/async_tx/async_tx.c b/trunk/crypto/async_tx/async_tx.c index bc18cbb8ea79..035007145e78 100644 --- a/trunk/crypto/async_tx/async_tx.c +++ b/trunk/crypto/async_tx/async_tx.c @@ -80,7 +80,6 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) { enum dma_status status; struct dma_async_tx_descriptor *iter; - struct dma_async_tx_descriptor *parent; if (!tx) return DMA_SUCCESS; @@ -88,15 +87,8 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) /* poll through the dependency chain, return when tx is complete */ do { iter = tx; - - /* find the root of the unsubmitted dependency chain */ - while (iter->cookie == -EBUSY) { - parent = iter->parent; - if (parent && parent->cookie == -EBUSY) - iter = iter->parent; - else - break; - } + while (iter->cookie == -EBUSY) + iter = iter->parent; status = dma_sync_wait(iter->chan, iter->cookie); } while (status == DMA_IN_PROGRESS || (iter != tx)); diff --git a/trunk/drivers/acpi/Kconfig b/trunk/drivers/acpi/Kconfig index 9685b75898ed..4875f0149eb4 100644 --- a/trunk/drivers/acpi/Kconfig +++ b/trunk/drivers/acpi/Kconfig @@ -117,7 +117,6 @@ config ACPI_BUTTON config ACPI_VIDEO tristate "Video" depends on X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL - depends on INPUT help This driver implement the ACPI Extensions For Display Adapters for integrated graphics devices on motherboard, as specified in diff --git a/trunk/drivers/acpi/hardware/hwsleep.c b/trunk/drivers/acpi/hardware/hwsleep.c index 8181afbd1d4d..cf69c0040a39 100644 --- a/trunk/drivers/acpi/hardware/hwsleep.c +++ b/trunk/drivers/acpi/hardware/hwsleep.c @@ -234,11 +234,15 @@ acpi_status acpi_enter_sleep_state_prep(u8 sleep_state) "While executing method _SST")); } - /* Disable/Clear all GPEs */ - + /* + * 1) Disable/Clear all GPEs + */ status = acpi_hw_disable_all_gpes(); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } - return_ACPI_STATUS(status); + return_ACPI_STATUS(AE_OK); } ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep) diff --git a/trunk/drivers/acpi/sleep/Makefile b/trunk/drivers/acpi/sleep/Makefile index f1fb888c2d29..ba9bd403d443 100644 --- a/trunk/drivers/acpi/sleep/Makefile +++ b/trunk/drivers/acpi/sleep/Makefile @@ -1,5 +1,5 @@ obj-y := wakeup.o -obj-y += main.o +obj-$(CONFIG_ACPI_SLEEP) += main.o obj-$(CONFIG_ACPI_SLEEP) += proc.o EXTRA_CFLAGS += 
$(ACPI_CFLAGS) diff --git a/trunk/drivers/acpi/sleep/main.c b/trunk/drivers/acpi/sleep/main.c index caf8721ae6fb..85633c585aab 100644 --- a/trunk/drivers/acpi/sleep/main.c +++ b/trunk/drivers/acpi/sleep/main.c @@ -24,30 +24,7 @@ u8 sleep_states[ACPI_S_STATE_COUNT]; -#ifdef CONFIG_PM_SLEEP static u32 acpi_target_sleep_state = ACPI_STATE_S0; -#endif - -int acpi_sleep_prepare(u32 acpi_state) -{ -#ifdef CONFIG_ACPI_SLEEP - /* do we have a wakeup address for S2 and S3? */ - if (acpi_state == ACPI_STATE_S3) { - if (!acpi_wakeup_address) { - return -EFAULT; - } - acpi_set_firmware_waking_vector((acpi_physical_address) - virt_to_phys((void *) - acpi_wakeup_address)); - - } - ACPI_FLUSH_CPU_CACHE(); - acpi_enable_wakeup_device_prep(acpi_state); -#endif - acpi_gpe_sleep_prepare(acpi_state); - acpi_enter_sleep_state_prep(acpi_state); - return 0; -} #ifdef CONFIG_SUSPEND static struct pm_ops acpi_pm_ops; @@ -83,6 +60,27 @@ static int acpi_pm_set_target(suspend_state_t pm_state) return error; } +int acpi_sleep_prepare(u32 acpi_state) +{ +#ifdef CONFIG_ACPI_SLEEP + /* do we have a wakeup address for S2 and S3? */ + if (acpi_state == ACPI_STATE_S3) { + if (!acpi_wakeup_address) { + return -EFAULT; + } + acpi_set_firmware_waking_vector((acpi_physical_address) + virt_to_phys((void *) + acpi_wakeup_address)); + + } + ACPI_FLUSH_CPU_CACHE(); + acpi_enable_wakeup_device_prep(acpi_state); +#endif + acpi_gpe_sleep_prepare(acpi_state); + acpi_enter_sleep_state_prep(acpi_state); + return 0; +} + /** * acpi_pm_prepare - Do preliminary suspend work. * @pm_state: ignored @@ -256,11 +254,6 @@ static int acpi_hibernation_enter(void) static void acpi_hibernation_finish(void) { - /* - * If ACPI is not enabled by the BIOS and the boot kernel, we need to - * enable it here. - */ - acpi_enable(); acpi_leave_sleep_state(ACPI_STATE_S4); acpi_disable_wakeup_device(ACPI_STATE_S4); @@ -306,7 +299,6 @@ int acpi_suspend(u32 acpi_state) return -EINVAL; } -#ifdef CONFIG_PM_SLEEP /** * acpi_pm_device_sleep_state - return preferred power state of ACPI device * in the system sleep state given by %acpi_target_sleep_state @@ -381,7 +373,6 @@ int acpi_pm_device_sleep_state(struct device *dev, int wake, int *d_min_p) *d_min_p = d_min; return d_max; } -#endif static void acpi_power_off_prepare(void) { diff --git a/trunk/drivers/acpi/tables/tbutils.c b/trunk/drivers/acpi/tables/tbutils.c index 5f1d85f2ffe4..8cc9492ffbf2 100644 --- a/trunk/drivers/acpi/tables/tbutils.c +++ b/trunk/drivers/acpi/tables/tbutils.c @@ -400,7 +400,7 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags) u32 table_count; struct acpi_table_header *table; acpi_physical_address address; - acpi_physical_address uninitialized_var(rsdt_address); + acpi_physical_address rsdt_address; u32 length; u8 *table_entry; acpi_status status; diff --git a/trunk/drivers/ata/ata_piix.c b/trunk/drivers/ata/ata_piix.c index 6996eb5b7506..3b8bf1812dc8 100644 --- a/trunk/drivers/ata/ata_piix.c +++ b/trunk/drivers/ata/ata_piix.c @@ -920,13 +920,6 @@ static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev) static int piix_broken_suspend(void) { static struct dmi_system_id sysids[] = { - { - .ident = "TECRA M3", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), - DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M3"), - }, - }, { .ident = "TECRA M5", .matches = { diff --git a/trunk/drivers/ata/pata_ixp4xx_cf.c b/trunk/drivers/ata/pata_ixp4xx_cf.c index 5dea3584c6c2..4ca7fd6118d5 100644 --- a/trunk/drivers/ata/pata_ixp4xx_cf.c +++ 
b/trunk/drivers/ata/pata_ixp4xx_cf.c @@ -189,9 +189,6 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev) data->cs0 = devm_ioremap(&pdev->dev, cs0->start, 0x1000); data->cs1 = devm_ioremap(&pdev->dev, cs1->start, 0x1000); - if (!data->cs0 || !data->cs1) - return -ENOMEM; - irq = platform_get_irq(pdev, 0); if (irq) set_irq_type(irq, IRQT_RISING); diff --git a/trunk/drivers/ata/pata_marvell.c b/trunk/drivers/ata/pata_marvell.c index b45506f1ef73..ae206f35f747 100644 --- a/trunk/drivers/ata/pata_marvell.c +++ b/trunk/drivers/ata/pata_marvell.c @@ -44,10 +44,10 @@ static int marvell_pre_reset(struct ata_port *ap, unsigned long deadline) return -ENOMEM; printk("BAR5:"); for(i = 0; i <= 0x0F; i++) - printk("%02X:%02X ", i, ioread8(barp + i)); + printk("%02X:%02X ", i, readb(barp + i)); printk("\n"); - devices = ioread32(barp + 0x0C); + devices = readl(barp + 0x0C); pci_iounmap(pdev, barp); if ((pdev->device == 0x6145) && (ap->port_no == 0) && diff --git a/trunk/drivers/ata/pata_sis.c b/trunk/drivers/ata/pata_sis.c index cce2834b2b60..2bd7645f1a88 100644 --- a/trunk/drivers/ata/pata_sis.c +++ b/trunk/drivers/ata/pata_sis.c @@ -375,9 +375,8 @@ static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev) int drive_pci = sis_old_port_base(adev); u16 timing; - /* MWDMA 0-2 and UDMA 0-5 */ const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 }; - const u16 udma_bits[] = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000, 0x8000 }; + const u16 udma_bits[] = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000}; pci_read_config_word(pdev, drive_pci, &timing); diff --git a/trunk/drivers/ata/sata_mv.c b/trunk/drivers/ata/sata_mv.c index cb7dec97fee6..11bf6c7ac122 100644 --- a/trunk/drivers/ata/sata_mv.c +++ b/trunk/drivers/ata/sata_mv.c @@ -313,10 +313,7 @@ enum { #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) enum { - /* DMA boundary 0xffff is required by the s/g splitting - * we need on /length/ in mv_fill-sg(). 
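The removed comment above refers to splitting each scatter/gather entry so that no chunk's length field runs past a 64 KiB boundary of its address. A stand-alone sketch of that rule, mirroring the while loop this series drops from mv_fill_sg(); emit() and main() are placeholders for illustration only:

#include <stdint.h>
#include <stdio.h>

static void emit(uint64_t addr, uint32_t len)
{
        printf("chunk at 0x%llx len 0x%x\n", (unsigned long long)addr, len);
}

static void fill_sg(uint64_t addr, uint32_t sg_len)
{
        while (sg_len) {
                uint32_t offset = addr & 0xffff;
                uint32_t len = sg_len;

                if (offset + sg_len > 0x10000)
                        len = 0x10000 - offset;   /* stop at the boundary */

                emit(addr, len);
                sg_len -= len;
                addr += len;
        }
}

int main(void)
{
        fill_sg(0xfff0, 0x20010);       /* crosses two 64 KiB boundaries */
        return 0;
}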
- */ - MV_DMA_BOUNDARY = 0xffffU, + MV_DMA_BOUNDARY = 0xffffffffU, /* mask of register bits containing lower 32 bits * of EDMA request queue DMA address @@ -451,7 +448,7 @@ static struct scsi_host_template mv5_sht = { .queuecommand = ata_scsi_queuecmd, .can_queue = ATA_DEF_QUEUE, .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = MV_MAX_SG_CT / 2, + .sg_tablesize = MV_MAX_SG_CT, .cmd_per_lun = ATA_SHT_CMD_PER_LUN, .emulated = ATA_SHT_EMULATED, .use_clustering = 1, @@ -469,7 +466,7 @@ static struct scsi_host_template mv6_sht = { .queuecommand = ata_scsi_queuecmd, .can_queue = ATA_DEF_QUEUE, .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = MV_MAX_SG_CT / 2, + .sg_tablesize = MV_MAX_SG_CT, .cmd_per_lun = ATA_SHT_CMD_PER_LUN, .emulated = ATA_SHT_EMULATED, .use_clustering = 1, @@ -1142,27 +1139,15 @@ static unsigned int mv_fill_sg(struct ata_queued_cmd *qc) dma_addr_t addr = sg_dma_address(sg); u32 sg_len = sg_dma_len(sg); - while (sg_len) { - u32 offset = addr & 0xffff; - u32 len = sg_len; - - if ((offset + sg_len > 0x10000)) - len = 0x10000 - offset; - - mv_sg->addr = cpu_to_le32(addr & 0xffffffff); - mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16); - mv_sg->flags_size = cpu_to_le32(len); + mv_sg->addr = cpu_to_le32(addr & 0xffffffff); + mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16); + mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff); - sg_len -= len; - addr += len; - - if (!sg_len && ata_sg_is_last(sg, qc)) - mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL); - - mv_sg++; - n_sg++; - } + if (ata_sg_is_last(sg, qc)) + mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL); + mv_sg++; + n_sg++; } return n_sg; diff --git a/trunk/drivers/ata/sata_sil24.c b/trunk/drivers/ata/sata_sil24.c index 233e88693395..ef83e6b1e314 100644 --- a/trunk/drivers/ata/sata_sil24.c +++ b/trunk/drivers/ata/sata_sil24.c @@ -888,16 +888,6 @@ static inline void sil24_host_intr(struct ata_port *ap) u32 slot_stat, qc_active; int rc; - /* If PCIX_IRQ_WOC, there's an inherent race window between - * clearing IRQ pending status and reading PORT_SLOT_STAT - * which may cause spurious interrupts afterwards. This is - * unavoidable and much better than losing interrupts which - * happens if IRQ pending is cleared after reading - * PORT_SLOT_STAT. 
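A small simulation of the ordering argument in the comment above, with plain variables standing in for the write-one-to-clear IRQ status and slot status registers; completion_lands() models a command that completes between the two accesses. This only sketches the race, it is not driver code:

#include <stdio.h>

static unsigned irq_stat, slot_stat;

static void completion_lands(void)     /* a command completes "right now" */
{
        slot_stat |= 0x2;
        irq_stat  |= 0x1;
}

/* Ack-then-read: the late completion is still visible in slot_stat, so
 * the worst case is one spurious interrupt later. */
static unsigned ack_then_read(void)
{
        irq_stat = 0;                   /* write-one-to-clear ack */
        completion_lands();             /* races in between       */
        return slot_stat;
}

/* Read-then-ack: the late completion's IRQ bit is wiped along with the
 * old one and its slot was never sampled, so it is lost. */
static unsigned read_then_ack(void)
{
        unsigned seen = slot_stat;
        completion_lands();             /* races in between       */
        irq_stat = 0;
        return seen;
}

int main(void)
{
        slot_stat = 0x1; irq_stat = 0x1;
        printf("ack-then-read sees 0x%x\n", ack_then_read());

        slot_stat = 0x1; irq_stat = 0x1;
        printf("read-then-ack sees 0x%x\n", read_then_ack());
        return 0;
}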
- */ - if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) - writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT); - slot_stat = readl(port + PORT_SLOT_STAT); if (unlikely(slot_stat & HOST_SSTAT_ATTN)) { @@ -905,6 +895,9 @@ static inline void sil24_host_intr(struct ata_port *ap) return; } + if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) + writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT); + qc_active = slot_stat & ~HOST_SSTAT_ATTN; rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc); if (rc > 0) @@ -917,8 +910,7 @@ static inline void sil24_host_intr(struct ata_port *ap) return; } - /* spurious interrupts are expected if PCIX_IRQ_WOC */ - if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit()) + if (ata_ratelimit()) ata_port_printk(ap, KERN_INFO, "spurious interrupt " "(slot_stat 0x%x active_tag %d sactive 0x%x)\n", slot_stat, ap->active_tag, ap->sactive); diff --git a/trunk/drivers/base/core.c b/trunk/drivers/base/core.c index ec86d6fc2360..6de33d7a29ba 100644 --- a/trunk/drivers/base/core.c +++ b/trunk/drivers/base/core.c @@ -284,7 +284,6 @@ static ssize_t show_uevent(struct device *dev, struct device_attribute *attr, /* let the kset specific function add its keys */ pos = data; - memset(envp, 0, sizeof(envp)); retval = kset->uevent_ops->uevent(kset, &dev->kobj, envp, ARRAY_SIZE(envp), pos, PAGE_SIZE); @@ -586,13 +585,9 @@ void device_initialize(struct device *dev) static struct kobject * get_device_parent(struct device *dev, struct device *parent) { - /* - * Set the parent to the class, not the parent device - * for topmost devices in class hierarchy. - * This keeps sysfs from having a symlink to make old - * udevs happy - */ - if (dev->class && (!parent || parent->class != dev->class)) + /* Set the parent to the class, not the parent device */ + /* this keeps sysfs from having a symlink to make old udevs happy */ + if (dev->class) return &dev->class->subsys.kobj; else if (parent) return &parent->kobj; diff --git a/trunk/drivers/cdrom/cdrom.c b/trunk/drivers/cdrom/cdrom.c index 79245714f0a7..67ee3d4b2878 100644 --- a/trunk/drivers/cdrom/cdrom.c +++ b/trunk/drivers/cdrom/cdrom.c @@ -1032,10 +1032,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp) check_disk_change(ip->i_bdev); return 0; err_release: - if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { - cdi->ops->lock_door(cdi, 0); - cdinfo(CD_OPEN, "door unlocked.\n"); - } cdi->ops->release(cdi); err: cdi->use_count--; diff --git a/trunk/drivers/char/Makefile b/trunk/drivers/char/Makefile index c78ff26647ee..d68ddbe70f73 100644 --- a/trunk/drivers/char/Makefile +++ b/trunk/drivers/char/Makefile @@ -129,7 +129,7 @@ $(obj)/defkeymap.o: $(obj)/defkeymap.c ifdef GENERATE_KEYMAP -$(obj)/defkeymap.c: $(obj)/%.c: $(src)/%.map +$(obj)/defkeymap.c $(obj)/%.c: $(src)/%.map loadkeys --mktable $< > $@.tmp sed -e 's/^static *//' $@.tmp > $@ rm $@.tmp diff --git a/trunk/drivers/char/agp/intel-agp.c b/trunk/drivers/char/agp/intel-agp.c index 141ca176c397..a5d0e95a227a 100644 --- a/trunk/drivers/char/agp/intel-agp.c +++ b/trunk/drivers/char/agp/intel-agp.c @@ -506,6 +506,11 @@ static void intel_i830_init_gtt_entries(void) break; } } else { + /* G33's GTT stolen memory is separate from gfx data + * stolen memory. 
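For reference, the condition being dropped from get_device_parent() above parents a classed device on its class kobject only when its own parent is not in the same class. A toy C rendering of that rule with stand-in types; none of these structs are the real driver-core ones:

#include <stddef.h>
#include <stdio.h>

struct kobj  { const char *name; };
struct klass { struct kobj kobj; };
struct dev   { struct klass *class; struct kobj kobj; };

static struct kobj *pick_parent(struct dev *dev, struct dev *parent)
{
        if (dev->class && (!parent || parent->class != dev->class))
                return &dev->class->kobj;       /* topmost in its class  */
        else if (parent)
                return &parent->kobj;           /* ordinary child device */
        return NULL;                            /* no class, no parent   */
}

int main(void)
{
        struct klass block = { { "block" } };
        struct dev disk = { &block, { "sda" } };
        struct dev part = { &block, { "sda1" } };

        printf("sda  -> %s\n", pick_parent(&disk, NULL)->name);
        printf("sda1 -> %s\n", pick_parent(&part, &disk)->name);
        return 0;
}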
+ */ + if (IS_G33) + size = 0; switch (gmch_ctrl & I855_GMCH_GMS_MASK) { case I855_GMCH_GMS_STOLEN_1M: gtt_entries = MB(1) - KB(size); diff --git a/trunk/drivers/char/drm/i915_drv.h b/trunk/drivers/char/drm/i915_drv.h index 28b98733beb8..737088bd0780 100644 --- a/trunk/drivers/char/drm/i915_drv.h +++ b/trunk/drivers/char/drm/i915_drv.h @@ -210,12 +210,6 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define I915REG_INT_MASK_R 0x020a8 #define I915REG_INT_ENABLE_R 0x020a0 -#define I915REG_PIPEASTAT 0x70024 -#define I915REG_PIPEBSTAT 0x71024 - -#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17) -#define I915_VBLANK_CLEAR (1UL<<1) - #define SRX_INDEX 0x3c4 #define SRX_DATA 0x3c5 #define SR01 1 diff --git a/trunk/drivers/char/drm/i915_irq.c b/trunk/drivers/char/drm/i915_irq.c index bb8e9e9c8201..4b4b2ce89863 100644 --- a/trunk/drivers/char/drm/i915_irq.c +++ b/trunk/drivers/char/drm/i915_irq.c @@ -214,10 +214,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u16 temp; - u32 pipea_stats, pipeb_stats; - - pipea_stats = I915_READ(I915REG_PIPEASTAT); - pipeb_stats = I915_READ(I915REG_PIPEBSTAT); temp = I915_READ16(I915REG_INT_IDENTITY_R); @@ -229,8 +225,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) return IRQ_NONE; I915_WRITE16(I915REG_INT_IDENTITY_R, temp); - (void) I915_READ16(I915REG_INT_IDENTITY_R); - DRM_READMEMORYBARRIER(); dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); @@ -258,12 +252,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) if (dev_priv->swaps_pending > 0) drm_locked_tasklet(dev, i915_vblank_tasklet); - I915_WRITE(I915REG_PIPEASTAT, - pipea_stats|I915_VBLANK_INTERRUPT_ENABLE| - I915_VBLANK_CLEAR); - I915_WRITE(I915REG_PIPEBSTAT, - pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE| - I915_VBLANK_CLEAR); } return IRQ_HANDLED; diff --git a/trunk/drivers/char/hpet.c b/trunk/drivers/char/hpet.c index 4c16778e3f84..7ecffc9c738f 100644 --- a/trunk/drivers/char/hpet.c +++ b/trunk/drivers/char/hpet.c @@ -62,8 +62,6 @@ static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ; -/* This clocksource driver currently only works on ia64 */ -#ifdef CONFIG_IA64 static void __iomem *hpet_mctr; static cycle_t read_hpet(void) @@ -81,7 +79,6 @@ static struct clocksource clocksource_hpet = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static struct clocksource *hpet_clocksource; -#endif /* A lock for concurrent access by app and isr hpet activity. 
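The interrupt-handler lines removed above acknowledge vblank status by reading PIPEASTAT and writing it back with the enable and clear bits set. A tiny stand-alone sketch of that read/modify/write acknowledge, with a plain variable in place of the MMIO register and the bit values taken from the defines shown earlier:

#include <stdio.h>

#define VBLANK_INTERRUPT_ENABLE (1u << 17)
#define VBLANK_CLEAR            (1u << 1)

static unsigned fake_pipeastat = 0x2;           /* stand-in for the MMIO reg */

static void ack_vblank(void)
{
        unsigned stat = fake_pipeastat;                     /* I915_READ  */

        fake_pipeastat = stat | VBLANK_INTERRUPT_ENABLE     /* I915_WRITE */
                              | VBLANK_CLEAR;
}

int main(void)
{
        ack_vblank();
        printf("PIPEASTAT now 0x%x\n", fake_pipeastat);
        return 0;
}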
*/ static DEFINE_SPINLOCK(hpet_lock); @@ -946,14 +943,14 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data) printk(KERN_DEBUG "%s: 0x%lx is busy\n", __FUNCTION__, hdp->hd_phys_address); iounmap(hdp->hd_address); - return AE_ALREADY_EXISTS; + return -EBUSY; } } else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) { struct acpi_resource_fixed_memory32 *fixmem32; fixmem32 = &res->data.fixed_memory32; if (!fixmem32) - return AE_NO_MEMORY; + return -EINVAL; hdp->hd_phys_address = fixmem32->address; hdp->hd_address = ioremap(fixmem32->address, @@ -963,7 +960,7 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data) printk(KERN_DEBUG "%s: 0x%lx is busy\n", __FUNCTION__, hdp->hd_phys_address); iounmap(hdp->hd_address); - return AE_ALREADY_EXISTS; + return -EBUSY; } } else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) { struct acpi_resource_extended_irq *irqp; diff --git a/trunk/drivers/char/mspec.c b/trunk/drivers/char/mspec.c index 04ac155d3a07..049a46cc9f87 100644 --- a/trunk/drivers/char/mspec.c +++ b/trunk/drivers/char/mspec.c @@ -155,22 +155,23 @@ mspec_open(struct vm_area_struct *vma) * mspec_close * * Called when unmapping a device mapping. Frees all mspec pages - * belonging to all the vma's sharing this vma_data structure. + * belonging to the vma. */ static void mspec_close(struct vm_area_struct *vma) { struct vma_data *vdata; - int index, last_index; + int index, last_index, result; unsigned long my_page; vdata = vma->vm_private_data; - if (!atomic_dec_and_test(&vdata->refcnt)) - return; + BUG_ON(vma->vm_start < vdata->vm_start || vma->vm_end > vdata->vm_end); - last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT; - for (index = 0; index < last_index; index++) { + spin_lock(&vdata->lock); + index = (vma->vm_start - vdata->vm_start) >> PAGE_SHIFT; + last_index = (vma->vm_end - vdata->vm_start) >> PAGE_SHIFT; + for (; index < last_index; index++) { if (vdata->maddr[index] == 0) continue; /* @@ -179,12 +180,20 @@ mspec_close(struct vm_area_struct *vma) */ my_page = vdata->maddr[index]; vdata->maddr[index] = 0; - if (!mspec_zero_block(my_page, PAGE_SIZE)) + spin_unlock(&vdata->lock); + result = mspec_zero_block(my_page, PAGE_SIZE); + if (!result) uncached_free_page(my_page); else printk(KERN_WARNING "mspec_close(): " - "failed to zero page %ld\n", my_page); + "failed to zero page %i\n", + result); + spin_lock(&vdata->lock); } + spin_unlock(&vdata->lock); + + if (!atomic_dec_and_test(&vdata->refcnt)) + return; if (vdata->flags & VMD_VMALLOCED) vfree(vdata); @@ -192,6 +201,7 @@ mspec_close(struct vm_area_struct *vma) kfree(vdata); } + /* * mspec_nopfn * diff --git a/trunk/drivers/char/random.c b/trunk/drivers/char/random.c index af274e5a25ee..397c714cf2ba 100644 --- a/trunk/drivers/char/random.c +++ b/trunk/drivers/char/random.c @@ -1550,13 +1550,11 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, * As close as possible to RFC 793, which * suggests using a 250 kHz clock. * Further reading shows this assumes 2 Mb/s networks. - * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate. - * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but - * we also need to limit the resolution so that the u32 seq - * overlaps less than one time per MSL (2 minutes). - * Choosing a clock of 64 ns period is OK. (period of 274 s) + * For 10 Gb/s Ethernet, a 1 GHz clock is appropriate. + * That's funny, Linux has one built in! Use it! + * (Networks are faster now - should this be increased?) 
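The arithmetic in the removed comment above: shifting the nanosecond clock right by 6 gives a 64 ns tick, so the 32-bit sequence space wraps after about 2^32 * 64 ns, roughly 274.9 seconds, which matches the 274 s figure in the comment. A stand-alone check of those numbers; the timestamp is a made-up stand-in for ktime_get_real():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t now_ns = 1234567890123456ull;          /* fake timestamp   */
        uint32_t clock_term = (uint32_t)(now_ns >> 6);  /* 64 ns tick       */
        double wrap_s = ((double)UINT32_MAX + 1) * 64e-9;

        printf("clock term 0x%08x, 32-bit wrap every ~%.1f s\n",
               (unsigned)clock_term, wrap_s);
        return 0;
}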
*/ - seq += ktime_get_real().tv64 >> 6; + seq += ktime_get_real().tv64; #if 0 printk("init_seq(%lx, %lx, %d, %d) = %d\n", saddr, daddr, sport, dport, seq); diff --git a/trunk/drivers/char/vt_ioctl.c b/trunk/drivers/char/vt_ioctl.c index 7a61a2a9aafe..c6f6f4209739 100644 --- a/trunk/drivers/char/vt_ioctl.c +++ b/trunk/drivers/char/vt_ioctl.c @@ -770,7 +770,6 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, /* * Switching-from response */ - acquire_console_sem(); if (vc->vt_newvt >= 0) { if (arg == 0) /* @@ -785,6 +784,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, * complete the switch. */ int newvt; + acquire_console_sem(); newvt = vc->vt_newvt; vc->vt_newvt = -1; i = vc_allocate(newvt); @@ -798,6 +798,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, * other console switches.. */ complete_change_console(vc_cons[newvt].d); + release_console_sem(); } } @@ -809,12 +810,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, /* * If it's just an ACK, ignore it */ - if (arg != VT_ACKACQ) { - release_console_sem(); + if (arg != VT_ACKACQ) return -EINVAL; - } } - release_console_sem(); return 0; @@ -1032,7 +1030,7 @@ static DECLARE_WAIT_QUEUE_HEAD(vt_activate_queue); /* * Sleeps until a vt is activated, or the task is interrupted. Returns - * 0 if activation, -EINTR if interrupted by a signal handler. + * 0 if activation, -EINTR if interrupted. */ int vt_waitactive(int vt) { @@ -1057,7 +1055,7 @@ int vt_waitactive(int vt) break; } release_console_sem(); - retval = -ERESTARTNOHAND; + retval = -EINTR; if (signal_pending(current)) break; schedule(); @@ -1210,18 +1208,15 @@ void change_console(struct vc_data *new_vc) /* * Send the signal as privileged - kill_pid() will * tell us if the process has gone or something else - * is awry. - * - * We need to set vt_newvt *before* sending the signal or we - * have a race. + * is awry */ - vc->vt_newvt = new_vc->vc_num; if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) { /* * It worked. Mark the vt to switch to and * return. The process needs to send us a * VT_RELDISP ioctl to complete the switch. */ + vc->vt_newvt = new_vc->vc_num; return; } diff --git a/trunk/drivers/firewire/Kconfig b/trunk/drivers/firewire/Kconfig index fe9e768cfbc4..d011a76f8e7a 100644 --- a/trunk/drivers/firewire/Kconfig +++ b/trunk/drivers/firewire/Kconfig @@ -11,8 +11,7 @@ config FIREWIRE This is the "Juju" FireWire stack, a new alternative implementation designed for robustness and simplicity. You can build either this stack, or the classic stack (the ieee1394 driver, ohci1394 etc.) - or both. Please read http://wiki.linux1394.org/JujuMigration before - you enable the new stack. + or both. To compile this driver as a module, say M here: the module will be called firewire-core. 
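The race note removed from change_console() above is the usual publish-before-notify ordering: the shared state (vt_newvt) must be set before the signal is sent, or the receiver can act on a stale value. A minimal pthread sketch of that ordering, with a semaphore standing in for the signal; nothing here is console code:

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static int vt_newvt = -1;
static sem_t notify;

static void *consumer(void *arg)
{
        (void)arg;
        sem_wait(&notify);              /* "VT_RELDISP arrives"            */
        printf("switching to vt %d\n", vt_newvt);
        return NULL;
}

int main(void)
{
        pthread_t t;

        sem_init(&notify, 0, 0);
        pthread_create(&t, NULL, consumer, NULL);

        vt_newvt = 2;                   /* publish the state first ...     */
        sem_post(&notify);              /* ... then send the notification  */

        pthread_join(t, NULL);
        return 0;
}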
It functionally replaces ieee1394, raw1394, diff --git a/trunk/drivers/ide/ppc/pmac.c b/trunk/drivers/ide/ppc/pmac.c index 2fb047b898aa..f19eb6daeefd 100644 --- a/trunk/drivers/ide/ppc/pmac.c +++ b/trunk/drivers/ide/ppc/pmac.c @@ -1546,7 +1546,6 @@ static struct pci_device_id pmac_ide_pci_match[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {}, }; static struct pci_driver pmac_ide_pci_driver = { diff --git a/trunk/drivers/infiniband/hw/mlx4/qp.c b/trunk/drivers/infiniband/hw/mlx4/qp.c index 85c51bdc36f1..ba0428d872aa 100644 --- a/trunk/drivers/infiniband/hw/mlx4/qp.c +++ b/trunk/drivers/infiniband/hw/mlx4/qp.c @@ -1211,42 +1211,12 @@ static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); } -static void set_mlx_icrc_seg(void *dseg) -{ - u32 *t = dseg; - struct mlx4_wqe_inline_seg *iseg = dseg; - - t[1] = 0; - - /* - * Need a barrier here before writing the byte_count field to - * make sure that all the data is visible before the - * byte_count field is set. Otherwise, if the segment begins - * a new cacheline, the HCA prefetcher could grab the 64-byte - * chunk and get a valid (!= * 0xffffffff) byte count but - * stale data, and end up sending the wrong data. - */ - wmb(); - - iseg->byte_count = cpu_to_be32((1 << 31) | 4); -} - -static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg) +static void set_data_seg(struct mlx4_wqe_data_seg *dseg, + struct ib_sge *sg) { + dseg->byte_count = cpu_to_be32(sg->length); dseg->lkey = cpu_to_be32(sg->lkey); dseg->addr = cpu_to_be64(sg->addr); - - /* - * Need a barrier here before writing the byte_count field to - * make sure that all the data is visible before the - * byte_count field is set. Otherwise, if the segment begins - * a new cacheline, the HCA prefetcher could grab the 64-byte - * chunk and get a valid (!= * 0xffffffff) byte count but - * stale data, and end up sending the wrong data. - */ - wmb(); - - dseg->byte_count = cpu_to_be32(sg->length); } int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, @@ -1255,7 +1225,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct mlx4_ib_qp *qp = to_mqp(ibqp); void *wqe; struct mlx4_wqe_ctrl_seg *ctrl; - struct mlx4_wqe_data_seg *dseg; unsigned long flags; int nreq; int err = 0; @@ -1355,27 +1324,22 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, break; } - /* - * Write data segments in reverse order, so as to - * overwrite cacheline stamp last within each - * cacheline. This avoids issues with WQE - * prefetching. - */ + for (i = 0; i < wr->num_sge; ++i) { + set_data_seg(wqe, wr->sg_list + i); - dseg = wqe; - dseg += wr->num_sge - 1; - size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16); + wqe += sizeof (struct mlx4_wqe_data_seg); + size += sizeof (struct mlx4_wqe_data_seg) / 16; + } /* Add one more inline data segment for ICRC for MLX sends */ - if (unlikely(qp->ibqp.qp_type == IB_QPT_SMI || - qp->ibqp.qp_type == IB_QPT_GSI)) { - set_mlx_icrc_seg(dseg + 1); + if (qp->ibqp.qp_type == IB_QPT_SMI || qp->ibqp.qp_type == IB_QPT_GSI) { + ((struct mlx4_wqe_inline_seg *) wqe)->byte_count = + cpu_to_be32((1 << 31) | 4); + ((u32 *) wqe)[1] = 0; + wqe += sizeof (struct mlx4_wqe_data_seg); size += sizeof (struct mlx4_wqe_data_seg) / 16; } - for (i = wr->num_sge - 1; i >= 0; --i, --dseg) - set_data_seg(dseg, wr->sg_list + i); - ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? 
MLX4_WQE_CTRL_FENCE : 0) | size; diff --git a/trunk/drivers/input/joystick/Kconfig b/trunk/drivers/input/joystick/Kconfig index 7c662ee594a3..e2abe18e575d 100644 --- a/trunk/drivers/input/joystick/Kconfig +++ b/trunk/drivers/input/joystick/Kconfig @@ -277,7 +277,7 @@ config JOYSTICK_XPAD_FF config JOYSTICK_XPAD_LEDS bool "LED Support for Xbox360 controller 'BigX' LED" - depends on JOYSTICK_XPAD && (LEDS_CLASS=y || LEDS_CLASS=JOYSTICK_XPAD) + depends on LEDS_CLASS && JOYSTICK_XPAD ---help--- This option enables support for the LED which surrounds the Big X on XBox 360 controller. diff --git a/trunk/drivers/input/mouse/appletouch.c b/trunk/drivers/input/mouse/appletouch.c index a1804bfdbb8c..2bea1b2c631c 100644 --- a/trunk/drivers/input/mouse/appletouch.c +++ b/trunk/drivers/input/mouse/appletouch.c @@ -328,7 +328,6 @@ static void atp_complete(struct urb* urb) { int x, y, x_z, y_z, x_f, y_f; int retval, i, j; - int key; struct atp *dev = urb->context; switch (urb->status) { @@ -469,7 +468,6 @@ static void atp_complete(struct urb* urb) ATP_XFACT, &x_z, &x_f); y = atp_calculate_abs(dev->xy_acc + ATP_XSENSORS, ATP_YSENSORS, ATP_YFACT, &y_z, &y_f); - key = dev->data[dev->datalen - 1] & 1; if (x && y) { if (dev->x_old != -1) { @@ -507,7 +505,7 @@ static void atp_complete(struct urb* urb) the first touch unless reinitialised. Do so if it's been idle for a while in order to avoid waking the kernel up several hundred times a second */ - if (!key && atp_is_geyser_3(dev)) { + if (atp_is_geyser_3(dev)) { dev->idlecount++; if (dev->idlecount == 10) { dev->valid = 0; @@ -516,7 +514,7 @@ static void atp_complete(struct urb* urb) } } - input_report_key(dev->input, BTN_LEFT, key); + input_report_key(dev->input, BTN_LEFT, dev->data[dev->datalen - 1] & 1); input_sync(dev->input); exit: diff --git a/trunk/drivers/isdn/i4l/isdn_common.c b/trunk/drivers/isdn/i4l/isdn_common.c index 4910bca52640..ec5f4046412f 100644 --- a/trunk/drivers/isdn/i4l/isdn_common.c +++ b/trunk/drivers/isdn/i4l/isdn_common.c @@ -1135,7 +1135,7 @@ isdn_read(struct file *file, char __user *buf, size_t count, loff_t * off) if (count > dev->drv[drvidx]->stavail) count = dev->drv[drvidx]->stavail; len = dev->drv[drvidx]->interface->readstat(buf, count, - drvidx, isdn_minor2chan(minor - ISDN_MINOR_CTRL)); + drvidx, isdn_minor2chan(minor)); if (len < 0) { retval = len; goto out; @@ -1207,8 +1207,7 @@ isdn_write(struct file *file, const char __user *buf, size_t count, loff_t * off */ if (dev->drv[drvidx]->interface->writecmd) retval = dev->drv[drvidx]->interface-> - writecmd(buf, count, drvidx, - isdn_minor2chan(minor - ISDN_MINOR_CTRL)); + writecmd(buf, count, drvidx, isdn_minor2chan(minor)); else retval = count; goto out; diff --git a/trunk/drivers/lguest/lguest_asm.S b/trunk/drivers/lguest/lguest_asm.S index 1ddcd5cd20f6..f182c6a36209 100644 --- a/trunk/drivers/lguest/lguest_asm.S +++ b/trunk/drivers/lguest/lguest_asm.S @@ -22,9 +22,8 @@ jmp lguest_init /*G:055 We create a macro which puts the assembler code between lgstart_ and - * lgend_ markers. These templates are put in the .text section: they can't be - * discarded after boot as we may need to patch modules, too. */ -.text + * lgend_ markers. These templates end up in the .init.text section, so they + * are discarded after boot. */ #define LGUEST_PATCH(name, insns...) 
\ lgstart_##name: insns; lgend_##name:; \ .globl lgstart_##name; .globl lgend_##name @@ -35,6 +34,7 @@ LGUEST_PATCH(popf, movl %eax, lguest_data+LGUEST_DATA_irq_enabled) LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax) /*:*/ +.text /* These demark the EIP range where host should never deliver interrupts. */ .global lguest_noirq_start .global lguest_noirq_end diff --git a/trunk/drivers/md/raid5.c b/trunk/drivers/md/raid5.c index f96dea975fa5..4d63773ee73a 100644 --- a/trunk/drivers/md/raid5.c +++ b/trunk/drivers/md/raid5.c @@ -514,7 +514,7 @@ static void ops_complete_biofill(void *stripe_head_ref) struct stripe_head *sh = stripe_head_ref; struct bio *return_bi = NULL; raid5_conf_t *conf = sh->raid_conf; - int i; + int i, more_to_read = 0; pr_debug("%s: stripe %llu\n", __FUNCTION__, (unsigned long long)sh->sector); @@ -522,14 +522,16 @@ static void ops_complete_biofill(void *stripe_head_ref) /* clear completed biofills */ for (i = sh->disks; i--; ) { struct r5dev *dev = &sh->dev[i]; + /* check if this stripe has new incoming reads */ + if (dev->toread) + more_to_read++; /* acknowledge completion of a biofill operation */ - /* and check if we need to reply to a read request, - * new R5_Wantfill requests are held off until - * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending) - */ - if (test_and_clear_bit(R5_Wantfill, &dev->flags)) { + /* and check if we need to reply to a read request + */ + if (test_bit(R5_Wantfill, &dev->flags) && !dev->toread) { struct bio *rbi, *rbi2; + clear_bit(R5_Wantfill, &dev->flags); /* The access to dev->read is outside of the * spin_lock_irq(&conf->device_lock), but is protected @@ -556,7 +558,8 @@ static void ops_complete_biofill(void *stripe_head_ref) return_io(return_bi); - set_bit(STRIPE_HANDLE, &sh->state); + if (more_to_read) + set_bit(STRIPE_HANDLE, &sh->state); release_stripe(sh); } diff --git a/trunk/drivers/media/video/ivtv/ivtv-fileops.c b/trunk/drivers/media/video/ivtv/ivtv-fileops.c index 66ea3cbc369c..0285c4a830eb 100644 --- a/trunk/drivers/media/video/ivtv/ivtv-fileops.c +++ b/trunk/drivers/media/video/ivtv/ivtv-fileops.c @@ -754,11 +754,9 @@ static void ivtv_stop_decoding(struct ivtv_open_id *id, int flags, u64 pts) ivtv_yuv_close(itv); } if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_YUV) - itv->output_mode = OUT_NONE; - else if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_UDMA_YUV) - itv->output_mode = OUT_NONE; + itv->output_mode = OUT_NONE; else if (s->type == IVTV_DEC_STREAM_TYPE_MPG && itv->output_mode == OUT_MPG) - itv->output_mode = OUT_NONE; + itv->output_mode = OUT_NONE; itv->speed = 0; clear_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags); diff --git a/trunk/drivers/media/video/usbvision/usbvision-video.c b/trunk/drivers/media/video/usbvision/usbvision-video.c index 0cb006f2943d..e3371f972240 100644 --- a/trunk/drivers/media/video/usbvision/usbvision-video.c +++ b/trunk/drivers/media/video/usbvision/usbvision-video.c @@ -1387,6 +1387,7 @@ static const struct file_operations usbvision_fops = { .ioctl = video_ioctl2, .llseek = no_llseek, /* .poll = video_poll, */ + .mmap = usbvision_v4l2_mmap, .compat_ioctl = v4l_compat_ioctl32, }; static struct video_device usbvision_video_template = { @@ -1412,7 +1413,7 @@ static struct video_device usbvision_video_template = { .vidioc_s_input = vidioc_s_input, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_g_audio = vidioc_g_audio, - .vidioc_s_audio = vidioc_s_audio, + .vidioc_g_audio = vidioc_s_audio, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = 
vidioc_s_ctrl, .vidioc_streamon = vidioc_streamon, @@ -1458,7 +1459,7 @@ static struct video_device usbvision_radio_template= .vidioc_s_input = vidioc_s_input, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_g_audio = vidioc_g_audio, - .vidioc_s_audio = vidioc_s_audio, + .vidioc_g_audio = vidioc_s_audio, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_g_tuner = vidioc_g_tuner, diff --git a/trunk/drivers/misc/Kconfig b/trunk/drivers/misc/Kconfig index 7b580c3152de..73e248fb2ff1 100644 --- a/trunk/drivers/misc/Kconfig +++ b/trunk/drivers/misc/Kconfig @@ -134,7 +134,6 @@ config SONY_LAPTOP tristate "Sony Laptop Extras" depends on X86 && ACPI select BACKLIGHT_CLASS_DEVICE - depends on INPUT ---help--- This mini-driver drives the SNC and SPIC devices present in the ACPI BIOS of the Sony Vaio laptops. @@ -157,7 +156,6 @@ config THINKPAD_ACPI select BACKLIGHT_CLASS_DEVICE select HWMON select NVRAM - depends on INPUT ---help--- This is a driver for the IBM and Lenovo ThinkPad laptops. It adds support for Fn-Fx key combinations, Bluetooth control, video diff --git a/trunk/drivers/misc/sony-laptop.c b/trunk/drivers/misc/sony-laptop.c index f248080828f2..d38ddce592c0 100644 --- a/trunk/drivers/misc/sony-laptop.c +++ b/trunk/drivers/misc/sony-laptop.c @@ -1173,8 +1173,7 @@ static struct acpi_driver sony_nc_driver = { #define SONYPI_TYPE3_OFFSET 0x12 struct sony_pic_ioport { - struct acpi_resource_io io1; - struct acpi_resource_io io2; + struct acpi_resource_io io; struct list_head list; }; @@ -1444,11 +1443,11 @@ static u8 sony_pic_call1(u8 dev) { u8 v1, v2; - wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, + wait_on_command(inb_p(spic_dev.cur_ioport->io.minimum + 4) & 2, ITERATIONS_LONG); - outb(dev, spic_dev.cur_ioport->io1.minimum + 4); - v1 = inb_p(spic_dev.cur_ioport->io1.minimum + 4); - v2 = inb_p(spic_dev.cur_ioport->io1.minimum); + outb(dev, spic_dev.cur_ioport->io.minimum + 4); + v1 = inb_p(spic_dev.cur_ioport->io.minimum + 4); + v2 = inb_p(spic_dev.cur_ioport->io.minimum); dprintk("sony_pic_call1: 0x%.4x\n", (v2 << 8) | v1); return v2; } @@ -1457,13 +1456,13 @@ static u8 sony_pic_call2(u8 dev, u8 fn) { u8 v1; - wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, + wait_on_command(inb_p(spic_dev.cur_ioport->io.minimum + 4) & 2, ITERATIONS_LONG); - outb(dev, spic_dev.cur_ioport->io1.minimum + 4); - wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, + outb(dev, spic_dev.cur_ioport->io.minimum + 4); + wait_on_command(inb_p(spic_dev.cur_ioport->io.minimum + 4) & 2, ITERATIONS_LONG); - outb(fn, spic_dev.cur_ioport->io1.minimum); - v1 = inb_p(spic_dev.cur_ioport->io1.minimum); + outb(fn, spic_dev.cur_ioport->io.minimum); + v1 = inb_p(spic_dev.cur_ioport->io.minimum); dprintk("sony_pic_call2: 0x%.4x\n", v1); return v1; } @@ -1472,13 +1471,13 @@ static u8 sony_pic_call3(u8 dev, u8 fn, u8 v) { u8 v1; - wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG); - outb(dev, spic_dev.cur_ioport->io1.minimum + 4); - wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG); - outb(fn, spic_dev.cur_ioport->io1.minimum); - wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG); - outb(v, spic_dev.cur_ioport->io1.minimum); - v1 = inb_p(spic_dev.cur_ioport->io1.minimum); + wait_on_command(inb_p(spic_dev.cur_ioport->io.minimum + 4) & 2, ITERATIONS_LONG); + outb(dev, spic_dev.cur_ioport->io.minimum + 4); + wait_on_command(inb_p(spic_dev.cur_ioport->io.minimum + 4) & 2, 
ITERATIONS_LONG); + outb(fn, spic_dev.cur_ioport->io.minimum); + wait_on_command(inb_p(spic_dev.cur_ioport->io.minimum + 4) & 2, ITERATIONS_LONG); + outb(v, spic_dev.cur_ioport->io.minimum); + v1 = inb_p(spic_dev.cur_ioport->io.minimum); dprintk("sony_pic_call3: 0x%.4x\n", v1); return v1; } @@ -2075,18 +2074,7 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context) switch (resource->type) { case ACPI_RESOURCE_TYPE_START_DEPENDENT: - { - /* start IO enumeration */ - struct sony_pic_ioport *ioport = kzalloc(sizeof(*ioport), GFP_KERNEL); - if (!ioport) - return AE_ERROR; - - list_add(&ioport->list, &dev->ioports); - return AE_OK; - } - case ACPI_RESOURCE_TYPE_END_DEPENDENT: - /* end IO enumeration */ return AE_OK; case ACPI_RESOURCE_TYPE_IRQ: @@ -2113,7 +2101,7 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context) if (!interrupt) return AE_ERROR; - list_add(&interrupt->list, &dev->interrupts); + list_add_tail(&interrupt->list, &dev->interrupts); interrupt->irq.triggering = p->triggering; interrupt->irq.polarity = p->polarity; interrupt->irq.sharable = p->sharable; @@ -2125,27 +2113,18 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context) case ACPI_RESOURCE_TYPE_IO: { struct acpi_resource_io *io = &resource->data.io; - struct sony_pic_ioport *ioport = - list_first_entry(&dev->ioports, struct sony_pic_ioport, list); + struct sony_pic_ioport *ioport = NULL; if (!io) { dprintk("Blank IO resource\n"); return AE_OK; } - if (!ioport->io1.minimum) { - memcpy(&ioport->io1, io, sizeof(*io)); - dprintk("IO1 at 0x%.4x (0x%.2x)\n", ioport->io1.minimum, - ioport->io1.address_length); - } - else if (!ioport->io2.minimum) { - memcpy(&ioport->io2, io, sizeof(*io)); - dprintk("IO2 at 0x%.4x (0x%.2x)\n", ioport->io2.minimum, - ioport->io2.address_length); - } - else { - printk(KERN_ERR DRV_PFX "Unknown SPIC Type, more than 2 IO Ports\n"); + ioport = kzalloc(sizeof(*ioport), GFP_KERNEL); + if (!ioport) return AE_ERROR; - } + + list_add_tail(&ioport->list, &dev->ioports); + memcpy(&ioport->io, io, sizeof(*io)); return AE_OK; } default: @@ -2220,22 +2199,10 @@ static int sony_pic_enable(struct acpi_device *device, { acpi_status status; int result = 0; - /* Type 1 resource layout is: - * IO - * IO - * IRQNoFlags - * End - * - * Type 2 and 3 resource layout is: - * IO - * IRQNoFlags - * End - */ struct { - struct acpi_resource res1; - struct acpi_resource res2; - struct acpi_resource res3; - struct acpi_resource res4; + struct acpi_resource io_res; + struct acpi_resource irq_res; + struct acpi_resource end; } *resource; struct acpi_buffer buffer = { 0, NULL }; @@ -2250,49 +2217,21 @@ static int sony_pic_enable(struct acpi_device *device, buffer.length = sizeof(*resource) + 1; buffer.pointer = resource; - /* setup Type 1 resources */ - if (spic_dev.model == SONYPI_DEVICE_TYPE1) { + /* setup io resource */ + resource->io_res.type = ACPI_RESOURCE_TYPE_IO; + resource->io_res.length = sizeof(struct acpi_resource); + memcpy(&resource->io_res.data.io, &ioport->io, + sizeof(struct acpi_resource_io)); - /* setup io resources */ - resource->res1.type = ACPI_RESOURCE_TYPE_IO; - resource->res1.length = sizeof(struct acpi_resource); - memcpy(&resource->res1.data.io, &ioport->io1, - sizeof(struct acpi_resource_io)); + /* setup irq resource */ + resource->irq_res.type = ACPI_RESOURCE_TYPE_IRQ; + resource->irq_res.length = sizeof(struct acpi_resource); + memcpy(&resource->irq_res.data.irq, &irq->irq, + sizeof(struct acpi_resource_irq)); + /* we 
requested a shared irq */ + resource->irq_res.data.irq.sharable = ACPI_SHARED; - resource->res2.type = ACPI_RESOURCE_TYPE_IO; - resource->res2.length = sizeof(struct acpi_resource); - memcpy(&resource->res2.data.io, &ioport->io2, - sizeof(struct acpi_resource_io)); - - /* setup irq resource */ - resource->res3.type = ACPI_RESOURCE_TYPE_IRQ; - resource->res3.length = sizeof(struct acpi_resource); - memcpy(&resource->res3.data.irq, &irq->irq, - sizeof(struct acpi_resource_irq)); - /* we requested a shared irq */ - resource->res3.data.irq.sharable = ACPI_SHARED; - - resource->res4.type = ACPI_RESOURCE_TYPE_END_TAG; - - } - /* setup Type 2/3 resources */ - else { - /* setup io resource */ - resource->res1.type = ACPI_RESOURCE_TYPE_IO; - resource->res1.length = sizeof(struct acpi_resource); - memcpy(&resource->res1.data.io, &ioport->io1, - sizeof(struct acpi_resource_io)); - - /* setup irq resource */ - resource->res2.type = ACPI_RESOURCE_TYPE_IRQ; - resource->res2.length = sizeof(struct acpi_resource); - memcpy(&resource->res2.data.irq, &irq->irq, - sizeof(struct acpi_resource_irq)); - /* we requested a shared irq */ - resource->res2.data.irq.sharable = ACPI_SHARED; - - resource->res3.type = ACPI_RESOURCE_TYPE_END_TAG; - } + resource->end.type = ACPI_RESOURCE_TYPE_END_TAG; /* Attempt to set the resource */ dprintk("Evaluating _SRS\n"); @@ -2300,7 +2239,7 @@ static int sony_pic_enable(struct acpi_device *device, /* check for total failure */ if (ACPI_FAILURE(status)) { - printk(KERN_ERR DRV_PFX "Error evaluating _SRS\n"); + printk(KERN_ERR DRV_PFX "Error evaluating _SRS"); result = -ENODEV; goto end; } @@ -2329,14 +2268,11 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id) struct sony_pic_dev *dev = (struct sony_pic_dev *) dev_id; - ev = inb_p(dev->cur_ioport->io1.minimum); - if (dev->cur_ioport->io2.minimum) - data_mask = inb_p(dev->cur_ioport->io2.minimum); - else - data_mask = inb_p(dev->cur_ioport->io1.minimum + dev->evport_offset); + ev = inb_p(dev->cur_ioport->io.minimum); + data_mask = inb_p(dev->cur_ioport->io.minimum + dev->evport_offset); dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", - ev, data_mask, dev->cur_ioport->io1.minimum, dev->evport_offset); + ev, data_mask, dev->cur_ioport->io.minimum, dev->evport_offset); if (ev == 0x00 || ev == 0xff) return IRQ_HANDLED; @@ -2387,11 +2323,8 @@ static int sony_pic_remove(struct acpi_device *device, int type) } free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev); - release_region(spic_dev.cur_ioport->io1.minimum, - spic_dev.cur_ioport->io1.address_length); - if (spic_dev.cur_ioport->io2.minimum) - release_region(spic_dev.cur_ioport->io2.minimum, - spic_dev.cur_ioport->io2.address_length); + release_region(spic_dev.cur_ioport->io.minimum, + spic_dev.cur_ioport->io.address_length); sonypi_compat_exit(); @@ -2464,36 +2397,14 @@ static int sony_pic_add(struct acpi_device *device) goto err_remove_input; /* request io port */ - list_for_each_entry_reverse(io, &spic_dev.ioports, list) { - if (request_region(io->io1.minimum, io->io1.address_length, + list_for_each_entry(io, &spic_dev.ioports, list) { + if (request_region(io->io.minimum, io->io.address_length, "Sony Programable I/O Device")) { - dprintk("I/O port1: 0x%.4x (0x%.4x) + 0x%.2x\n", - io->io1.minimum, io->io1.maximum, - io->io1.address_length); - /* Type 1 have 2 ioports */ - if (io->io2.minimum) { - if (request_region(io->io2.minimum, - io->io2.address_length, - "Sony Programable I/O Device")) { - dprintk("I/O port2: 0x%.4x (0x%.4x) + 0x%.2x\n", - io->io2.minimum, 
io->io2.maximum, - io->io2.address_length); - spic_dev.cur_ioport = io; - break; - } - else { - dprintk("Unable to get I/O port2: " - "0x%.4x (0x%.4x) + 0x%.2x\n", - io->io2.minimum, io->io2.maximum, - io->io2.address_length); - release_region(io->io1.minimum, - io->io1.address_length); - } - } - else { - spic_dev.cur_ioport = io; - break; - } + dprintk("I/O port: 0x%.4x (0x%.4x) + 0x%.2x\n", + io->io.minimum, io->io.maximum, + io->io.address_length); + spic_dev.cur_ioport = io; + break; } } if (!spic_dev.cur_ioport) { @@ -2503,7 +2414,7 @@ static int sony_pic_add(struct acpi_device *device) } /* request IRQ */ - list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) { + list_for_each_entry(irq, &spic_dev.interrupts, list) { if (!request_irq(irq->irq.interrupts[0], sony_pic_irq, IRQF_SHARED, "sony-laptop", &spic_dev)) { dprintk("IRQ: %d - triggering: %d - " @@ -2551,11 +2462,8 @@ static int sony_pic_add(struct acpi_device *device) free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev); err_release_region: - release_region(spic_dev.cur_ioport->io1.minimum, - spic_dev.cur_ioport->io1.address_length); - if (spic_dev.cur_ioport->io2.minimum) - release_region(spic_dev.cur_ioport->io2.minimum, - spic_dev.cur_ioport->io2.address_length); + release_region(spic_dev.cur_ioport->io.minimum, + spic_dev.cur_ioport->io.address_length); err_remove_compat: sonypi_compat_exit(); diff --git a/trunk/drivers/misc/thinkpad_acpi.h b/trunk/drivers/misc/thinkpad_acpi.h index 082a1cbc16c0..fa64dedd26d9 100644 --- a/trunk/drivers/misc/thinkpad_acpi.h +++ b/trunk/drivers/misc/thinkpad_acpi.h @@ -233,22 +233,22 @@ struct ibm_init_struct { static struct { #ifdef CONFIG_THINKPAD_ACPI_BAY - u16 bay_status:1; - u16 bay_eject:1; - u16 bay_status2:1; - u16 bay_eject2:1; + u32 bay_status:1; + u32 bay_eject:1; + u32 bay_status2:1; + u32 bay_eject2:1; #endif - u16 bluetooth:1; - u16 hotkey:1; - u16 hotkey_mask:1; - u16 hotkey_wlsw:1; - u16 light:1; - u16 light_status:1; - u16 wan:1; - u16 fan_ctrl_status_undef:1; - u16 input_device_registered:1; - u16 platform_drv_registered:1; - u16 platform_drv_attrs_registered:1; + u32 bluetooth:1; + u32 hotkey:1; + u32 hotkey_mask:1; + u32 hotkey_wlsw:1; + u32 light:1; + u32 light_status:1; + u32 wan:1; + u32 fan_ctrl_status_undef:1; + u32 input_device_registered:1; + u32 platform_drv_registered:1; + u32 platform_drv_attrs_registered:1; } tp_features; struct thinkpad_id_data { diff --git a/trunk/drivers/net/e1000/e1000_ethtool.c b/trunk/drivers/net/e1000/e1000_ethtool.c index 9ecc3adcf6c1..4c3785c9d4b8 100644 --- a/trunk/drivers/net/e1000/e1000_ethtool.c +++ b/trunk/drivers/net/e1000/e1000_ethtool.c @@ -1726,7 +1726,6 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wol case E1000_DEV_ID_82571EB_QUAD_COPPER: case E1000_DEV_ID_82571EB_QUAD_FIBER: case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: - case E1000_DEV_ID_82571PT_QUAD_COPPER: case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: /* quad port adapters only support WoL on port A */ if (!adapter->quad_port_a) { diff --git a/trunk/drivers/net/e1000/e1000_hw.c b/trunk/drivers/net/e1000/e1000_hw.c index 8604adbe351c..ba120f7fb0be 100644 --- a/trunk/drivers/net/e1000/e1000_hw.c +++ b/trunk/drivers/net/e1000/e1000_hw.c @@ -387,7 +387,6 @@ e1000_set_mac_type(struct e1000_hw *hw) case E1000_DEV_ID_82571EB_SERDES_DUAL: case E1000_DEV_ID_82571EB_SERDES_QUAD: case E1000_DEV_ID_82571EB_QUAD_COPPER: - case E1000_DEV_ID_82571PT_QUAD_COPPER: case E1000_DEV_ID_82571EB_QUAD_FIBER: case 
E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: hw->mac_type = e1000_82571; diff --git a/trunk/drivers/net/e1000/e1000_hw.h b/trunk/drivers/net/e1000/e1000_hw.h index 07f0ea73676e..fe8714655c90 100644 --- a/trunk/drivers/net/e1000/e1000_hw.h +++ b/trunk/drivers/net/e1000/e1000_hw.h @@ -475,7 +475,6 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); #define E1000_DEV_ID_82571EB_FIBER 0x105F #define E1000_DEV_ID_82571EB_SERDES 0x1060 #define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 -#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5 #define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 #define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC #define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 diff --git a/trunk/drivers/net/e1000/e1000_main.c b/trunk/drivers/net/e1000/e1000_main.c index e7c8951f47fa..4a225950fb43 100644 --- a/trunk/drivers/net/e1000/e1000_main.c +++ b/trunk/drivers/net/e1000/e1000_main.c @@ -108,7 +108,6 @@ static struct pci_device_id e1000_pci_tbl[] = { INTEL_E1000_ETHERNET_DEVICE(0x10BC), INTEL_E1000_ETHERNET_DEVICE(0x10C4), INTEL_E1000_ETHERNET_DEVICE(0x10C5), - INTEL_E1000_ETHERNET_DEVICE(0x10D5), INTEL_E1000_ETHERNET_DEVICE(0x10D9), INTEL_E1000_ETHERNET_DEVICE(0x10DA), /* required last entry */ @@ -1102,7 +1101,6 @@ e1000_probe(struct pci_dev *pdev, case E1000_DEV_ID_82571EB_QUAD_COPPER: case E1000_DEV_ID_82571EB_QUAD_FIBER: case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: - case E1000_DEV_ID_82571PT_QUAD_COPPER: /* if quad port adapter, disable WoL on all but port A */ if (global_quad_port_a != 0) adapter->eeprom_wol = 0; diff --git a/trunk/drivers/net/mv643xx_eth.c b/trunk/drivers/net/mv643xx_eth.c index 315335671f0f..6a117e9968cb 100644 --- a/trunk/drivers/net/mv643xx_eth.c +++ b/trunk/drivers/net/mv643xx_eth.c @@ -534,7 +534,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id) } /* PHY status changed */ - if (eth_int_cause_ext & (ETH_INT_CAUSE_PHY | ETH_INT_CAUSE_STATE)) { + if (eth_int_cause_ext & ETH_INT_CAUSE_PHY) { struct ethtool_cmd cmd; if (mii_link_ok(&mp->mii)) { @@ -1357,6 +1357,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) #endif dev->watchdog_timeo = 2 * HZ; + dev->tx_queue_len = mp->tx_ring_size; dev->base_addr = 0; dev->change_mtu = mv643xx_eth_change_mtu; dev->do_ioctl = mv643xx_eth_do_ioctl; @@ -2767,6 +2768,8 @@ static const struct ethtool_ops mv643xx_ethtool_ops = { .get_stats_count = mv643xx_get_stats_count, .get_ethtool_stats = mv643xx_get_ethtool_stats, .get_strings = mv643xx_get_strings, + .get_stats_count = mv643xx_get_stats_count, + .get_ethtool_stats = mv643xx_get_ethtool_stats, .nway_reset = mv643xx_eth_nway_restart, }; diff --git a/trunk/drivers/net/mv643xx_eth.h b/trunk/drivers/net/mv643xx_eth.h index 565b96696aca..82f8c0cbfb64 100644 --- a/trunk/drivers/net/mv643xx_eth.h +++ b/trunk/drivers/net/mv643xx_eth.h @@ -64,9 +64,7 @@ #define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8) #define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR) #define ETH_INT_CAUSE_PHY 0x00010000 -#define ETH_INT_CAUSE_STATE 0x00100000 -#define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY | \ - ETH_INT_CAUSE_STATE) +#define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY) #define ETH_INT_MASK_ALL 0x00000000 #define ETH_INT_MASK_ALL_EXT 0x00000000 diff --git a/trunk/drivers/net/pcmcia/3c589_cs.c b/trunk/drivers/net/pcmcia/3c589_cs.c index 503f2685fb73..c06cae3f0b56 100644 --- a/trunk/drivers/net/pcmcia/3c589_cs.c +++ b/trunk/drivers/net/pcmcia/3c589_cs.c @@ -116,7 +116,7 @@ struct 
el3_private { spinlock_t lock; }; -static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" }; +static const char *if_names[] = { "auto", "10base2", "10baseT", "AUI" }; /*====================================================================*/ diff --git a/trunk/drivers/net/ppp_mppe.c b/trunk/drivers/net/ppp_mppe.c index c0b6d19d1457..f79cf87a2bff 100644 --- a/trunk/drivers/net/ppp_mppe.c +++ b/trunk/drivers/net/ppp_mppe.c @@ -136,7 +136,7 @@ struct ppp_mppe_state { * Key Derivation, from RFC 3078, RFC 3079. * Equivalent to Get_Key() for MS-CHAP as described in RFC 3079. */ -static void get_new_key_from_sha(struct ppp_mppe_state * state) +static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *InterimKey) { struct hash_desc desc; struct scatterlist sg[4]; @@ -153,6 +153,8 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state) desc.flags = 0; crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest); + + memcpy(InterimKey, state->sha1_digest, state->keylen); } /* @@ -161,21 +163,21 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state) */ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) { + unsigned char InterimKey[MPPE_MAX_KEY_LEN]; struct scatterlist sg_in[1], sg_out[1]; struct blkcipher_desc desc = { .tfm = state->arc4 }; - get_new_key_from_sha(state); + get_new_key_from_sha(state, InterimKey); if (!initial_key) { - crypto_blkcipher_setkey(state->arc4, state->sha1_digest, - state->keylen); - setup_sg(sg_in, state->sha1_digest, state->keylen); + crypto_blkcipher_setkey(state->arc4, InterimKey, state->keylen); + setup_sg(sg_in, InterimKey, state->keylen); setup_sg(sg_out, state->session_key, state->keylen); if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, state->keylen) != 0) { printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n"); } } else { - memcpy(state->session_key, state->sha1_digest, state->keylen); + memcpy(state->session_key, InterimKey, state->keylen); } if (state->keylen == 8) { /* See RFC 3078 */ diff --git a/trunk/drivers/net/qla3xxx.c b/trunk/drivers/net/qla3xxx.c index ea151315050c..69da95b5ad0c 100755 --- a/trunk/drivers/net/qla3xxx.c +++ b/trunk/drivers/net/qla3xxx.c @@ -2248,13 +2248,6 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, qdev->rsp_consumer_index) && (work_done < work_to_do)) { net_rsp = qdev->rsp_current; - rmb(); - /* - * Fix 4032 chipe undocumented "feature" where bit-8 is set if the - * inbound completion is for a VLAN. 
- */ - if (qdev->device_id == QL3032_DEVICE_ID) - net_rsp->opcode &= 0x7f; switch (net_rsp->opcode) { case OPCODE_OB_MAC_IOCB_FN0: diff --git a/trunk/drivers/net/r8169.c b/trunk/drivers/net/r8169.c index c76dd29c8e9a..b85ab4a8f2a3 100644 --- a/trunk/drivers/net/r8169.c +++ b/trunk/drivers/net/r8169.c @@ -1228,10 +1228,7 @@ static void rtl8169_hw_phy_config(struct net_device *dev) return; } - if ((tp->mac_version != RTL_GIGA_MAC_VER_02) && - (tp->mac_version != RTL_GIGA_MAC_VER_03)) - return; - + /* phy config for RTL8169s mac_version C chip */ mdio_write(ioaddr, 31, 0x0001); //w 31 2 0 1 mdio_write(ioaddr, 21, 0x1000); //w 21 15 0 1000 mdio_write(ioaddr, 24, 0x65c7); //w 24 15 0 65c7 @@ -1918,11 +1915,7 @@ static void rtl_hw_start_8169(struct net_device *dev) rtl_set_rx_max_size(ioaddr); - if ((tp->mac_version == RTL_GIGA_MAC_VER_01) || - (tp->mac_version == RTL_GIGA_MAC_VER_02) || - (tp->mac_version == RTL_GIGA_MAC_VER_03) || - (tp->mac_version == RTL_GIGA_MAC_VER_04)) - rtl_set_rx_tx_config_registers(tp); + rtl_set_rx_tx_config_registers(tp); tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; @@ -1945,14 +1938,6 @@ static void rtl_hw_start_8169(struct net_device *dev) rtl_set_rx_tx_desc_registers(tp, ioaddr); - if ((tp->mac_version != RTL_GIGA_MAC_VER_01) && - (tp->mac_version != RTL_GIGA_MAC_VER_02) && - (tp->mac_version != RTL_GIGA_MAC_VER_03) && - (tp->mac_version != RTL_GIGA_MAC_VER_04)) { - RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); - rtl_set_rx_tx_config_registers(tp); - } - RTL_W8(Cfg9346, Cfg9346_Lock); /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ @@ -1967,6 +1952,8 @@ static void rtl_hw_start_8169(struct net_device *dev) /* Enable all known interrupts by setting the interrupt mask. */ RTL_W16(IntrMask, tp->intr_event); + + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); } static void rtl_hw_start_8168(struct net_device *dev) @@ -2580,15 +2567,6 @@ static void rtl8169_tx_interrupt(struct net_device *dev, (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) { netif_wake_queue(dev); } - /* - * 8168 hack: TxPoll requests are lost when the Tx packets are - * too close. Let's kick an extra TxPoll request when a burst - * of start_xmit activity is detected (if it is not detected, - * it is slow enough). 
-- FR - */ - smp_rmb(); - if (tp->cur_tx != dirty_tx) - RTL_W8(TxPoll, NPQ); } } diff --git a/trunk/drivers/net/sky2.c b/trunk/drivers/net/sky2.c index ea117fc3d5e3..eaffe551d1d8 100644 --- a/trunk/drivers/net/sky2.c +++ b/trunk/drivers/net/sky2.c @@ -338,16 +338,6 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) if (!(hw->flags & SKY2_HW_GIGABIT)) { /* enable automatic crossover */ ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1; - - if (hw->chip_id == CHIP_ID_YUKON_FE_P && - hw->chip_rev == CHIP_REV_YU_FE2_A0) { - u16 spec; - - /* Enable Class A driver for FE+ A0 */ - spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2); - spec |= PHY_M_FESC_SEL_CL_A; - gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec); - } } else { /* disable energy detect */ ctrl &= ~PHY_M_PC_EN_DET_MSK; @@ -826,8 +816,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); - /* On chips without ram buffer, pause is controled by MAC level */ - if (sky2_read8(hw, B2_E_0) == 0) { + if (!(hw->flags & SKY2_HW_RAMBUFFER)) { sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); @@ -910,20 +899,6 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2) return le; } -static void tx_init(struct sky2_port *sky2) -{ - struct sky2_tx_le *le; - - sky2->tx_prod = sky2->tx_cons = 0; - sky2->tx_tcpsum = 0; - sky2->tx_last_mss = 0; - - le = get_tx_le(sky2); - le->addr = 0; - le->opcode = OP_ADDR64 | HW_OWNER; - sky2->tx_addr64 = 0; -} - static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2, struct sky2_tx_le *le) { @@ -1296,7 +1271,7 @@ static int sky2_up(struct net_device *dev) struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; - u32 imask, ramsize; + u32 imask; int cap, err = -ENOMEM; struct net_device *otherdev = hw->dev[sky2->port^1]; @@ -1334,8 +1309,7 @@ static int sky2_up(struct net_device *dev) GFP_KERNEL); if (!sky2->tx_ring) goto err_out; - - tx_init(sky2); + sky2->tx_prod = sky2->tx_cons = 0; sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES, &sky2->rx_le_map); @@ -1352,12 +1326,13 @@ static int sky2_up(struct net_device *dev) sky2_mac_init(hw, port); - /* Register is number of 4K blocks on internal RAM buffer. */ - ramsize = sky2_read8(hw, B2_E_0) * 4; - if (ramsize > 0) { + if (hw->flags & SKY2_HW_RAMBUFFER) { + /* Register is number of 4K blocks on internal RAM buffer. */ + u32 ramsize = sky2_read8(hw, B2_E_0) * 4; u32 rxspace; - pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize); + printk(KERN_DEBUG PFX "%s: ram buffer %dK\n", dev->name, ramsize); + if (ramsize < 16) rxspace = ramsize / 2; else @@ -2020,7 +1995,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) synchronize_irq(hw->pdev->irq); - if (sky2_read8(hw, B2_E_0) == 0) + if (!(hw->flags & SKY2_HW_RAMBUFFER)) sky2_set_tx_stfwd(hw, port); ctl = gma_read16(hw, port, GM_GP_CTRL); @@ -2163,15 +2138,6 @@ static struct sk_buff *sky2_receive(struct net_device *dev, sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; prefetch(sky2->rx_ring + sky2->rx_next); - /* This chip has hardware problems that generates bogus status. - * So do only marginal checking and expect higher level protocols - * to handle crap frames. 
- */ - if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P && - sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 && - length != count) - goto okay; - if (status & GMR_FS_ANY_ERR) goto error; @@ -2180,9 +2146,8 @@ static struct sk_buff *sky2_receive(struct net_device *dev, /* if length reported by DMA does not match PHY, packet was truncated */ if (length != count) - goto len_error; + goto len_mismatch; -okay: if (length < copybreak) skb = receive_copy(sky2, re, length); else @@ -2192,13 +2157,13 @@ static struct sk_buff *sky2_receive(struct net_device *dev, return skb; -len_error: +len_mismatch: /* Truncation of overlength packets causes PHY length to not match MAC length */ ++sky2->net_stats.rx_length_errors; if (netif_msg_rx_err(sky2) && net_ratelimit()) - pr_info(PFX "%s: rx length error: status %#x length %d\n", - dev->name, status, length); + pr_info(PFX "%s: rx length mismatch: length %d status %#x\n", + dev->name, length, status); goto resubmit; error: @@ -2561,7 +2526,7 @@ static void sky2_watchdog(unsigned long arg) ++active; /* For chips with Rx FIFO, check if stuck */ - if ((hw->flags & SKY2_HW_FIFO_HANG_CHECK) && + if ((hw->flags & SKY2_HW_RAMBUFFER) && sky2_rx_hung(dev)) { pr_info(PFX "%s: receiver hang detected\n", dev->name); @@ -2719,10 +2684,8 @@ static int __devinit sky2_init(struct sky2_hw *hw) switch(hw->chip_id) { case CHIP_ID_YUKON_XL: hw->flags = SKY2_HW_GIGABIT - | SKY2_HW_NEWER_PHY; - if (hw->chip_rev < 3) - hw->flags |= SKY2_HW_FIFO_HANG_CHECK; - + | SKY2_HW_NEWER_PHY + | SKY2_HW_RAMBUFFER; break; case CHIP_ID_YUKON_EC_U: @@ -2748,10 +2711,11 @@ static int __devinit sky2_init(struct sky2_hw *hw) dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n"); return -EOPNOTSUPP; } - hw->flags = SKY2_HW_GIGABIT | SKY2_HW_FIFO_HANG_CHECK; + hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RAMBUFFER; break; case CHIP_ID_YUKON_FE: + hw->flags = SKY2_HW_RAMBUFFER; break; case CHIP_ID_YUKON_FE_P: @@ -3959,6 +3923,13 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, sky2->hw = hw; sky2->msg_enable = netif_msg_init(debug, default_msg); + /* This chip has hardware problems that generates + * bogus PHY receive status so by default shut up the message. + */ + if (hw->chip_id == CHIP_ID_YUKON_FE_P && + hw->chip_rev == CHIP_REV_YU_FE2_A0) + sky2->msg_enable &= ~NETIF_MSG_RX_ERR; + /* Auto speed and flow control */ sky2->autoneg = AUTONEG_ENABLE; sky2->flow_mode = FC_BOTH; @@ -3982,12 +3953,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, dev->features |= NETIF_F_HIGHDMA; #ifdef SKY2_VLAN_TAG_USED - /* The workaround for FE+ status conflicts with VLAN tag detection. 
*/ - if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P && - sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) { - dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - dev->vlan_rx_register = sky2_vlan_rx_register; - } + dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + dev->vlan_rx_register = sky2_vlan_rx_register; #endif /* read the mac address */ diff --git a/trunk/drivers/net/sky2.h b/trunk/drivers/net/sky2.h index 8bc5c54e3efa..69cd98400fe6 100644 --- a/trunk/drivers/net/sky2.h +++ b/trunk/drivers/net/sky2.h @@ -2063,7 +2063,7 @@ struct sky2_hw { #define SKY2_HW_FIBRE_PHY 0x00000002 #define SKY2_HW_GIGABIT 0x00000004 #define SKY2_HW_NEWER_PHY 0x00000008 -#define SKY2_HW_FIFO_HANG_CHECK 0x00000010 +#define SKY2_HW_RAMBUFFER 0x00000010 /* chip has RAM FIFO */ #define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ #define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ #define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ diff --git a/trunk/drivers/net/usb/dm9601.c b/trunk/drivers/net/usb/dm9601.c index a2de32fabc17..16c7a0e87850 100644 --- a/trunk/drivers/net/usb/dm9601.c +++ b/trunk/drivers/net/usb/dm9601.c @@ -405,7 +405,7 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->ethtool_ops = &dm9601_ethtool_ops; dev->net->hard_header_len += DM_TX_OVERHEAD; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; - dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD; + dev->rx_urb_size = dev->net->mtu + DM_RX_OVERHEAD; dev->mii.dev = dev->net; dev->mii.mdio_read = dm9601_mdio_read; diff --git a/trunk/drivers/net/wireless/Makefile b/trunk/drivers/net/wireless/Makefile index 4eb6d9752881..ef35bc6c4a22 100644 --- a/trunk/drivers/net/wireless/Makefile +++ b/trunk/drivers/net/wireless/Makefile @@ -43,7 +43,7 @@ obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o obj-$(CONFIG_USB_ZD1201) += zd1201.o -obj-$(CONFIG_LIBERTAS) += libertas/ +obj-$(CONFIG_LIBERTAS_USB) += libertas/ rtl8187-objs := rtl8187_dev.o rtl8187_rtl8225.o obj-$(CONFIG_RTL8187) += rtl8187.o diff --git a/trunk/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/trunk/drivers/net/wireless/bcm43xx/bcm43xx_wx.c index 6acfdc49dccd..d6d9413d7f23 100644 --- a/trunk/drivers/net/wireless/bcm43xx/bcm43xx_wx.c +++ b/trunk/drivers/net/wireless/bcm43xx/bcm43xx_wx.c @@ -444,7 +444,7 @@ static int bcm43xx_wx_set_xmitpower(struct net_device *net_dev, u16 maxpower; if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) { - printk(KERN_ERR PFX "TX power not in dBm.\n"); + printk(PFX KERN_ERR "TX power not in dBm.\n"); return -EOPNOTSUPP; } diff --git a/trunk/drivers/pci/quirks.c b/trunk/drivers/pci/quirks.c index 50f2dd9e1bb2..7dcaa09b3c20 100644 --- a/trunk/drivers/pci/quirks.c +++ b/trunk/drivers/pci/quirks.c @@ -1444,6 +1444,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos); static void __devinit quirk_e100_interrupt(struct pci_dev *dev) { u16 command; + u32 bar; u8 __iomem *csr; u8 cmd_hi; @@ -1475,12 +1476,12 @@ static void __devinit quirk_e100_interrupt(struct pci_dev *dev) * re-enable them when it's ready. */ pci_read_config_word(dev, PCI_COMMAND, &command); + pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &bar); - if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0)) + if (!(command & PCI_COMMAND_MEMORY) || !bar) return; - /* Convert from PCI bus to resource space. 
*/ - csr = ioremap(pci_resource_start(dev, 0), 8); + csr = ioremap(bar, 8); if (!csr) { printk(KERN_WARNING "PCI: Can't map %s e100 registers\n", pci_name(dev)); diff --git a/trunk/drivers/power/power_supply_sysfs.c b/trunk/drivers/power/power_supply_sysfs.c index de3155b21285..c7c4574729b1 100644 --- a/trunk/drivers/power/power_supply_sysfs.c +++ b/trunk/drivers/power/power_supply_sysfs.c @@ -289,7 +289,6 @@ int power_supply_uevent(struct device *dev, char **envp, int num_envp, if (ret) goto out; } - envp[i] = NULL; out: free_page((unsigned long)prop_buf); diff --git a/trunk/drivers/scsi/aic94xx/aic94xx_task.c b/trunk/drivers/scsi/aic94xx/aic94xx_task.c index ab13824df856..d5d8caba3560 100644 --- a/trunk/drivers/scsi/aic94xx/aic94xx_task.c +++ b/trunk/drivers/scsi/aic94xx/aic94xx_task.c @@ -451,7 +451,7 @@ static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task, struct scb *scb; pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1, - PCI_DMA_TODEVICE); + PCI_DMA_FROMDEVICE); pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1, PCI_DMA_FROMDEVICE); @@ -486,7 +486,7 @@ static void asd_unbuild_smp_ascb(struct asd_ascb *a) BUG_ON(!task); pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1, - PCI_DMA_TODEVICE); + PCI_DMA_FROMDEVICE); pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1, PCI_DMA_FROMDEVICE); } diff --git a/trunk/drivers/scsi/esp_scsi.c b/trunk/drivers/scsi/esp_scsi.c index 95cf7b6cd622..77b06a983fa7 100644 --- a/trunk/drivers/scsi/esp_scsi.c +++ b/trunk/drivers/scsi/esp_scsi.c @@ -2314,7 +2314,6 @@ int __devinit scsi_esp_register(struct esp *esp, struct device *dev) esp->host->transportt = esp_transport_template; esp->host->max_lun = ESP_MAX_LUN; esp->host->cmd_per_lun = 2; - esp->host->unique_id = instance; esp_set_clock_params(esp); @@ -2338,7 +2337,7 @@ int __devinit scsi_esp_register(struct esp *esp, struct device *dev) if (err) return err; - instance++; + esp->host->unique_id = instance++; scsi_scan_host(esp->host); diff --git a/trunk/drivers/scsi/megaraid.c b/trunk/drivers/scsi/megaraid.c index da56163c30a8..3907f6718ede 100644 --- a/trunk/drivers/scsi/megaraid.c +++ b/trunk/drivers/scsi/megaraid.c @@ -1753,14 +1753,6 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len) *len = 0; - if (scsi_sg_count(cmd) == 1 && !adapter->has_64bit_addr) { - sg = scsi_sglist(cmd); - scb->dma_h_bulkdata = sg_dma_address(sg); - *buf = (u32)scb->dma_h_bulkdata; - *len = sg_dma_len(sg); - return 0; - } - scsi_for_each_sg(cmd, sg, sgcnt, idx) { if (adapter->has_64bit_addr) { scb->sgl64[idx].address = sg_dma_address(sg); diff --git a/trunk/drivers/scsi/scsi_transport_spi.c b/trunk/drivers/scsi/scsi_transport_spi.c index 4df21c92ff1e..6f56f8750635 100644 --- a/trunk/drivers/scsi/scsi_transport_spi.c +++ b/trunk/drivers/scsi/scsi_transport_spi.c @@ -787,12 +787,10 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) struct scsi_target *starget = sdev->sdev_target; struct Scsi_Host *shost = sdev->host; int len = sdev->inquiry_len; - int min_period = spi_min_period(starget); - int max_width = spi_max_width(starget); /* first set us up for narrow async */ DV_SET(offset, 0); DV_SET(width, 0); - + if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS) != SPI_COMPARE_SUCCESS) { starget_printk(KERN_ERR, starget, "Domain Validation Initial Inquiry Failed\n"); @@ -800,13 +798,9 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) return; } - if (!scsi_device_wide(sdev)) { - spi_max_width(starget) = 0; - max_width = 0; - } - 
/* test width */ - if (i->f->set_width && max_width) { + if (i->f->set_width && spi_max_width(starget) && + scsi_device_wide(sdev)) { i->f->set_width(starget, 1); if (spi_dv_device_compare_inquiry(sdev, buffer, @@ -815,11 +809,6 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) != SPI_COMPARE_SUCCESS) { starget_printk(KERN_ERR, starget, "Wide Transfers Fail\n"); i->f->set_width(starget, 0); - /* Make sure we don't force wide back on by asking - * for a transfer period that requires it */ - max_width = 0; - if (min_period < 10) - min_period = 10; } } @@ -839,8 +828,7 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) /* now set up to the maximum */ DV_SET(offset, spi_max_offset(starget)); - DV_SET(period, min_period); - + DV_SET(period, spi_min_period(starget)); /* try QAS requests; this should be harmless to set if the * target supports it */ if (scsi_device_qas(sdev)) { @@ -849,14 +837,14 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) DV_SET(qas, 0); } - if (scsi_device_ius(sdev) && min_period < 9) { + if (scsi_device_ius(sdev) && spi_min_period(starget) < 9) { /* This u320 (or u640). Set IU transfers */ DV_SET(iu, 1); /* Then set the optional parameters */ DV_SET(rd_strm, 1); DV_SET(wr_flow, 1); DV_SET(rti, 1); - if (min_period == 8) + if (spi_min_period(starget) == 8) DV_SET(pcomp_en, 1); } else { DV_SET(iu, 0); @@ -874,10 +862,6 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) } else { DV_SET(dt, 1); } - /* set width last because it will pull all the other - * parameters down to required values */ - DV_SET(width, max_width); - /* Do the read only INQUIRY tests */ spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len, spi_dv_device_compare_inquiry); diff --git a/trunk/drivers/serial/cpm_uart/cpm_uart_cpm1.h b/trunk/drivers/serial/cpm_uart/cpm_uart_cpm1.h index 2a6477834c3e..a99e45e2b6d8 100644 --- a/trunk/drivers/serial/cpm_uart/cpm_uart_cpm1.h +++ b/trunk/drivers/serial/cpm_uart/cpm_uart_cpm1.h @@ -37,6 +37,6 @@ static inline void cpm_set_smc_fcr(volatile smc_uart_t * up) up->smc_tfcr = SMC_EB; } -#define DPRAM_BASE ((unsigned char *)cpm_dpram_addr(0)) +#define DPRAM_BASE ((unsigned char *)&cpmp->cp_dpmem[0]) #endif diff --git a/trunk/drivers/serial/serial_cs.c b/trunk/drivers/serial/serial_cs.c index 7c8d78fbbbfb..a0ea43598515 100644 --- a/trunk/drivers/serial/serial_cs.c +++ b/trunk/drivers/serial/serial_cs.c @@ -943,7 +943,6 @@ static struct pcmcia_device_id serial_ids[] = { PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4), PCMCIA_MFC_DEVICE_PROD_ID12(2,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4), PCMCIA_MFC_DEVICE_PROD_ID12(3,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4), - PCMCIA_DEVICE_MANF_CARD(0x0279, 0x950b), /* too generic */ /* PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0160, 0x0002), */ /* PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0160, 0x0002), */ diff --git a/trunk/drivers/serial/sunsab.c b/trunk/drivers/serial/sunsab.c index ff610c23314b..e348ba684050 100644 --- a/trunk/drivers/serial/sunsab.c +++ b/trunk/drivers/serial/sunsab.c @@ -38,7 +38,7 @@ #include #include -#if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) +#if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif diff --git a/trunk/drivers/w1/w1.c b/trunk/drivers/w1/w1.c index a593f900eff4..8d7ab74170d5 100644 --- a/trunk/drivers/w1/w1.c +++ b/trunk/drivers/w1/w1.c @@ -431,7 +431,6 @@ static int w1_uevent(struct device *dev, char **envp, int num_envp, err = 
add_uevent_var(envp, num_envp, &cur_index, buffer, buffer_size, &cur_len, "W1_SLAVE_ID=%024LX", (unsigned long long)sl->reg_num.id); - envp[cur_index] = NULL; if (err) return err; diff --git a/trunk/fs/aio.c b/trunk/fs/aio.c index ea2e19820381..dbe699e9828c 100644 --- a/trunk/fs/aio.c +++ b/trunk/fs/aio.c @@ -1562,7 +1562,6 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, fput(file); return -EAGAIN; } - req->ki_filp = file; if (iocb->aio_flags & IOCB_FLAG_RESFD) { /* * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an @@ -1577,6 +1576,7 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, } } + req->ki_filp = file; ret = put_user(req->ki_key, &user_iocb->aio_key); if (unlikely(ret)) { dprintk("EFAULT: aio_key\n"); diff --git a/trunk/fs/binfmt_flat.c b/trunk/fs/binfmt_flat.c index fcb3405bb14e..861141b4f6d6 100644 --- a/trunk/fs/binfmt_flat.c +++ b/trunk/fs/binfmt_flat.c @@ -742,7 +742,6 @@ static int load_flat_file(struct linux_binprm * bprm, * __start to address 4 so that is okay). */ if (rev > OLD_FLAT_VERSION) { - unsigned long persistent = 0; for (i=0; i < relocs; i++) { unsigned long addr, relval; @@ -750,8 +749,6 @@ static int load_flat_file(struct linux_binprm * bprm, relocated (of course, the address has to be relocated first). */ relval = ntohl(reloc[i]); - if (flat_set_persistent (relval, &persistent)) - continue; addr = flat_get_relocate_addr(relval); rp = (unsigned long *) calc_reloc(addr, libinfo, id, 1); if (rp == (unsigned long *)RELOC_FAILED) { @@ -760,8 +757,7 @@ static int load_flat_file(struct linux_binprm * bprm, } /* Get the pointer's value. */ - addr = flat_get_addr_from_rp(rp, relval, flags, - &persistent); + addr = flat_get_addr_from_rp(rp, relval, flags); if (addr != 0) { /* * Do the relocation. PIC relocs in the data section are diff --git a/trunk/fs/compat_ioctl.c b/trunk/fs/compat_ioctl.c index 37310b0e8107..5a5b7116cefb 100644 --- a/trunk/fs/compat_ioctl.c +++ b/trunk/fs/compat_ioctl.c @@ -3190,8 +3190,6 @@ COMPATIBLE_IOCTL(SIOCSIWRETRY) COMPATIBLE_IOCTL(SIOCGIWRETRY) COMPATIBLE_IOCTL(SIOCSIWPOWER) COMPATIBLE_IOCTL(SIOCGIWPOWER) -COMPATIBLE_IOCTL(SIOCSIWAUTH) -COMPATIBLE_IOCTL(SIOCGIWAUTH) /* hiddev */ COMPATIBLE_IOCTL(HIDIOCGVERSION) COMPATIBLE_IOCTL(HIDIOCAPPLICATION) diff --git a/trunk/fs/lockd/svclock.c b/trunk/fs/lockd/svclock.c index d120ec39bcb0..a21e4bc5444b 100644 --- a/trunk/fs/lockd/svclock.c +++ b/trunk/fs/lockd/svclock.c @@ -171,14 +171,19 @@ nlmsvc_find_block(struct nlm_cookie *cookie) * GRANTED_RES message by cookie, without having to rely on the client's IP * address. 
--okir */ -static struct nlm_block * -nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host, - struct nlm_file *file, struct nlm_lock *lock, - struct nlm_cookie *cookie) +static inline struct nlm_block * +nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file, + struct nlm_lock *lock, struct nlm_cookie *cookie) { struct nlm_block *block; + struct nlm_host *host; struct nlm_rqst *call = NULL; + /* Create host handle for callback */ + host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len); + if (host == NULL) + return NULL; + call = nlm_alloc_call(host); if (call == NULL) return NULL; @@ -361,7 +366,6 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, struct nlm_lock *lock, int wait, struct nlm_cookie *cookie) { struct nlm_block *block = NULL; - struct nlm_host *host; int error; __be32 ret; @@ -373,10 +377,6 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, (long long)lock->fl.fl_end, wait); - /* Create host handle for callback */ - host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len); - if (host == NULL) - return nlm_lck_denied_nolocks; /* Lock file against concurrent access */ mutex_lock(&file->f_mutex); @@ -385,8 +385,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, */ block = nlmsvc_lookup_block(file, lock); if (block == NULL) { - block = nlmsvc_create_block(rqstp, nlm_get_host(host), file, - lock, cookie); + block = nlmsvc_create_block(rqstp, file, lock, cookie); ret = nlm_lck_denied_nolocks; if (block == NULL) goto out; @@ -450,7 +449,6 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, out: mutex_unlock(&file->f_mutex); nlmsvc_release_block(block); - nlm_release_host(host); dprintk("lockd: nlmsvc_lock returned %u\n", ret); return ret; } @@ -479,17 +477,10 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file, if (block == NULL) { struct file_lock *conf = kzalloc(sizeof(*conf), GFP_KERNEL); - struct nlm_host *host; if (conf == NULL) return nlm_granted; - /* Create host handle for callback */ - host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len); - if (host == NULL) { - kfree(conf); - return nlm_lck_denied_nolocks; - } - block = nlmsvc_create_block(rqstp, host, file, lock, cookie); + block = nlmsvc_create_block(rqstp, file, lock, cookie); if (block == NULL) { kfree(conf); return nlm_granted; diff --git a/trunk/fs/nfs/client.c b/trunk/fs/nfs/client.c index a204484072f3..a49f9feff776 100644 --- a/trunk/fs/nfs/client.c +++ b/trunk/fs/nfs/client.c @@ -588,6 +588,16 @@ static int nfs_init_server(struct nfs_server *server, const struct nfs_mount_dat server->namelen = data->namlen; /* Create a client RPC handle for the NFSv3 ACL management interface */ nfs_init_server_aclclient(server); + if (clp->cl_nfsversion == 3) { + if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN) + server->namelen = NFS3_MAXNAMLEN; + if (!(data->flags & NFS_MOUNT_NORDIRPLUS)) + server->caps |= NFS_CAP_READDIRPLUS; + } else { + if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN) + server->namelen = NFS2_MAXNAMLEN; + } + dprintk("<-- nfs_init_server() = 0 [new %p]\n", clp); return 0; @@ -784,16 +794,6 @@ struct nfs_server *nfs_create_server(const struct nfs_mount_data *data, error = nfs_probe_fsinfo(server, mntfh, &fattr); if (error < 0) goto error; - if (server->nfs_client->rpc_ops->version == 3) { - if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN) - server->namelen = NFS3_MAXNAMLEN; - if (!(data->flags & NFS_MOUNT_NORDIRPLUS)) - server->caps |= NFS_CAP_READDIRPLUS; - } else { - if 
(server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN) - server->namelen = NFS2_MAXNAMLEN; - } - if (!(fattr.valid & NFS_ATTR_FATTR)) { error = server->nfs_client->rpc_ops->getattr(server, mntfh, &fattr); if (error < 0) { @@ -984,9 +984,6 @@ struct nfs_server *nfs4_create_server(const struct nfs4_mount_data *data, if (error < 0) goto error; - if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) - server->namelen = NFS4_MAXNAMLEN; - BUG_ON(!server->nfs_client); BUG_ON(!server->nfs_client->rpc_ops); BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops); @@ -1059,9 +1056,6 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data, if (error < 0) goto error; - if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) - server->namelen = NFS4_MAXNAMLEN; - dprintk("Referral FSID: %llx:%llx\n", (unsigned long long) server->fsid.major, (unsigned long long) server->fsid.minor); @@ -1121,9 +1115,6 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source, if (error < 0) goto out_free_server; - if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) - server->namelen = NFS4_MAXNAMLEN; - dprintk("Cloned FSID: %llx:%llx\n", (unsigned long long) server->fsid.major, (unsigned long long) server->fsid.minor); diff --git a/trunk/fs/nfs/dir.c b/trunk/fs/nfs/dir.c index e4a04d16b8b0..ea97408e423e 100644 --- a/trunk/fs/nfs/dir.c +++ b/trunk/fs/nfs/dir.c @@ -1162,8 +1162,6 @@ static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc) } if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR)) return NULL; - if (name.len > NFS_SERVER(dir)->namelen) - return NULL; /* Note: caller is already holding the dir->i_mutex! */ dentry = d_alloc(parent, &name); if (dentry == NULL) diff --git a/trunk/fs/nfs/getroot.c b/trunk/fs/nfs/getroot.c index 522e5ad4d8ad..d1cbf0a0fbb2 100644 --- a/trunk/fs/nfs/getroot.c +++ b/trunk/fs/nfs/getroot.c @@ -175,9 +175,6 @@ int nfs4_path_walk(struct nfs_server *server, path++; name.len = path - (const char *) name.name; - if (name.len > NFS4_MAXNAMLEN) - return -ENAMETOOLONG; - eat_dot_dir: while (*path == '/') path++; diff --git a/trunk/fs/ocfs2/localalloc.c b/trunk/fs/ocfs2/localalloc.c index d272847d5a07..de984d272576 100644 --- a/trunk/fs/ocfs2/localalloc.c +++ b/trunk/fs/ocfs2/localalloc.c @@ -514,10 +514,8 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb, ac->ac_bh = osb->local_alloc_bh; status = 0; bail: - if (status < 0 && local_alloc_inode) { - mutex_unlock(&local_alloc_inode->i_mutex); + if (status < 0 && local_alloc_inode) iput(local_alloc_inode); - } mlog_exit(status); return status; diff --git a/trunk/fs/splice.c b/trunk/fs/splice.c index e95a36228863..c010a72ca2d2 100644 --- a/trunk/fs/splice.c +++ b/trunk/fs/splice.c @@ -1223,33 +1223,6 @@ static long do_splice(struct file *in, loff_t __user *off_in, return -EINVAL; } -/* - * Do a copy-from-user while holding the mmap_semaphore for reading, in a - * manner safe from deadlocking with simultaneous mmap() (grabbing mmap_sem - * for writing) and page faulting on the user memory pointed to by src. - * This assumes that we will very rarely hit the partial != 0 path, or this - * will not be a win. 
- */ -static int copy_from_user_mmap_sem(void *dst, const void __user *src, size_t n) -{ - int partial; - - pagefault_disable(); - partial = __copy_from_user_inatomic(dst, src, n); - pagefault_enable(); - - /* - * Didn't copy everything, drop the mmap_sem and do a faulting copy - */ - if (unlikely(partial)) { - up_read(¤t->mm->mmap_sem); - partial = copy_from_user(dst, src, n); - down_read(¤t->mm->mmap_sem); - } - - return partial; -} - /* * Map an iov into an array of pages and offset/length tupples. With the * partial_page structure, we can map several non-contiguous ranges into @@ -1263,26 +1236,31 @@ static int get_iovec_page_array(const struct iovec __user *iov, { int buffers = 0, error = 0; + /* + * It's ok to take the mmap_sem for reading, even + * across a "get_user()". + */ down_read(¤t->mm->mmap_sem); while (nr_vecs) { unsigned long off, npages; - struct iovec entry; void __user *base; size_t len; int i; - error = -EFAULT; - if (copy_from_user_mmap_sem(&entry, iov, sizeof(entry))) + /* + * Get user address base and length for this iovec. + */ + error = get_user(base, &iov->iov_base); + if (unlikely(error)) + break; + error = get_user(len, &iov->iov_len); + if (unlikely(error)) break; - - base = entry.iov_base; - len = entry.iov_len; /* * Sanity check this iovec. 0 read succeeds. */ - error = 0; if (unlikely(!len)) break; error = -EFAULT; diff --git a/trunk/fs/ufs/super.c b/trunk/fs/ufs/super.c index 38eb0b7a1f3d..73402c5eeb8a 100644 --- a/trunk/fs/ufs/super.c +++ b/trunk/fs/ufs/super.c @@ -894,7 +894,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent) goto again; } - sbi->s_flags = flags;/*after that line some functions use s_flags*/ + ufs_print_super_stuff(sb, usb1, usb2, usb3); /* @@ -1025,6 +1025,8 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent) UFS_MOUNT_UFSTYPE_44BSD) uspi->s_maxsymlinklen = fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen); + + sbi->s_flags = flags; inode = iget(sb, UFS_ROOTINO); if (!inode || is_bad_inode(inode)) diff --git a/trunk/fs/xfs/xfs_buf_item.h b/trunk/fs/xfs/xfs_buf_item.h index d7e136143066..fa25b7dcc6c3 100644 --- a/trunk/fs/xfs/xfs_buf_item.h +++ b/trunk/fs/xfs/xfs_buf_item.h @@ -52,6 +52,11 @@ typedef struct xfs_buf_log_format_t { #define XFS_BLI_UDQUOT_BUF 0x4 #define XFS_BLI_PDQUOT_BUF 0x8 #define XFS_BLI_GDQUOT_BUF 0x10 +/* + * This flag indicates that the buffer contains newly allocated + * inodes. + */ +#define XFS_BLI_INODE_NEW_BUF 0x20 #define XFS_BLI_CHUNK 128 #define XFS_BLI_SHIFT 7 diff --git a/trunk/fs/xfs/xfs_log_recover.c b/trunk/fs/xfs/xfs_log_recover.c index 8ae6e8e5f3db..7174991f4bef 100644 --- a/trunk/fs/xfs/xfs_log_recover.c +++ b/trunk/fs/xfs/xfs_log_recover.c @@ -1874,6 +1874,7 @@ xlog_recover_do_inode_buffer( /*ARGSUSED*/ STATIC void xlog_recover_do_reg_buffer( + xfs_mount_t *mp, xlog_recover_item_t *item, xfs_buf_t *bp, xfs_buf_log_format_t *buf_f) @@ -1884,6 +1885,50 @@ xlog_recover_do_reg_buffer( unsigned int *data_map = NULL; unsigned int map_size = 0; int error; + int stale_buf = 1; + + /* + * Scan through the on-disk inode buffer and attempt to + * determine if it has been written to since it was logged. 
+ * + * - If any of the magic numbers are incorrect then the buffer is stale + * - If any of the modes are non-zero then the buffer is not stale + * - If all of the modes are zero and at least one of the generation + * counts is non-zero then the buffer is stale + * + * If the end result is a stale buffer then the log buffer is replayed + * otherwise it is skipped. + * + * This heuristic is not perfect. It can be improved by scanning the + * entire inode chunk for evidence that any of the inode clusters have + * been updated. To fix this problem completely we will need a major + * architectural change to the logging system. + */ + if (buf_f->blf_flags & XFS_BLI_INODE_NEW_BUF) { + xfs_dinode_t *dip; + int inodes_per_buf; + int mode_count = 0; + int gen_count = 0; + + stale_buf = 0; + inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog; + for (i = 0; i < inodes_per_buf; i++) { + dip = (xfs_dinode_t *)xfs_buf_offset(bp, + i * mp->m_sb.sb_inodesize); + if (be16_to_cpu(dip->di_core.di_magic) != + XFS_DINODE_MAGIC) { + stale_buf = 1; + break; + } + if (dip->di_core.di_mode) + mode_count++; + if (dip->di_core.di_gen) + gen_count++; + } + + if (!mode_count && gen_count) + stale_buf = 1; + } switch (buf_f->blf_type) { case XFS_LI_BUF: @@ -1917,7 +1962,7 @@ xlog_recover_do_reg_buffer( -1, 0, XFS_QMOPT_DOWARN, "dquot_buf_recover"); } - if (!error) + if (!error && stale_buf) memcpy(xfs_buf_offset(bp, (uint)bit << XFS_BLI_SHIFT), /* dest */ item->ri_buf[i].i_addr, /* source */ @@ -2089,7 +2134,7 @@ xlog_recover_do_dquot_buffer( if (log->l_quotaoffs_flag & type) return; - xlog_recover_do_reg_buffer(item, bp, buf_f); + xlog_recover_do_reg_buffer(mp, item, bp, buf_f); } /* @@ -2190,7 +2235,7 @@ xlog_recover_do_buffer_trans( (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) { xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); } else { - xlog_recover_do_reg_buffer(item, bp, buf_f); + xlog_recover_do_reg_buffer(mp, item, bp, buf_f); } if (error) return XFS_ERROR(error); diff --git a/trunk/fs/xfs/xfs_trans_buf.c b/trunk/fs/xfs/xfs_trans_buf.c index 60b6b898022b..95fff6872a2f 100644 --- a/trunk/fs/xfs/xfs_trans_buf.c +++ b/trunk/fs/xfs/xfs_trans_buf.c @@ -966,6 +966,7 @@ xfs_trans_inode_alloc_buf( ASSERT(atomic_read(&bip->bli_refcount) > 0); bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF; + bip->bli_format.blf_flags |= XFS_BLI_INODE_NEW_BUF; } diff --git a/trunk/include/acpi/acpi_drivers.h b/trunk/include/acpi/acpi_drivers.h index f85f77a538aa..202acb9ff4d0 100644 --- a/trunk/include/acpi/acpi_drivers.h +++ b/trunk/include/acpi/acpi_drivers.h @@ -147,6 +147,10 @@ static inline void unregister_hotplug_dock_device(acpi_handle handle) /*-------------------------------------------------------------------------- Suspend/Resume -------------------------------------------------------------------------- */ +#ifdef CONFIG_ACPI_SLEEP extern int acpi_sleep_init(void); +#else +static inline int acpi_sleep_init(void) { return 0; } +#endif #endif /*__ACPI_DRIVERS_H__*/ diff --git a/trunk/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h b/trunk/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h index 69b9f8e120e9..e043cafa3c42 100644 --- a/trunk/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h +++ b/trunk/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h @@ -1,6 +1,5 @@ #include #include -#include #define NR_PORTS 1 @@ -93,24 +92,18 @@ struct bfin_serial_res bfin_serial_resource[] = { } }; -#define DRIVER_NAME "bfin-uart" int nr_ports = NR_PORTS; static void bfin_serial_hw_init(struct bfin_serial_port 
*uart) { -#ifdef CONFIG_SERIAL_BFIN_UART0 - peripheral_request(P_UART0_TX, DRIVER_NAME); - peripheral_request(P_UART0_RX, DRIVER_NAME); -#endif - #ifdef CONFIG_SERIAL_BFIN_CTSRTS if (uart->cts_pin >= 0) { - gpio_request(uart->cts_pin, DRIVER_NAME); + gpio_request(uart->cts_pin, NULL); gpio_direction_input(uart->cts_pin); } if (uart->rts_pin >= 0) { - gpio_request(uart->rts_pin, DRIVER_NAME); + gpio_request(uart->rts_pin, NULL); gpio_direction_input(uart->rts_pin); } #endif diff --git a/trunk/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h b/trunk/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h index 6fb328f5186a..8f5d9c4d8d5b 100644 --- a/trunk/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h +++ b/trunk/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h @@ -1,6 +1,5 @@ #include #include -#include #define NR_PORTS 2 @@ -123,29 +122,25 @@ struct bfin_serial_res bfin_serial_resource[] = { int nr_ports = ARRAY_SIZE(bfin_serial_resource); -#define DRIVER_NAME "bfin-uart" - static void bfin_serial_hw_init(struct bfin_serial_port *uart) { + unsigned short val; + val = bfin_read16(BFIN_PORT_MUX); + val &= ~(PFDE | PFTE); + bfin_write16(BFIN_PORT_MUX, val); -#ifdef CONFIG_SERIAL_BFIN_UART0 - peripheral_request(P_UART0_TX, DRIVER_NAME); - peripheral_request(P_UART0_RX, DRIVER_NAME); -#endif - -#ifdef CONFIG_SERIAL_BFIN_UART1 - peripheral_request(P_UART1_TX, DRIVER_NAME); - peripheral_request(P_UART1_RX, DRIVER_NAME); -#endif + val = bfin_read16(PORTF_FER); + val |= 0xF; + bfin_write16(PORTF_FER, val); #ifdef CONFIG_SERIAL_BFIN_CTSRTS if (uart->cts_pin >= 0) { - gpio_request(uart->cts_pin, DRIVER_NAME); + gpio_request(uart->cts_pin, NULL); gpio_direction_input(uart->cts_pin); } if (uart->rts_pin >= 0) { - gpio_request(uart->rts_pin, DRIVER_NAME); + gpio_request(uart->rts_pin, NULL); gpio_direction_output(uart->rts_pin); } #endif diff --git a/trunk/include/asm-blackfin/mach-bf537/portmux.h b/trunk/include/asm-blackfin/mach-bf537/portmux.h index ae6c53b28452..23e13c5abc4d 100644 --- a/trunk/include/asm-blackfin/mach-bf537/portmux.h +++ b/trunk/include/asm-blackfin/mach-bf537/portmux.h @@ -106,37 +106,4 @@ #define P_SPI0_SSEL2 (P_DEFINED | P_IDENT(PORT_PJ11) | P_FUNCT(1)) #define P_SPI0_SSEL7 (P_DEFINED | P_IDENT(PORT_PJ5) | P_FUNCT(2)) -#define P_MII0 {\ - P_MII0_ETxD0, \ - P_MII0_ETxD1, \ - P_MII0_ETxD2, \ - P_MII0_ETxD3, \ - P_MII0_ETxEN, \ - P_MII0_TxCLK, \ - P_MII0_PHYINT, \ - P_MII0_COL, \ - P_MII0_ERxD0, \ - P_MII0_ERxD1, \ - P_MII0_ERxD2, \ - P_MII0_ERxD3, \ - P_MII0_ERxDV, \ - P_MII0_ERxCLK, \ - P_MII0_ERxER, \ - P_MII0_CRS, \ - P_MDC, \ - P_MDIO, 0} - - -#define P_RMII0 {\ - P_MII0_ETxD0, \ - P_MII0_ETxD1, \ - P_MII0_ETxEN, \ - P_MII0_ERxD0, \ - P_MII0_ERxD1, \ - P_MII0_ERxER, \ - P_RMII0_REF_CLK, \ - P_RMII0_MDINT, \ - P_RMII0_CRS_DV, \ - P_MDC, \ - P_MDIO, 0} -#endif /* _MACH_PORTMUX_H_ */ +#endif /* _MACH_PORTMUX_H_ */ diff --git a/trunk/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h b/trunk/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h index 69b9f8e120e9..e043cafa3c42 100644 --- a/trunk/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h +++ b/trunk/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h @@ -1,6 +1,5 @@ #include #include -#include #define NR_PORTS 1 @@ -93,24 +92,18 @@ struct bfin_serial_res bfin_serial_resource[] = { } }; -#define DRIVER_NAME "bfin-uart" int nr_ports = NR_PORTS; static void bfin_serial_hw_init(struct bfin_serial_port *uart) { -#ifdef CONFIG_SERIAL_BFIN_UART0 - peripheral_request(P_UART0_TX, DRIVER_NAME); - peripheral_request(P_UART0_RX, DRIVER_NAME); 
-#endif - #ifdef CONFIG_SERIAL_BFIN_CTSRTS if (uart->cts_pin >= 0) { - gpio_request(uart->cts_pin, DRIVER_NAME); + gpio_request(uart->cts_pin, NULL); gpio_direction_input(uart->cts_pin); } if (uart->rts_pin >= 0) { - gpio_request(uart->rts_pin, DRIVER_NAME); + gpio_request(uart->rts_pin, NULL); gpio_direction_input(uart->rts_pin); } #endif diff --git a/trunk/include/asm-blackfin/portmux.h b/trunk/include/asm-blackfin/portmux.h index 0d3f650d2d99..9d3681e42111 100644 --- a/trunk/include/asm-blackfin/portmux.h +++ b/trunk/include/asm-blackfin/portmux.h @@ -14,12 +14,6 @@ #define P_MAYSHARE 0x2000 #define P_DONTCARE 0x1000 - -int peripheral_request(unsigned short per, const char *label); -void peripheral_free(unsigned short per); -int peripheral_request_list(unsigned short per[], const char *label); -void peripheral_free_list(unsigned short per[]); - #include #include @@ -151,22 +145,6 @@ void peripheral_free_list(unsigned short per[]); #define P_SPI2_SSEL3 P_UNDEF #endif -#ifndef P_SPI2_SSEL4 -#define P_SPI2_SSEL4 P_UNDEF -#endif - -#ifndef P_SPI2_SSEL5 -#define P_SPI2_SSEL5 P_UNDEF -#endif - -#ifndef P_SPI2_SSEL6 -#define P_SPI2_SSEL6 P_UNDEF -#endif - -#ifndef P_SPI2_SSEL7 -#define P_SPI2_SSEL7 P_UNDEF -#endif - #ifndef P_SPI2_SCK #define P_SPI2_SCK P_UNDEF #endif @@ -535,22 +513,6 @@ void peripheral_free_list(unsigned short per[]); #define P_SPI0_SSEL3 P_UNDEF #endif -#ifndef P_SPI0_SSEL4 -#define P_SPI0_SSEL4 P_UNDEF -#endif - -#ifndef P_SPI0_SSEL5 -#define P_SPI0_SSEL5 P_UNDEF -#endif - -#ifndef P_SPI0_SSEL6 -#define P_SPI0_SSEL6 P_UNDEF -#endif - -#ifndef P_SPI0_SSEL7 -#define P_SPI0_SSEL7 P_UNDEF -#endif - #ifndef P_UART0_TX #define P_UART0_TX P_UNDEF #endif @@ -779,23 +741,6 @@ void peripheral_free_list(unsigned short per[]); #define P_SPI1_SSEL3 P_UNDEF #endif - -#ifndef P_SPI1_SSEL4 -#define P_SPI1_SSEL4 P_UNDEF -#endif - -#ifndef P_SPI1_SSEL5 -#define P_SPI1_SSEL5 P_UNDEF -#endif - -#ifndef P_SPI1_SSEL6 -#define P_SPI1_SSEL6 P_UNDEF -#endif - -#ifndef P_SPI1_SSEL7 -#define P_SPI1_SSEL7 P_UNDEF -#endif - #ifndef P_SPI1_SCK #define P_SPI1_SCK P_UNDEF #endif diff --git a/trunk/include/asm-blackfin/unistd.h b/trunk/include/asm-blackfin/unistd.h index 07ffe8b718c5..0df9f2d322a3 100644 --- a/trunk/include/asm-blackfin/unistd.h +++ b/trunk/include/asm-blackfin/unistd.h @@ -3,7 +3,6 @@ /* * This file contains the system call numbers. 
*/ -#define __NR_restart_syscall 0 #define __NR_exit 1 #define __NR_fork 2 #define __NR_read 3 @@ -166,13 +165,13 @@ #define __NR_sched_get_priority_min 160 #define __NR_sched_rr_get_interval 161 #define __NR_nanosleep 162 -#define __NR_mremap 163 + /* 163 __NR_mremap */ #define __NR_setresuid 164 #define __NR_getresuid 165 /* 166 __NR_vm86 */ /* 167 __NR_query_module */ /* 168 __NR_poll */ -#define __NR_nfsservctl 169 + /* 169 __NR_nfsservctl */ #define __NR_setresgid 170 #define __NR_getresgid 171 #define __NR_prctl 172 @@ -228,7 +227,7 @@ /* 222 reserved for TUX */ /* 223 reserved for TUX */ #define __NR_gettid 224 -#define __NR_readahead 225 + /* 225 __NR_readahead */ #define __NR_setxattr 226 #define __NR_lsetxattr 227 #define __NR_fsetxattr 228 @@ -288,7 +287,7 @@ #define __NR_mq_timedreceive (__NR_mq_open+3) #define __NR_mq_notify (__NR_mq_open+4) #define __NR_mq_getsetattr (__NR_mq_open+5) -#define __NR_kexec_load 284 + /* 284 __NR_sys_kexec_load */ #define __NR_waitid 285 #define __NR_add_key 286 #define __NR_request_key 287 @@ -353,54 +352,9 @@ #define __NR_shmdt 340 #define __NR_shmget 341 -#define __NR_splice 342 -#define __NR_sync_file_range 343 -#define __NR_tee 344 -#define __NR_vmsplice 345 - -#define __NR_epoll_pwait 346 -#define __NR_utimensat 347 -#define __NR_signalfd 348 -#define __NR_timerfd 349 -#define __NR_eventfd 350 -#define __NR_pread64 351 -#define __NR_pwrite64 352 -#define __NR_fadvise64 353 -#define __NR_set_robust_list 354 -#define __NR_get_robust_list 355 -#define __NR_fallocate 356 - -#define __NR_syscall 357 +#define __NR_syscall 342 #define NR_syscalls __NR_syscall -/* Old optional stuff no one actually uses */ -#define __IGNORE_sysfs -#define __IGNORE_uselib - -/* Implement the newer interfaces */ -#define __IGNORE_mmap -#define __IGNORE_poll -#define __IGNORE_select -#define __IGNORE_utime - -/* Not relevant on no-mmu */ -#define __IGNORE_swapon -#define __IGNORE_swapoff -#define __IGNORE_msync -#define __IGNORE_mlock -#define __IGNORE_munlock -#define __IGNORE_mlockall -#define __IGNORE_munlockall -#define __IGNORE_mincore -#define __IGNORE_madvise -#define __IGNORE_remap_file_pages -#define __IGNORE_mbind -#define __IGNORE_get_mempolicy -#define __IGNORE_set_mempolicy -#define __IGNORE_migrate_pages -#define __IGNORE_move_pages -#define __IGNORE_getcpu - #ifdef __KERNEL__ #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_STAT64 diff --git a/trunk/include/asm-h8300/flat.h b/trunk/include/asm-h8300/flat.h index 2a873508a9a1..c20eee767d6f 100644 --- a/trunk/include/asm-h8300/flat.h +++ b/trunk/include/asm-h8300/flat.h @@ -9,7 +9,6 @@ #define flat_argvp_envp_on_stack() 1 #define flat_old_ram_flag(flags) 1 #define flat_reloc_valid(reloc, size) ((reloc) <= (size)) -#define flat_set_persistent(relval, p) 0 /* * on the H8 a couple of the relocations have an instruction in the @@ -19,7 +18,7 @@ */ #define flat_get_relocate_addr(rel) (rel) -#define flat_get_addr_from_rp(rp, relval, flags, persistent) \ +#define flat_get_addr_from_rp(rp, relval, flags) \ (get_unaligned(rp) & ((flags & FLAT_FLAG_GOTPIC) ? 
0xffffffff: 0x00ffffff)) #define flat_put_addr_at_rp(rp, addr, rel) \ put_unaligned (((*(char *)(rp)) << 24) | ((addr) & 0x00ffffff), rp) diff --git a/trunk/include/asm-i386/system.h b/trunk/include/asm-i386/system.h index d69ba937e092..609756c61676 100644 --- a/trunk/include/asm-i386/system.h +++ b/trunk/include/asm-i386/system.h @@ -214,6 +214,11 @@ static inline unsigned long get_limit(unsigned long segment) */ +/* + * Actually only lfence would be needed for mb() because all stores done + * by the kernel should be already ordered. But keep a full barrier for now. + */ + #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) diff --git a/trunk/include/asm-m32r/flat.h b/trunk/include/asm-m32r/flat.h index d851cf0c4aa5..1b285f65cab6 100644 --- a/trunk/include/asm-m32r/flat.h +++ b/trunk/include/asm-m32r/flat.h @@ -15,10 +15,9 @@ #define flat_stack_align(sp) (*sp += (*sp & 3 ? (4 - (*sp & 3)): 0)) #define flat_argvp_envp_on_stack() 0 #define flat_old_ram_flag(flags) (flags) -#define flat_set_persistent(relval, p) 0 #define flat_reloc_valid(reloc, size) \ (((reloc) - textlen_for_m32r_lo16_data) <= (size)) -#define flat_get_addr_from_rp(rp, relval, flags, persistent) \ +#define flat_get_addr_from_rp(rp, relval, flags) \ m32r_flat_get_addr_from_rp(rp, relval, (text_len) ) #define flat_put_addr_at_rp(rp, addr, relval) \ diff --git a/trunk/include/asm-m68knommu/flat.h b/trunk/include/asm-m68knommu/flat.h index 814b5174a8e0..2d836edc4344 100644 --- a/trunk/include/asm-m68knommu/flat.h +++ b/trunk/include/asm-m68knommu/flat.h @@ -9,9 +9,8 @@ #define flat_argvp_envp_on_stack() 1 #define flat_old_ram_flag(flags) (flags) #define flat_reloc_valid(reloc, size) ((reloc) <= (size)) -#define flat_get_addr_from_rp(rp, relval, flags, p) get_unaligned(rp) +#define flat_get_addr_from_rp(rp, relval, flags) get_unaligned(rp) #define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val,rp) #define flat_get_relocate_addr(rel) (rel) -#define flat_set_persistent(relval, p) 0 #endif /* __M68KNOMMU_FLAT_H__ */ diff --git a/trunk/include/asm-mips/cmpxchg.h b/trunk/include/asm-mips/cmpxchg.h deleted file mode 100644 index c5b4708e003b..000000000000 --- a/trunk/include/asm-mips/cmpxchg.h +++ /dev/null @@ -1,107 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * - * Copyright (C) 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org) - */ -#ifndef __ASM_CMPXCHG_H -#define __ASM_CMPXCHG_H - -#include - -#define __HAVE_ARCH_CMPXCHG 1 - -#define __cmpxchg_asm(ld, st, m, old, new) \ -({ \ - __typeof(*(m)) __ret; \ - \ - if (cpu_has_llsc && R10000_LLSC_WAR) { \ - __asm__ __volatile__( \ - " .set push \n" \ - " .set noat \n" \ - " .set mips3 \n" \ - "1: " ld " %0, %2 # __cmpxchg_asm \n" \ - " bne %0, %z3, 2f \n" \ - " .set mips0 \n" \ - " move $1, %z4 \n" \ - " .set mips3 \n" \ - " " st " $1, %1 \n" \ - " beqzl $1, 1b \n" \ - "2: \n" \ - " .set pop \n" \ - : "=&r" (__ret), "=R" (*m) \ - : "R" (*m), "Jr" (old), "Jr" (new) \ - : "memory"); \ - } else if (cpu_has_llsc) { \ - __asm__ __volatile__( \ - " .set push \n" \ - " .set noat \n" \ - " .set mips3 \n" \ - "1: " ld " %0, %2 # __cmpxchg_asm \n" \ - " bne %0, %z3, 2f \n" \ - " .set mips0 \n" \ - " move $1, %z4 \n" \ - " .set mips3 \n" \ - " " st " $1, %1 \n" \ - " beqz $1, 3f \n" \ - "2: \n" \ - " .subsection 2 \n" \ - "3: b 1b \n" \ - " .previous \n" \ - " .set pop \n" \ - : "=&r" (__ret), "=R" (*m) \ - : "R" (*m), "Jr" (old), "Jr" (new) \ - : "memory"); \ - } else { \ - unsigned long __flags; \ - \ - raw_local_irq_save(__flags); \ - __ret = *m; \ - if (__ret == old) \ - *m = new; \ - raw_local_irq_restore(__flags); \ - } \ - \ - __ret; \ -}) - -/* - * This function doesn't exist, so you'll get a linker error - * if something tries to do an invalid cmpxchg(). - */ -extern void __cmpxchg_called_with_bad_pointer(void); - -#define __cmpxchg(ptr,old,new,barrier) \ -({ \ - __typeof__(ptr) __ptr = (ptr); \ - __typeof__(*(ptr)) __old = (old); \ - __typeof__(*(ptr)) __new = (new); \ - __typeof__(*(ptr)) __res = 0; \ - \ - barrier; \ - \ - switch (sizeof(*(__ptr))) { \ - case 4: \ - __res = __cmpxchg_asm("ll", "sc", __ptr, __old, __new); \ - break; \ - case 8: \ - if (sizeof(long) == 8) { \ - __res = __cmpxchg_asm("lld", "scd", __ptr, \ - __old, __new); \ - break; \ - } \ - default: \ - __cmpxchg_called_with_bad_pointer(); \ - break; \ - } \ - \ - barrier; \ - \ - __res; \ -}) - -#define cmpxchg(ptr, old, new) __cmpxchg(ptr, old, new, smp_llsc_mb()) -#define cmpxchg_local(ptr, old, new) __cmpxchg(ptr, old, new,) - -#endif /* __ASM_CMPXCHG_H */ diff --git a/trunk/include/asm-mips/fcntl.h b/trunk/include/asm-mips/fcntl.h index 2a52333a062d..00a50ec1c19f 100644 --- a/trunk/include/asm-mips/fcntl.h +++ b/trunk/include/asm-mips/fcntl.h @@ -13,7 +13,6 @@ #define O_SYNC 0x0010 #define O_NONBLOCK 0x0080 #define O_CREAT 0x0100 /* not fcntl */ -#define O_TRUNC 0x0200 /* not fcntl */ #define O_EXCL 0x0400 /* not fcntl */ #define O_NOCTTY 0x0800 /* not fcntl */ #define FASYNC 0x1000 /* fcntl, for BSD compatibility */ diff --git a/trunk/include/asm-mips/irq.h b/trunk/include/asm-mips/irq.h index 2cb52cf8bd4e..97102ebc54b1 100644 --- a/trunk/include/asm-mips/irq.h +++ b/trunk/include/asm-mips/irq.h @@ -24,30 +24,7 @@ static inline int irq_canonicalize(int irq) #define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... 
*/ #endif -#ifdef CONFIG_MIPS_MT_SMTC - -struct irqaction; - -extern unsigned long irq_hwmask[]; -extern int setup_irq_smtc(unsigned int irq, struct irqaction * new, - unsigned long hwmask); - -static inline void smtc_im_ack_irq(unsigned int irq) -{ - if (irq_hwmask[irq] & ST0_IM) - set_c0_status(irq_hwmask[irq] & ST0_IM); -} - -#else - -static inline void smtc_im_ack_irq(unsigned int irq) -{ -} - -#endif /* CONFIG_MIPS_MT_SMTC */ - #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP - /* * Clear interrupt mask handling "backstop" if irq_hwmask * entry so indicates. This implies that the ack() or end() @@ -61,7 +38,6 @@ do { \ ~(irq_hwmask[irq] & 0x0000ff00)); \ } while (0) #else - #define __DO_IRQ_SMTC_HOOK(irq) do { } while (0) #endif @@ -84,6 +60,14 @@ do { \ extern void arch_init_irq(void); extern void spurious_interrupt(void); +#ifdef CONFIG_MIPS_MT_SMTC +struct irqaction; + +extern unsigned long irq_hwmask[]; +extern int setup_irq_smtc(unsigned int irq, struct irqaction * new, + unsigned long hwmask); +#endif /* CONFIG_MIPS_MT_SMTC */ + extern int allocate_irqno(void); extern void alloc_legacy_irqno(void); extern void free_irqno(unsigned int irq); diff --git a/trunk/include/asm-mips/local.h b/trunk/include/asm-mips/local.h index f9a5ce5c9af1..ed882c88e0ca 100644 --- a/trunk/include/asm-mips/local.h +++ b/trunk/include/asm-mips/local.h @@ -4,7 +4,6 @@ #include #include #include -#include #include typedef struct @@ -115,6 +114,68 @@ static __inline__ long local_sub_return(long i, local_t * l) return result; } +/* + * local_sub_if_positive - conditionally subtract integer from atomic variable + * @i: integer value to subtract + * @l: pointer of type local_t + * + * Atomically test @l and subtract @i if @l is greater or equal than @i. + * The function returns the old value of @l minus @i. 
+ */ +static __inline__ long local_sub_if_positive(long i, local_t * l) +{ + unsigned long result; + + if (cpu_has_llsc && R10000_LLSC_WAR) { + unsigned long temp; + + __asm__ __volatile__( + " .set mips3 \n" + "1:" __LL "%1, %2 # local_sub_if_positive\n" + " dsubu %0, %1, %3 \n" + " bltz %0, 1f \n" + __SC "%0, %2 \n" + " .set noreorder \n" + " beqzl %0, 1b \n" + " dsubu %0, %1, %3 \n" + " .set reorder \n" + "1: \n" + " .set mips0 \n" + : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) + : "Ir" (i), "m" (l->a.counter) + : "memory"); + } else if (cpu_has_llsc) { + unsigned long temp; + + __asm__ __volatile__( + " .set mips3 \n" + "1:" __LL "%1, %2 # local_sub_if_positive\n" + " dsubu %0, %1, %3 \n" + " bltz %0, 1f \n" + __SC "%0, %2 \n" + " .set noreorder \n" + " beqz %0, 1b \n" + " dsubu %0, %1, %3 \n" + " .set reorder \n" + "1: \n" + " .set mips0 \n" + : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) + : "Ir" (i), "m" (l->a.counter) + : "memory"); + } else { + unsigned long flags; + + local_irq_save(flags); + result = l->a.counter; + result -= i; + if (result >= 0) + l->a.counter = result; + local_irq_restore(flags); + } + + return result; +} + #define local_cmpxchg(l, o, n) \ ((long)cmpxchg_local(&((l)->a.counter), (o), (n))) #define local_xchg(l, n) (xchg_local(&((l)->a.counter),(n))) @@ -172,6 +233,12 @@ static __inline__ long local_sub_return(long i, local_t * l) */ #define local_dec_and_test(l) (local_sub_return(1, (l)) == 0) +/* + * local_dec_if_positive - decrement by 1 if old value positive + * @l: pointer of type local_t + */ +#define local_dec_if_positive(l) local_sub_if_positive(1, l) + /* * local_add_negative - add and test if negative * @l: pointer of type local_t diff --git a/trunk/include/asm-mips/page.h b/trunk/include/asm-mips/page.h index e3301e54d559..b92dd8c760da 100644 --- a/trunk/include/asm-mips/page.h +++ b/trunk/include/asm-mips/page.h @@ -142,7 +142,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; /* * __pa()/__va() should be used only during mem init. 
*/ -#ifdef CONFIG_64BIT +#if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64) #define __pa(x) \ ({ \ unsigned long __x = (unsigned long)(x); \ diff --git a/trunk/include/asm-mips/system.h b/trunk/include/asm-mips/system.h index 480b574e2483..357251f42518 100644 --- a/trunk/include/asm-mips/system.h +++ b/trunk/include/asm-mips/system.h @@ -17,7 +17,6 @@ #include #include -#include #include #include #include @@ -195,6 +194,266 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) +#define __HAVE_ARCH_CMPXCHG 1 + +static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old, + unsigned long new) +{ + __u32 retval; + + if (cpu_has_llsc && R10000_LLSC_WAR) { + __asm__ __volatile__( + " .set push \n" + " .set noat \n" + " .set mips3 \n" + "1: ll %0, %2 # __cmpxchg_u32 \n" + " bne %0, %z3, 2f \n" + " .set mips0 \n" + " move $1, %z4 \n" + " .set mips3 \n" + " sc $1, %1 \n" + " beqzl $1, 1b \n" + "2: \n" + " .set pop \n" + : "=&r" (retval), "=R" (*m) + : "R" (*m), "Jr" (old), "Jr" (new) + : "memory"); + } else if (cpu_has_llsc) { + __asm__ __volatile__( + " .set push \n" + " .set noat \n" + " .set mips3 \n" + "1: ll %0, %2 # __cmpxchg_u32 \n" + " bne %0, %z3, 2f \n" + " .set mips0 \n" + " move $1, %z4 \n" + " .set mips3 \n" + " sc $1, %1 \n" + " beqz $1, 3f \n" + "2: \n" + " .subsection 2 \n" + "3: b 1b \n" + " .previous \n" + " .set pop \n" + : "=&r" (retval), "=R" (*m) + : "R" (*m), "Jr" (old), "Jr" (new) + : "memory"); + } else { + unsigned long flags; + + raw_local_irq_save(flags); + retval = *m; + if (retval == old) + *m = new; + raw_local_irq_restore(flags); /* implies memory barrier */ + } + + smp_llsc_mb(); + + return retval; +} + +static inline unsigned long __cmpxchg_u32_local(volatile int * m, + unsigned long old, unsigned long new) +{ + __u32 retval; + + if (cpu_has_llsc && R10000_LLSC_WAR) { + __asm__ __volatile__( + " .set push \n" + " .set noat \n" + " .set mips3 \n" + "1: ll %0, %2 # __cmpxchg_u32 \n" + " bne %0, %z3, 2f \n" + " .set mips0 \n" + " move $1, %z4 \n" + " .set mips3 \n" + " sc $1, %1 \n" + " beqzl $1, 1b \n" + "2: \n" + " .set pop \n" + : "=&r" (retval), "=R" (*m) + : "R" (*m), "Jr" (old), "Jr" (new) + : "memory"); + } else if (cpu_has_llsc) { + __asm__ __volatile__( + " .set push \n" + " .set noat \n" + " .set mips3 \n" + "1: ll %0, %2 # __cmpxchg_u32 \n" + " bne %0, %z3, 2f \n" + " .set mips0 \n" + " move $1, %z4 \n" + " .set mips3 \n" + " sc $1, %1 \n" + " beqz $1, 1b \n" + "2: \n" + " .set pop \n" + : "=&r" (retval), "=R" (*m) + : "R" (*m), "Jr" (old), "Jr" (new) + : "memory"); + } else { + unsigned long flags; + + local_irq_save(flags); + retval = *m; + if (retval == old) + *m = new; + local_irq_restore(flags); /* implies memory barrier */ + } + + return retval; +} + +#ifdef CONFIG_64BIT +static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old, + unsigned long new) +{ + __u64 retval; + + if (cpu_has_llsc && R10000_LLSC_WAR) { + __asm__ __volatile__( + " .set push \n" + " .set noat \n" + " .set mips3 \n" + "1: lld %0, %2 # __cmpxchg_u64 \n" + " bne %0, %z3, 2f \n" + " move $1, %z4 \n" + " scd $1, %1 \n" + " beqzl $1, 1b \n" + "2: \n" + " .set pop \n" + : "=&r" (retval), "=R" (*m) + : "R" (*m), "Jr" (old), "Jr" (new) + : "memory"); + } else if (cpu_has_llsc) { + __asm__ __volatile__( + " .set push \n" + " .set noat \n" + " .set mips3 \n" + "1: lld %0, %2 # __cmpxchg_u64 \n" + " bne %0, %z3, 2f \n" + " 
move $1, %z4 \n" + " scd $1, %1 \n" + " beqz $1, 3f \n" + "2: \n" + " .subsection 2 \n" + "3: b 1b \n" + " .previous \n" + " .set pop \n" + : "=&r" (retval), "=R" (*m) + : "R" (*m), "Jr" (old), "Jr" (new) + : "memory"); + } else { + unsigned long flags; + + raw_local_irq_save(flags); + retval = *m; + if (retval == old) + *m = new; + raw_local_irq_restore(flags); /* implies memory barrier */ + } + + smp_llsc_mb(); + + return retval; +} + +static inline unsigned long __cmpxchg_u64_local(volatile int * m, + unsigned long old, unsigned long new) +{ + __u64 retval; + + if (cpu_has_llsc && R10000_LLSC_WAR) { + __asm__ __volatile__( + " .set push \n" + " .set noat \n" + " .set mips3 \n" + "1: lld %0, %2 # __cmpxchg_u64 \n" + " bne %0, %z3, 2f \n" + " move $1, %z4 \n" + " scd $1, %1 \n" + " beqzl $1, 1b \n" + "2: \n" + " .set pop \n" + : "=&r" (retval), "=R" (*m) + : "R" (*m), "Jr" (old), "Jr" (new) + : "memory"); + } else if (cpu_has_llsc) { + __asm__ __volatile__( + " .set push \n" + " .set noat \n" + " .set mips3 \n" + "1: lld %0, %2 # __cmpxchg_u64 \n" + " bne %0, %z3, 2f \n" + " move $1, %z4 \n" + " scd $1, %1 \n" + " beqz $1, 1b \n" + "2: \n" + " .set pop \n" + : "=&r" (retval), "=R" (*m) + : "R" (*m), "Jr" (old), "Jr" (new) + : "memory"); + } else { + unsigned long flags; + + local_irq_save(flags); + retval = *m; + if (retval == old) + *m = new; + local_irq_restore(flags); /* implies memory barrier */ + } + + return retval; +} + +#else +extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels( + volatile int * m, unsigned long old, unsigned long new); +#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels +extern unsigned long __cmpxchg_u64_local_unsupported_on_32bit_kernels( + volatile int * m, unsigned long old, unsigned long new); +#define __cmpxchg_u64_local __cmpxchg_u64_local_unsupported_on_32bit_kernels +#endif + +/* This function doesn't exist, so you'll get a linker error + if something tries to do an invalid cmpxchg(). 
*/ +extern void __cmpxchg_called_with_bad_pointer(void); + +static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old, + unsigned long new, int size) +{ + switch (size) { + case 4: + return __cmpxchg_u32(ptr, old, new); + case 8: + return __cmpxchg_u64(ptr, old, new); + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +static inline unsigned long __cmpxchg_local(volatile void * ptr, + unsigned long old, unsigned long new, int size) +{ + switch (size) { + case 4: + return __cmpxchg_u32_local(ptr, old, new); + case 8: + return __cmpxchg_u64_local(ptr, old, new); + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +#define cmpxchg(ptr,old,new) \ + ((__typeof__(*(ptr)))__cmpxchg((ptr), \ + (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr)))) + +#define cmpxchg_local(ptr,old,new) \ + ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \ + (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr)))) + extern void set_handler (unsigned long offset, void *addr, unsigned long len); extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len); diff --git a/trunk/include/asm-sh/flat.h b/trunk/include/asm-sh/flat.h index dc4f5950dafa..0d5cc04ab005 100644 --- a/trunk/include/asm-sh/flat.h +++ b/trunk/include/asm-sh/flat.h @@ -16,9 +16,8 @@ #define flat_argvp_envp_on_stack() 0 #define flat_old_ram_flag(flags) (flags) #define flat_reloc_valid(reloc, size) ((reloc) <= (size)) -#define flat_get_addr_from_rp(rp, relval, flags, p) get_unaligned(rp) +#define flat_get_addr_from_rp(rp, relval, flags) get_unaligned(rp) #define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val,rp) #define flat_get_relocate_addr(rel) (rel) -#define flat_set_persistent(relval, p) 0 #endif /* __ASM_SH_FLAT_H */ diff --git a/trunk/include/asm-v850/flat.h b/trunk/include/asm-v850/flat.h index 17f0ea566611..3888f59d6881 100644 --- a/trunk/include/asm-v850/flat.h +++ b/trunk/include/asm-v850/flat.h @@ -25,7 +25,6 @@ #define flat_stack_align(sp) /* nothing needed */ #define flat_argvp_envp_on_stack() 0 #define flat_old_ram_flag(flags) (flags) -#define flat_set_persistent(relval, p) 0 /* We store the type of relocation in the top 4 bits of the `relval.' */ @@ -47,8 +46,7 @@ flat_get_relocate_addr (unsigned long relval) For the v850, RP should always be half-word aligned. 
*/ static inline unsigned long flat_get_addr_from_rp (unsigned long *rp, unsigned long relval, - unsigned long flags, - unsigned long *persistent) + unsigned long flags) { short *srp = (short *)rp; diff --git a/trunk/include/asm-x86_64/processor.h b/trunk/include/asm-x86_64/processor.h index 31f579b828f2..19525175b91c 100644 --- a/trunk/include/asm-x86_64/processor.h +++ b/trunk/include/asm-x86_64/processor.h @@ -371,7 +371,7 @@ static inline void sync_core(void) #define ARCH_HAS_PREFETCH static inline void prefetch(void *x) { - asm volatile("prefetcht0 (%0)" :: "r" (x)); + asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); } #define ARCH_HAS_PREFETCHW 1 diff --git a/trunk/include/linux/cpufreq.h b/trunk/include/linux/cpufreq.h index 3ec6e7ff5fbd..963051a967d6 100644 --- a/trunk/include/linux/cpufreq.h +++ b/trunk/include/linux/cpufreq.h @@ -32,7 +32,15 @@ * CPUFREQ NOTIFIER INTERFACE * *********************************************************************/ +#ifdef CONFIG_CPU_FREQ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); +#else +static inline int cpufreq_register_notifier(struct notifier_block *nb, + unsigned int list) +{ + return 0; +} +#endif int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list); #define CPUFREQ_TRANSITION_NOTIFIER (0) @@ -260,17 +268,22 @@ struct freq_attr { int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); int cpufreq_update_policy(unsigned int cpu); -/* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it */ -unsigned int cpufreq_get(unsigned int cpu); -/* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it */ +/* + * query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it + */ #ifdef CONFIG_CPU_FREQ unsigned int cpufreq_quick_get(unsigned int cpu); +unsigned int cpufreq_get(unsigned int cpu); #else static inline unsigned int cpufreq_quick_get(unsigned int cpu) { return 0; } +static inline unsigned int cpufreq_get(unsigned int cpu) +{ + return 0; +} #endif diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h index 313c6b6e774f..a01ac6dd5f5e 100644 --- a/trunk/include/linux/sched.h +++ b/trunk/include/linux/sched.h @@ -113,7 +113,7 @@ extern unsigned long avenrun[]; /* Load averages */ #define FSHIFT 11 /* nr of bits of precision */ #define FIXED_1 (1<robust_list; - struct robust_list __user *entry, *next_entry, *pending; - unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip; + struct robust_list __user *entry, *pending; + unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; unsigned long futex_offset; - int rc; /* * Fetch the list head (which was registered earlier, via @@ -1966,13 +1965,11 @@ void exit_robust_list(struct task_struct *curr) if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) return; - next_entry = NULL; /* avoid warning with gcc */ + if (pending) + handle_futex_death((void __user *)pending + futex_offset, + curr, pip); + while (entry != &head->list) { - /* - * Fetch the next entry in the list before calling - * handle_futex_death: - */ - rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi); /* * A pending lock might already be on the list, so * don't process it twice: @@ -1981,10 +1978,11 @@ void exit_robust_list(struct task_struct *curr) if (handle_futex_death((void __user *)entry + futex_offset, curr, pi)) return; - if (rc) + /* + * Fetch the next entry in the list: + */ + if (fetch_robust_entry(&entry, &entry->next, &pi)) return; - entry = next_entry; - 
pi = next_pi; /* * Avoid excessively long or circular lists: */ @@ -1993,10 +1991,6 @@ void exit_robust_list(struct task_struct *curr) cond_resched(); } - - if (pending) - handle_futex_death((void __user *)pending + futex_offset, - curr, pip); } long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, diff --git a/trunk/kernel/futex_compat.c b/trunk/kernel/futex_compat.c index 2c2e2954b713..7e52eb051f22 100644 --- a/trunk/kernel/futex_compat.c +++ b/trunk/kernel/futex_compat.c @@ -38,11 +38,10 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, void compat_exit_robust_list(struct task_struct *curr) { struct compat_robust_list_head __user *head = curr->compat_robust_list; - struct robust_list __user *entry, *next_entry, *pending; - unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip; - compat_uptr_t uentry, next_uentry, upending; + struct robust_list __user *entry, *pending; + unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; + compat_uptr_t uentry, upending; compat_long_t futex_offset; - int rc; /* * Fetch the list head (which was registered earlier, via @@ -62,15 +61,10 @@ void compat_exit_robust_list(struct task_struct *curr) if (fetch_robust_entry(&upending, &pending, &head->list_op_pending, &pip)) return; + if (pending) + handle_futex_death((void __user *)pending + futex_offset, curr, pip); - next_entry = NULL; /* avoid warning with gcc */ while (entry != (struct robust_list __user *) &head->list) { - /* - * Fetch the next entry in the list before calling - * handle_futex_death: - */ - rc = fetch_robust_entry(&next_uentry, &next_entry, - (compat_uptr_t __user *)&entry->next, &next_pi); /* * A pending lock might already be on the list, so * dont process it twice: @@ -80,11 +74,12 @@ void compat_exit_robust_list(struct task_struct *curr) curr, pi)) return; - if (rc) + /* + * Fetch the next entry in the list: + */ + if (fetch_robust_entry(&uentry, &entry, + (compat_uptr_t __user *)&entry->next, &pi)) return; - uentry = next_uentry; - entry = next_entry; - pi = next_pi; /* * Avoid excessively long or circular lists: */ @@ -93,9 +88,6 @@ void compat_exit_robust_list(struct task_struct *curr) cond_resched(); } - if (pending) - handle_futex_death((void __user *)pending + futex_offset, - curr, pip); } asmlinkage long diff --git a/trunk/kernel/power/Kconfig b/trunk/kernel/power/Kconfig index 14b0e10dc95c..c8580a1e6873 100644 --- a/trunk/kernel/power/Kconfig +++ b/trunk/kernel/power/Kconfig @@ -110,7 +110,7 @@ config SUSPEND config HIBERNATION_UP_POSSIBLE bool - depends on X86 || PPC64_SWSUSP || PPC32 + depends on X86 || PPC64_SWSUSP || FRV || PPC32 depends on !SMP default y diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c index 67c67a87146e..c9fbe8e73a45 100644 --- a/trunk/kernel/sched_fair.c +++ b/trunk/kernel/sched_fair.c @@ -639,16 +639,6 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) se->block_start = 0; se->sum_sleep_runtime += delta; - - /* - * Blocking time is in units of nanosecs, so shift by 20 to - * get a milliseconds-range estimation of the amount of - * time that the task spent sleeping: - */ - if (unlikely(prof_on == SLEEP_PROFILING)) { - profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk), - delta >> 20); - } } #endif } diff --git a/trunk/kernel/signal.c b/trunk/kernel/signal.c index 792952381092..9fb91a32edda 100644 --- a/trunk/kernel/signal.c +++ b/trunk/kernel/signal.c @@ -531,18 +531,18 @@ static int check_kill_permission(int sig, struct siginfo *info, if (!valid_signal(sig)) return 
error; - if (info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) { - error = audit_signal_info(sig, t); /* Let audit system see the signal */ - if (error) - return error; - error = -EPERM; - if (((sig != SIGCONT) || - (process_session(current) != process_session(t))) - && (current->euid ^ t->suid) && (current->euid ^ t->uid) - && (current->uid ^ t->suid) && (current->uid ^ t->uid) - && !capable(CAP_KILL)) + error = audit_signal_info(sig, t); /* Let audit system see the signal */ + if (error) + return error; + + error = -EPERM; + if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) + && ((sig != SIGCONT) || + (process_session(current) != process_session(t))) + && (current->euid ^ t->suid) && (current->euid ^ t->uid) + && (current->uid ^ t->suid) && (current->uid ^ t->uid) + && !capable(CAP_KILL)) return error; - } return security_task_kill(t, info, sig, 0); } diff --git a/trunk/kernel/sys.c b/trunk/kernel/sys.c index 8ae2e636eb1b..1b33b05d346b 100644 --- a/trunk/kernel/sys.c +++ b/trunk/kernel/sys.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include @@ -879,7 +878,6 @@ void kernel_power_off(void) kernel_shutdown_prepare(SYSTEM_POWER_OFF); if (pm_power_off_prepare) pm_power_off_prepare(); - disable_nonboot_cpus(); sysdev_shutdown(); printk(KERN_EMERG "Power down.\n"); machine_power_off(); diff --git a/trunk/kernel/time/timer_stats.c b/trunk/kernel/time/timer_stats.c index c36bb7ed0301..3c38fb5eae1b 100644 --- a/trunk/kernel/time/timer_stats.c +++ b/trunk/kernel/time/timer_stats.c @@ -327,9 +327,8 @@ static int tstats_show(struct seq_file *m, void *v) ms = 1; if (events && period.tv_sec) - seq_printf(m, "%ld total events, %ld.%03ld events/sec\n", - events, events * 1000 / ms, - (events * 1000000 / ms) % 1000); + seq_printf(m, "%ld total events, %ld.%ld events/sec\n", events, + events / period.tv_sec, events * 1000 / ms); else seq_printf(m, "%ld total events\n", events); diff --git a/trunk/lib/Kconfig.debug b/trunk/lib/Kconfig.debug index cdc9b099e620..50a94eee4d92 100644 --- a/trunk/lib/Kconfig.debug +++ b/trunk/lib/Kconfig.debug @@ -284,7 +284,7 @@ config LOCKDEP select KALLSYMS_ALL config LOCK_STAT - bool "Lock usage statistics" + bool "Lock usage statisitics" depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT select LOCKDEP select DEBUG_SPINLOCK @@ -294,8 +294,6 @@ config LOCK_STAT help This feature enables tracking lock contention points - For more details, see Documentation/lockstat.txt - config DEBUG_LOCKDEP bool "Lock dependency engine debugging" depends on DEBUG_KERNEL && LOCKDEP diff --git a/trunk/lib/Makefile b/trunk/lib/Makefile index 4f3f3e256501..6b0ba8cf4e5f 100644 --- a/trunk/lib/Makefile +++ b/trunk/lib/Makefile @@ -2,7 +2,7 @@ # Makefile for some libs needed in the kernel. 
# -lib-y := ctype.o string.o vsprintf.o cmdline.o \ +lib-y := ctype.o string.o vsprintf.o kasprintf.o cmdline.o \ rbtree.o radix-tree.o dump_stack.o \ idr.o int_sqrt.o bitmap.o extable.o prio_tree.o \ sha1.o irq_regs.o reciprocal_div.o argv_split.o @@ -13,7 +13,7 @@ lib-$(CONFIG_SMP) += cpumask.o lib-y += kobject.o kref.o kobject_uevent.o klist.o obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ - bust_spinlocks.o hexdump.o kasprintf.o + bust_spinlocks.o hexdump.o ifeq ($(CONFIG_DEBUG_KOBJECT),y) CFLAGS_kobject.o += -DDEBUG diff --git a/trunk/mm/Kconfig b/trunk/mm/Kconfig index a7609cbcb00d..e24d348083c3 100644 --- a/trunk/mm/Kconfig +++ b/trunk/mm/Kconfig @@ -137,7 +137,6 @@ config SPLIT_PTLOCK_CPUS int default "4096" if ARM && !CPU_CACHE_VIPT default "4096" if PARISC && !PA20 - default "4096" if XEN default "4" # diff --git a/trunk/mm/filemap.c b/trunk/mm/filemap.c index 15c8413ee929..90b657b50f81 100644 --- a/trunk/mm/filemap.c +++ b/trunk/mm/filemap.c @@ -1388,7 +1388,6 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; if (unlikely(vmf->pgoff >= size)) { unlock_page(page); - page_cache_release(page); goto outside_data_content; } diff --git a/trunk/mm/fremap.c b/trunk/mm/fremap.c index 95bcb5641c72..c395b1abf082 100644 --- a/trunk/mm/fremap.c +++ b/trunk/mm/fremap.c @@ -160,7 +160,7 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size, if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR)) goto out; - if (!(vma->vm_flags & VM_CAN_NONLINEAR)) + if (!vma->vm_flags & VM_CAN_NONLINEAR) goto out; if (end <= start || start < vma->vm_start || end > vma->vm_end) diff --git a/trunk/mm/hugetlb.c b/trunk/mm/hugetlb.c index eab8c428cc93..84c795ee2d65 100644 --- a/trunk/mm/hugetlb.c +++ b/trunk/mm/hugetlb.c @@ -42,7 +42,7 @@ static void clear_huge_page(struct page *page, unsigned long addr) might_sleep(); for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) { cond_resched(); - clear_user_highpage(page + i, addr + i * PAGE_SIZE); + clear_user_highpage(page + i, addr); } } diff --git a/trunk/mm/memory.c b/trunk/mm/memory.c index f82b359b2745..ca8cac11bd2c 100644 --- a/trunk/mm/memory.c +++ b/trunk/mm/memory.c @@ -1639,7 +1639,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, struct page *old_page, *new_page; pte_t entry; int reuse = 0, ret = 0; - int page_mkwrite = 0; struct page *dirty_page = NULL; old_page = vm_normal_page(vma, address, orig_pte); @@ -1688,8 +1687,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, page_cache_release(old_page); if (!pte_same(*page_table, orig_pte)) goto unlock; - - page_mkwrite = 1; } dirty_page = old_page; get_page(dirty_page); @@ -1777,7 +1774,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, * do_no_page is protected similarly. */ wait_on_page_locked(dirty_page); - set_page_dirty_balance(dirty_page, page_mkwrite); + set_page_dirty_balance(dirty_page); put_page(dirty_page); } return ret; @@ -2310,14 +2307,13 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, * do not need to flush old virtual caches or the TLB. * * We enter with non-exclusive mmap_sem (to exclude vma changes, - * but allow concurrent faults), and pte neither mapped nor locked. + * but allow concurrent faults), and pte mapped but not yet locked. * We return with mmap_sem still held, but pte unmapped and unlocked. 
*/ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd, + unsigned long address, pte_t *page_table, pmd_t *pmd, pgoff_t pgoff, unsigned int flags, pte_t orig_pte) { - pte_t *page_table; spinlock_t *ptl; struct page *page; pte_t entry; @@ -2325,13 +2321,13 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, struct page *dirty_page = NULL; struct vm_fault vmf; int ret; - int page_mkwrite = 0; vmf.virtual_address = (void __user *)(address & PAGE_MASK); vmf.pgoff = pgoff; vmf.flags = flags; vmf.page = NULL; + pte_unmap(page_table); BUG_ON(vma->vm_flags & VM_PFNMAP); if (likely(vma->vm_ops->fault)) { @@ -2402,7 +2398,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, anon = 1; /* no anon but release vmf.page */ goto out; } - page_mkwrite = 1; } } @@ -2458,7 +2453,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, if (anon) page_cache_release(vmf.page); else if (dirty_page) { - set_page_dirty_balance(dirty_page, page_mkwrite); + set_page_dirty_balance(dirty_page); put_page(dirty_page); } @@ -2473,8 +2468,8 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma, - vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff; unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0); - pte_unmap(page_table); - return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); + return __do_fault(mm, vma, address, page_table, pmd, pgoff, + flags, orig_pte); } @@ -2557,7 +2552,9 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma, } pgoff = pte_to_pgoff(orig_pte); - return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); + + return __do_fault(mm, vma, address, page_table, pmd, pgoff, + flags, orig_pte); } /* diff --git a/trunk/mm/page-writeback.c b/trunk/mm/page-writeback.c index 44720363374c..63512a9ed57e 100644 --- a/trunk/mm/page-writeback.c +++ b/trunk/mm/page-writeback.c @@ -274,9 +274,9 @@ static void balance_dirty_pages(struct address_space *mapping) pdflush_operation(background_writeout, 0); } -void set_page_dirty_balance(struct page *page, int page_mkwrite) +void set_page_dirty_balance(struct page *page) { - if (set_page_dirty(page) || page_mkwrite) { + if (set_page_dirty(page)) { struct address_space *mapping = page_mapping(page); if (mapping) diff --git a/trunk/net/ieee80211/ieee80211_rx.c b/trunk/net/ieee80211/ieee80211_rx.c index 6284c99b456e..f2de2e48b021 100644 --- a/trunk/net/ieee80211/ieee80211_rx.c +++ b/trunk/net/ieee80211/ieee80211_rx.c @@ -366,12 +366,6 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, frag = WLAN_GET_SEQ_FRAG(sc); hdrlen = ieee80211_get_hdrlen(fc); - if (skb->len < hdrlen) { - printk(KERN_INFO "%s: invalid SKB length %d\n", - dev->name, skb->len); - goto rx_dropped; - } - /* Put this code here so that we avoid duplicating it in all * Rx paths. - Jean II */ #ifdef CONFIG_WIRELESS_EXT diff --git a/trunk/net/ieee80211/softmac/ieee80211softmac_assoc.c b/trunk/net/ieee80211/softmac/ieee80211softmac_assoc.c index e475f2e1be13..afb6c6698b27 100644 --- a/trunk/net/ieee80211/softmac/ieee80211softmac_assoc.c +++ b/trunk/net/ieee80211/softmac/ieee80211softmac_assoc.c @@ -273,6 +273,8 @@ ieee80211softmac_assoc_work(struct work_struct *work) ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify_scan, NULL); if (ieee80211softmac_start_scan(mac)) { dprintk(KERN_INFO PFX "Associate: failed to initiate scan. 
Is device up?\n"); + mac->associnfo.associating = 0; + mac->associnfo.associated = 0; } goto out; } else { diff --git a/trunk/net/ieee80211/softmac/ieee80211softmac_wx.c b/trunk/net/ieee80211/softmac/ieee80211softmac_wx.c index 5742dc803b79..d054e9224b3e 100644 --- a/trunk/net/ieee80211/softmac/ieee80211softmac_wx.c +++ b/trunk/net/ieee80211/softmac/ieee80211softmac_wx.c @@ -70,30 +70,44 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev, char *extra) { struct ieee80211softmac_device *sm = ieee80211_priv(net_dev); + struct ieee80211softmac_network *n; struct ieee80211softmac_auth_queue_item *authptr; int length = 0; check_assoc_again: mutex_lock(&sm->associnfo.mutex); + /* Check if we're already associating to this or another network + * If it's another network, cancel and start over with our new network + * If it's our network, ignore the change, we're already doing it! + */ if((sm->associnfo.associating || sm->associnfo.associated) && (data->essid.flags && data->essid.length)) { - dprintk(KERN_INFO PFX "Canceling existing associate request!\n"); - /* Cancel assoc work */ - cancel_delayed_work(&sm->associnfo.work); - /* We don't have to do this, but it's a little cleaner */ - list_for_each_entry(authptr, &sm->auth_queue, list) - cancel_delayed_work(&authptr->work); - sm->associnfo.bssvalid = 0; - sm->associnfo.bssfixed = 0; - sm->associnfo.associating = 0; - sm->associnfo.associated = 0; - /* We must unlock to avoid deadlocks with the assoc workqueue - * on the associnfo.mutex */ - mutex_unlock(&sm->associnfo.mutex); - flush_scheduled_work(); - /* Avoid race! Check assoc status again. Maybe someone started an - * association while we flushed. */ - goto check_assoc_again; + /* Get the associating network */ + n = ieee80211softmac_get_network_by_bssid(sm, sm->associnfo.bssid); + if(n && n->essid.len == data->essid.length && + !memcmp(n->essid.data, extra, n->essid.len)) { + dprintk(KERN_INFO PFX "Already associating or associated to "MAC_FMT"\n", + MAC_ARG(sm->associnfo.bssid)); + goto out; + } else { + dprintk(KERN_INFO PFX "Canceling existing associate request!\n"); + /* Cancel assoc work */ + cancel_delayed_work(&sm->associnfo.work); + /* We don't have to do this, but it's a little cleaner */ + list_for_each_entry(authptr, &sm->auth_queue, list) + cancel_delayed_work(&authptr->work); + sm->associnfo.bssvalid = 0; + sm->associnfo.bssfixed = 0; + sm->associnfo.associating = 0; + sm->associnfo.associated = 0; + /* We must unlock to avoid deadlocks with the assoc workqueue + * on the associnfo.mutex */ + mutex_unlock(&sm->associnfo.mutex); + flush_scheduled_work(); + /* Avoid race! Check assoc status again. Maybe someone started an + * association while we flushed. 
*/ + goto check_assoc_again; + } } sm->associnfo.static_essid = 0; @@ -114,7 +128,7 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev, sm->associnfo.associating = 1; /* queue lower level code to do work (if necessary) */ schedule_delayed_work(&sm->associnfo.work, 0); - +out: mutex_unlock(&sm->associnfo.mutex); return 0; @@ -139,13 +153,13 @@ ieee80211softmac_wx_get_essid(struct net_device *net_dev, data->essid.length = sm->associnfo.req_essid.len; data->essid.flags = 1; /* active */ memcpy(extra, sm->associnfo.req_essid.data, sm->associnfo.req_essid.len); - dprintk(KERN_INFO PFX "Getting essid from req_essid\n"); - } else if (sm->associnfo.associated || sm->associnfo.associating) { + } + /* If we're associating/associated, return that */ + if (sm->associnfo.associated || sm->associnfo.associating) { data->essid.length = sm->associnfo.associate_essid.len; data->essid.flags = 1; /* active */ memcpy(extra, sm->associnfo.associate_essid.data, sm->associnfo.associate_essid.len); - dprintk(KERN_INFO PFX "Getting essid from associate_essid\n"); } mutex_unlock(&sm->associnfo.mutex); diff --git a/trunk/net/ipv4/tcp_input.c b/trunk/net/ipv4/tcp_input.c index f893e90061eb..bbad2cdb74b7 100644 --- a/trunk/net/ipv4/tcp_input.c +++ b/trunk/net/ipv4/tcp_input.c @@ -2420,9 +2420,6 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb, __u32 dval = min(tp->fackets_out, packets_acked); tp->fackets_out -= dval; } - /* hint's skb might be NULL but we don't need to care */ - tp->fastpath_cnt_hint -= min_t(u32, packets_acked, - tp->fastpath_cnt_hint); tp->packets_out -= packets_acked; BUG_ON(tcp_skb_pcount(skb) == 0); diff --git a/trunk/net/ipv4/tcp_ipv4.c b/trunk/net/ipv4/tcp_ipv4.c index e089a978e128..9c94627c8c7e 100644 --- a/trunk/net/ipv4/tcp_ipv4.c +++ b/trunk/net/ipv4/tcp_ipv4.c @@ -833,7 +833,8 @@ static struct tcp_md5sig_key * return NULL; for (i = 0; i < tp->md5sig_info->entries4; i++) { if (tp->md5sig_info->keys4[i].addr == addr) - return &tp->md5sig_info->keys4[i].base; + return (struct tcp_md5sig_key *) + &tp->md5sig_info->keys4[i]; } return NULL; } @@ -864,9 +865,9 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr); if (key) { /* Pre-existing entry - just update that one. 
*/ - kfree(key->base.key); - key->base.key = newkey; - key->base.keylen = newkeylen; + kfree(key->key); + key->key = newkey; + key->keylen = newkeylen; } else { struct tcp_md5sig_info *md5sig; @@ -905,9 +906,9 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, md5sig->alloced4++; } md5sig->entries4++; - md5sig->keys4[md5sig->entries4 - 1].addr = addr; - md5sig->keys4[md5sig->entries4 - 1].base.key = newkey; - md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen; + md5sig->keys4[md5sig->entries4 - 1].addr = addr; + md5sig->keys4[md5sig->entries4 - 1].key = newkey; + md5sig->keys4[md5sig->entries4 - 1].keylen = newkeylen; } return 0; } @@ -929,7 +930,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) for (i = 0; i < tp->md5sig_info->entries4; i++) { if (tp->md5sig_info->keys4[i].addr == addr) { /* Free the key */ - kfree(tp->md5sig_info->keys4[i].base.key); + kfree(tp->md5sig_info->keys4[i].key); tp->md5sig_info->entries4--; if (tp->md5sig_info->entries4 == 0) { @@ -963,7 +964,7 @@ static void tcp_v4_clear_md5_list(struct sock *sk) if (tp->md5sig_info->entries4) { int i; for (i = 0; i < tp->md5sig_info->entries4; i++) - kfree(tp->md5sig_info->keys4[i].base.key); + kfree(tp->md5sig_info->keys4[i].key); tp->md5sig_info->entries4 = 0; tcp_free_md5sig_pool(); } diff --git a/trunk/net/ipv6/ndisc.c b/trunk/net/ipv6/ndisc.c index 5b596659177c..73a894a2152c 100644 --- a/trunk/net/ipv6/ndisc.c +++ b/trunk/net/ipv6/ndisc.c @@ -1268,10 +1268,9 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) if (ipv6_addr_equal(dest, target)) { on_link = 1; - } else if (ipv6_addr_type(target) != - (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) { + } else if (!(ipv6_addr_type(target) & IPV6_ADDR_LINKLOCAL)) { ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: target address is not link-local unicast.\n"); + "ICMPv6 Redirect: target address is not link-local.\n"); return; } @@ -1345,9 +1344,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, } if (!ipv6_addr_equal(&ipv6_hdr(skb)->daddr, target) && - ipv6_addr_type(target) != (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) { + !(ipv6_addr_type(target) & IPV6_ADDR_LINKLOCAL)) { ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: target address is not link-local unicast.\n"); + "ICMPv6 Redirect: target address is not link-local.\n"); return; } diff --git a/trunk/net/ipv6/tcp_ipv6.c b/trunk/net/ipv6/tcp_ipv6.c index 3e06799b37a6..0f7defb482e9 100644 --- a/trunk/net/ipv6/tcp_ipv6.c +++ b/trunk/net/ipv6/tcp_ipv6.c @@ -539,7 +539,7 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, for (i = 0; i < tp->md5sig_info->entries6; i++) { if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0) - return &tp->md5sig_info->keys6[i].base; + return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i]; } return NULL; } @@ -567,9 +567,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer, key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer); if (key) { /* modify existing entry - just update that one */ - kfree(key->base.key); - key->base.key = newkey; - key->base.keylen = newkeylen; + kfree(key->key); + key->key = newkey; + key->keylen = newkeylen; } else { /* reallocate new list if current one is full. 
*/ if (!tp->md5sig_info) { @@ -603,8 +603,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer, ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr, peer); - tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey; - tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen; + tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey; + tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen; tp->md5sig_info->entries6++; } @@ -626,7 +626,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer) for (i = 0; i < tp->md5sig_info->entries6; i++) { if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) { /* Free the key */ - kfree(tp->md5sig_info->keys6[i].base.key); + kfree(tp->md5sig_info->keys6[i].key); tp->md5sig_info->entries6--; if (tp->md5sig_info->entries6 == 0) { @@ -657,7 +657,7 @@ static void tcp_v6_clear_md5_list (struct sock *sk) if (tp->md5sig_info->entries6) { for (i = 0; i < tp->md5sig_info->entries6; i++) - kfree(tp->md5sig_info->keys6[i].base.key); + kfree(tp->md5sig_info->keys6[i].key); tp->md5sig_info->entries6 = 0; tcp_free_md5sig_pool(); } @@ -668,7 +668,7 @@ static void tcp_v6_clear_md5_list (struct sock *sk) if (tp->md5sig_info->entries4) { for (i = 0; i < tp->md5sig_info->entries4; i++) - kfree(tp->md5sig_info->keys4[i].base.key); + kfree(tp->md5sig_info->keys4[i].key); tp->md5sig_info->entries4 = 0; tcp_free_md5sig_pool(); } diff --git a/trunk/net/mac80211/ieee80211.c b/trunk/net/mac80211/ieee80211.c index ff2172ffd861..7286c389a4d0 100644 --- a/trunk/net/mac80211/ieee80211.c +++ b/trunk/net/mac80211/ieee80211.c @@ -5259,7 +5259,7 @@ static void __exit ieee80211_exit(void) } -subsys_initcall(ieee80211_init); +module_init(ieee80211_init); module_exit(ieee80211_exit); MODULE_DESCRIPTION("IEEE 802.11 subsystem"); diff --git a/trunk/net/mac80211/rc80211_simple.c b/trunk/net/mac80211/rc80211_simple.c index 17b9f46bbf2b..f6780d63b342 100644 --- a/trunk/net/mac80211/rc80211_simple.c +++ b/trunk/net/mac80211/rc80211_simple.c @@ -431,7 +431,7 @@ static void __exit rate_control_simple_exit(void) } -subsys_initcall(rate_control_simple_init); +module_init(rate_control_simple_init); module_exit(rate_control_simple_exit); MODULE_DESCRIPTION("Simple rate control algorithm for ieee80211"); diff --git a/trunk/net/mac80211/wme.c b/trunk/net/mac80211/wme.c index 7ab82b376e1b..89ce81529694 100644 --- a/trunk/net/mac80211/wme.c +++ b/trunk/net/mac80211/wme.c @@ -424,7 +424,7 @@ static int wme_qdiscop_init(struct Qdisc *qd, struct rtattr *opt) skb_queue_head_init(&q->requeued[i]); q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops, qd->handle); - if (!q->queues[i]) { + if (q->queues[i] == 0) { q->queues[i] = &noop_qdisc; printk(KERN_ERR "%s child qdisc %i creation failed", dev->name, i); } diff --git a/trunk/net/rose/rose_loopback.c b/trunk/net/rose/rose_loopback.c index 114df6eec8c3..cd01642f0491 100644 --- a/trunk/net/rose/rose_loopback.c +++ b/trunk/net/rose/rose_loopback.c @@ -79,7 +79,7 @@ static void rose_loopback_timer(unsigned long param) skb_reset_transport_header(skb); - sk = rose_find_socket(lci_o, rose_loopback_neigh); + sk = rose_find_socket(lci_o, &rose_loopback_neigh); if (sk) { if (rose_process_rx_frame(sk, skb) == 0) kfree_skb(skb); @@ -88,7 +88,7 @@ static void rose_loopback_timer(unsigned long param) if (frametype == ROSE_CALL_REQUEST) { if ((dev = rose_dev_get(dest)) != NULL) { - if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0) + if 
(rose_rx_call_request(skb, dev, &rose_loopback_neigh, lci_o) == 0) kfree_skb(skb); } else { kfree_skb(skb); diff --git a/trunk/net/rose/rose_route.c b/trunk/net/rose/rose_route.c index 96f61a71b252..bbcbad1da0d0 100644 --- a/trunk/net/rose/rose_route.c +++ b/trunk/net/rose/rose_route.c @@ -45,7 +45,7 @@ static DEFINE_SPINLOCK(rose_neigh_list_lock); static struct rose_route *rose_route_list; static DEFINE_SPINLOCK(rose_route_list_lock); -struct rose_neigh *rose_loopback_neigh; +struct rose_neigh rose_loopback_neigh; /* * Add a new route to a node, and in the process add the node and the @@ -362,12 +362,7 @@ static int rose_del_node(struct rose_route_struct *rose_route, */ void rose_add_loopback_neigh(void) { - struct rose_neigh *sn; - - rose_loopback_neigh = kmalloc(sizeof(struct rose_neigh), GFP_KERNEL); - if (!rose_loopback_neigh) - return; - sn = rose_loopback_neigh; + struct rose_neigh *sn = &rose_loopback_neigh; sn->callsign = null_ax25_address; sn->digipeat = NULL; @@ -422,13 +417,13 @@ int rose_add_loopback_node(rose_address *address) rose_node->mask = 10; rose_node->count = 1; rose_node->loopback = 1; - rose_node->neighbour[0] = rose_loopback_neigh; + rose_node->neighbour[0] = &rose_loopback_neigh; /* Insert at the head of list. Address is always mask=10 */ rose_node->next = rose_node_list; rose_node_list = rose_node; - rose_loopback_neigh->count++; + rose_loopback_neigh.count++; out: spin_unlock_bh(&rose_node_list_lock); @@ -459,7 +454,7 @@ void rose_del_loopback_node(rose_address *address) rose_remove_node(rose_node); - rose_loopback_neigh->count--; + rose_loopback_neigh.count--; out: spin_unlock_bh(&rose_node_list_lock); diff --git a/trunk/net/sched/cls_u32.c b/trunk/net/sched/cls_u32.c index d4d5d2f271d2..8dbe36912ecb 100644 --- a/trunk/net/sched/cls_u32.c +++ b/trunk/net/sched/cls_u32.c @@ -502,7 +502,7 @@ static int u32_set_parms(struct tcf_proto *tp, unsigned long base, #ifdef CONFIG_NET_CLS_IND if (tb[TCA_U32_INDEV-1]) { - err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV-1]); + int err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV-1]); if (err < 0) goto errout; } diff --git a/trunk/net/sched/sch_sfq.c b/trunk/net/sched/sch_sfq.c index b542c875e154..3a23e30bc79e 100644 --- a/trunk/net/sched/sch_sfq.c +++ b/trunk/net/sched/sch_sfq.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -96,7 +95,7 @@ struct sfq_sched_data /* Variables */ struct timer_list perturb_timer; - u32 perturbation; + int perturbation; sfq_index tail; /* Index of current slot in round */ sfq_index max_depth; /* Maximal depth */ @@ -110,7 +109,12 @@ struct sfq_sched_data static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1) { - return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1); + int pert = q->perturbation; + + /* Have we any rotation primitives? If not, WHY? */ + h ^= (h1<>(0x1F - pert)); + h ^= h>>10; + return h & 0x3FF; } static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) @@ -252,13 +256,6 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch) q->ht[hash] = x = q->dep[SFQ_DEPTH].next; q->hash[x] = hash; } - /* If selected queue has length q->limit, this means that - * all another queues are empty and that we do simple tail drop, - * i.e. drop _this_ packet. 
- */ - if (q->qs[x].qlen >= q->limit) - return qdisc_drop(skb, sch); - sch->qstats.backlog += skb->len; __skb_queue_tail(&q->qs[x], skb); sfq_inc(q, x); @@ -297,19 +294,6 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc* sch) } sch->qstats.backlog += skb->len; __skb_queue_head(&q->qs[x], skb); - /* If selected queue has length q->limit+1, this means that - * all another queues are empty and we do simple tail drop. - * This packet is still requeued at head of queue, tail packet - * is dropped. - */ - if (q->qs[x].qlen > q->limit) { - skb = q->qs[x].prev; - __skb_unlink(skb, &q->qs[x]); - sch->qstats.drops++; - sch->qstats.backlog -= skb->len; - kfree_skb(skb); - return NET_XMIT_CN; - } sfq_inc(q, x); if (q->qs[x].qlen == 1) { /* The flow is new */ if (q->tail == SFQ_DEPTH) { /* It is the first flow */ @@ -386,10 +370,12 @@ static void sfq_perturbation(unsigned long arg) struct Qdisc *sch = (struct Qdisc*)arg; struct sfq_sched_data *q = qdisc_priv(sch); - get_random_bytes(&q->perturbation, 4); + q->perturbation = net_random()&0x1F; - if (q->perturb_period) - mod_timer(&q->perturb_timer, jiffies + q->perturb_period); + if (q->perturb_period) { + q->perturb_timer.expires = jiffies + q->perturb_period; + add_timer(&q->perturb_timer); + } } static int sfq_change(struct Qdisc *sch, struct rtattr *opt) @@ -405,7 +391,7 @@ static int sfq_change(struct Qdisc *sch, struct rtattr *opt) q->quantum = ctl->quantum ? : psched_mtu(sch->dev); q->perturb_period = ctl->perturb_period*HZ; if (ctl->limit) - q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1); + q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 2); qlen = sch->q.qlen; while (sch->q.qlen > q->limit) @@ -414,8 +400,8 @@ static int sfq_change(struct Qdisc *sch, struct rtattr *opt) del_timer(&q->perturb_timer); if (q->perturb_period) { - mod_timer(&q->perturb_timer, jiffies + q->perturb_period); - get_random_bytes(&q->perturbation, 4); + q->perturb_timer.expires = jiffies + q->perturb_period; + add_timer(&q->perturb_timer); } sch_tree_unlock(sch); return 0; @@ -437,13 +423,12 @@ static int sfq_init(struct Qdisc *sch, struct rtattr *opt) q->dep[i+SFQ_DEPTH].next = i+SFQ_DEPTH; q->dep[i+SFQ_DEPTH].prev = i+SFQ_DEPTH; } - q->limit = SFQ_DEPTH - 1; + q->limit = SFQ_DEPTH - 2; q->max_depth = 0; q->tail = SFQ_DEPTH; if (opt == NULL) { q->quantum = psched_mtu(sch->dev); q->perturb_period = 0; - get_random_bytes(&q->perturbation, 4); } else { int err = sfq_change(sch, opt); if (err) diff --git a/trunk/net/sctp/bind_addr.c b/trunk/net/sctp/bind_addr.c index dfffa94fb9f6..d35cbf5aae33 100644 --- a/trunk/net/sctp/bind_addr.c +++ b/trunk/net/sctp/bind_addr.c @@ -181,7 +181,7 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, * structure. */ int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr, - void fastcall (*rcu_call)(struct rcu_head *head, + void (*rcu_call)(struct rcu_head *head, void (*func)(struct rcu_head *head))) { struct sctp_sockaddr_entry *addr, *temp; diff --git a/trunk/net/sctp/input.c b/trunk/net/sctp/input.c index f9a0c9276e3b..47e56017f4ce 100644 --- a/trunk/net/sctp/input.c +++ b/trunk/net/sctp/input.c @@ -622,14 +622,6 @@ static int sctp_rcv_ootb(struct sk_buff *skb) if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type) goto discard; - /* RFC 4460, 2.11.2 - * This will discard packets with INIT chunk bundled as - * subsequent chunks in the packet. When INIT is first, - * the normal INIT processing will discard the chunk. 
- */ - if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data) - goto discard; - /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR * or a COOKIE ACK the SCTP Packet should be silently * discarded. diff --git a/trunk/net/sctp/inqueue.c b/trunk/net/sctp/inqueue.c index e4ea7fdf36ed..88aa22407549 100644 --- a/trunk/net/sctp/inqueue.c +++ b/trunk/net/sctp/inqueue.c @@ -130,14 +130,6 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) /* Force chunk->skb->data to chunk->chunk_end. */ skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data); - - /* Verify that we have at least chunk headers - * worth of buffer left. - */ - if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) { - sctp_chunk_free(chunk); - chunk = queue->in_progress = NULL; - } } } diff --git a/trunk/net/sctp/sm_make_chunk.c b/trunk/net/sctp/sm_make_chunk.c index 23ae37ec8711..2e34220d94cd 100644 --- a/trunk/net/sctp/sm_make_chunk.c +++ b/trunk/net/sctp/sm_make_chunk.c @@ -2499,52 +2499,6 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, return SCTP_ERROR_NO_ERROR; } -/* Verify the ASCONF packet before we process it. */ -int sctp_verify_asconf(const struct sctp_association *asoc, - struct sctp_paramhdr *param_hdr, void *chunk_end, - struct sctp_paramhdr **errp) { - sctp_addip_param_t *asconf_param; - union sctp_params param; - int length, plen; - - param.v = (sctp_paramhdr_t *) param_hdr; - while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) { - length = ntohs(param.p->length); - *errp = param.p; - - if (param.v > chunk_end - length || - length < sizeof(sctp_paramhdr_t)) - return 0; - - switch (param.p->type) { - case SCTP_PARAM_ADD_IP: - case SCTP_PARAM_DEL_IP: - case SCTP_PARAM_SET_PRIMARY: - asconf_param = (sctp_addip_param_t *)param.v; - plen = ntohs(asconf_param->param_hdr.length); - if (plen < sizeof(sctp_addip_param_t) + - sizeof(sctp_paramhdr_t)) - return 0; - break; - case SCTP_PARAM_SUCCESS_REPORT: - case SCTP_PARAM_ADAPTATION_LAYER_IND: - if (length != sizeof(sctp_addip_param_t)) - return 0; - - break; - default: - break; - } - - param.v += WORD_ROUND(length); - } - - if (param.v != chunk_end) - return 0; - - return 1; -} - /* Process an incoming ASCONF chunk with the next expected serial no. and * return an ASCONF_ACK chunk to be sent in response. 
*/ diff --git a/trunk/net/sctp/sm_statefuns.c b/trunk/net/sctp/sm_statefuns.c index a583d67cab63..177528ed3e1b 100644 --- a/trunk/net/sctp/sm_statefuns.c +++ b/trunk/net/sctp/sm_statefuns.c @@ -90,11 +90,6 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands); -static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const sctp_subtype_t type, - void *arg, - sctp_cmd_seq_t *commands); static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk); static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, @@ -103,7 +98,6 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, struct sctp_transport *transport); static sctp_disposition_t sctp_sf_abort_violation( - const struct sctp_endpoint *ep, const struct sctp_association *asoc, void *arg, sctp_cmd_seq_t *commands, @@ -117,13 +111,6 @@ static sctp_disposition_t sctp_sf_violation_chunklen( void *arg, sctp_cmd_seq_t *commands); -static sctp_disposition_t sctp_sf_violation_paramlen( - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const sctp_subtype_t type, - void *arg, - sctp_cmd_seq_t *commands); - static sctp_disposition_t sctp_sf_violation_ctsn( const struct sctp_endpoint *ep, const struct sctp_association *asoc, @@ -131,13 +118,6 @@ static sctp_disposition_t sctp_sf_violation_ctsn( void *arg, sctp_cmd_seq_t *commands); -static sctp_disposition_t sctp_sf_violation_chunk( - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const sctp_subtype_t type, - void *arg, - sctp_cmd_seq_t *commands); - /* Small helper function that checks if the chunk length * is of the appropriate length. The 'required_length' argument * is set to be the size of a specific chunk we are testing. @@ -201,21 +181,16 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep, struct sctp_chunk *chunk = arg; struct sctp_ulpevent *ev; - if (!sctp_vtag_verify_either(chunk, asoc)) - return sctp_sf_pdiscard(ep, asoc, type, arg, commands); - /* RFC 2960 6.10 Bundling * * An endpoint MUST NOT bundle INIT, INIT ACK or * SHUTDOWN COMPLETE with any other chunks. */ if (!chunk->singleton) - return sctp_sf_violation_chunk(ep, asoc, type, arg, commands); + return SCTP_DISPOSITION_VIOLATION; - /* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */ - if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) - return sctp_sf_violation_chunklen(ep, asoc, type, arg, - commands); + if (!sctp_vtag_verify_either(chunk, asoc)) + return sctp_sf_pdiscard(ep, asoc, type, arg, commands); /* RFC 2960 10.2 SCTP-to-ULP * @@ -475,17 +450,17 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep, if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(ep, asoc, type, arg, commands); + /* Make sure that the INIT-ACK chunk has a valid length */ + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t))) + return sctp_sf_violation_chunklen(ep, asoc, type, arg, + commands); /* 6.10 Bundling * An endpoint MUST NOT bundle INIT, INIT ACK or * SHUTDOWN COMPLETE with any other chunks. 
*/ if (!chunk->singleton) - return sctp_sf_violation_chunk(ep, asoc, type, arg, commands); + return SCTP_DISPOSITION_VIOLATION; - /* Make sure that the INIT-ACK chunk has a valid length */ - if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t))) - return sctp_sf_violation_chunklen(ep, asoc, type, arg, - commands); /* Grab the INIT header. */ chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data; @@ -610,7 +585,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep, * control endpoint, respond with an ABORT. */ if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) - return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); + return sctp_sf_ootb(ep, asoc, type, arg, commands); /* Make sure that the COOKIE_ECHO chunk has a valid length. * In this case, we check that we have enough for at least a @@ -2521,11 +2496,6 @@ sctp_disposition_t sctp_sf_do_9_2_reshutack(const struct sctp_endpoint *ep, struct sctp_chunk *chunk = (struct sctp_chunk *) arg; struct sctp_chunk *reply; - /* Make sure that the chunk has a valid length */ - if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) - return sctp_sf_violation_chunklen(ep, asoc, type, arg, - commands); - /* Since we are not going to really process this INIT, there * is no point in verifying chunk boundries. Just generate * the SHUTDOWN ACK. @@ -2959,7 +2929,7 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep, * * The return value is the disposition of the chunk. */ -static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep, +sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, @@ -2995,7 +2965,6 @@ static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep, SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); - sctp_sf_pdiscard(ep, asoc, type, arg, commands); return SCTP_DISPOSITION_CONSUME; } @@ -3156,14 +3125,14 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep, ch = (sctp_chunkhdr_t *) chunk->chunk_hdr; do { - /* Report violation if the chunk is less then minimal */ + /* Break out if chunk length is less then minimal. */ if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t)) - return sctp_sf_violation_chunklen(ep, asoc, type, arg, - commands); + break; + + ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); + if (ch_end > skb_tail_pointer(skb)) + break; - /* Now that we know we at least have a chunk header, - * do things that are type appropriate. 
- */ if (SCTP_CID_SHUTDOWN_ACK == ch->type) ootb_shut_ack = 1; @@ -3175,19 +3144,15 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep, if (SCTP_CID_ABORT == ch->type) return sctp_sf_pdiscard(ep, asoc, type, arg, commands); - /* Report violation if chunk len overflows */ - ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); - if (ch_end > skb_tail_pointer(skb)) - return sctp_sf_violation_chunklen(ep, asoc, type, arg, - commands); - ch = (sctp_chunkhdr_t *) ch_end; } while (ch_end < skb_tail_pointer(skb)); if (ootb_shut_ack) - return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands); + sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands); else - return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); + sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); + + return sctp_sf_pdiscard(ep, asoc, type, arg, commands); } /* @@ -3253,11 +3218,7 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep, if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) return sctp_sf_pdiscard(ep, asoc, type, arg, commands); - /* We need to discard the rest of the packet to prevent - * potential bomming attacks from additional bundled chunks. - * This is documented in SCTP Threats ID. - */ - return sctp_sf_pdiscard(ep, asoc, type, arg, commands); + return SCTP_DISPOSITION_CONSUME; } return SCTP_DISPOSITION_NOMEM; @@ -3280,13 +3241,6 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep, void *arg, sctp_cmd_seq_t *commands) { - struct sctp_chunk *chunk = arg; - - /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */ - if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) - return sctp_sf_violation_chunklen(ep, asoc, type, arg, - commands); - /* Although we do have an association in this case, it corresponds * to a restarted association. So the packet is treated as an OOTB * packet and the state function that handles OOTB SHUTDOWN_ACK is @@ -3303,11 +3257,8 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep, { struct sctp_chunk *chunk = arg; struct sctp_chunk *asconf_ack = NULL; - struct sctp_paramhdr *err_param = NULL; sctp_addiphdr_t *hdr; - union sctp_addr_param *addr_param; __u32 serial; - int length; if (!sctp_vtag_verify(chunk, asoc)) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, @@ -3323,20 +3274,6 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep, hdr = (sctp_addiphdr_t *)chunk->skb->data; serial = ntohl(hdr->serial); - addr_param = (union sctp_addr_param *)hdr->params; - length = ntohs(addr_param->p.length); - if (length < sizeof(sctp_paramhdr_t)) - return sctp_sf_violation_paramlen(ep, asoc, type, - (void *)addr_param, commands); - - /* Verify the ASCONF chunk before processing it. */ - if (!sctp_verify_asconf(asoc, - (sctp_paramhdr_t *)((void *)addr_param + length), - (void *)chunk->chunk_end, - &err_param)) - return sctp_sf_violation_paramlen(ep, asoc, type, - (void *)&err_param, commands); - /* ADDIP 4.2 C1) Compare the value of the serial number to the value * the endpoint stored in a new association variable * 'Peer-Serial-Number'. 
@@ -3391,7 +3328,6 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, struct sctp_chunk *asconf_ack = arg; struct sctp_chunk *last_asconf = asoc->addip_last_asconf; struct sctp_chunk *abort; - struct sctp_paramhdr *err_param = NULL; sctp_addiphdr_t *addip_hdr; __u32 sent_serial, rcvd_serial; @@ -3409,14 +3345,6 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data; rcvd_serial = ntohl(addip_hdr->serial); - /* Verify the ASCONF-ACK chunk before processing it. */ - if (!sctp_verify_asconf(asoc, - (sctp_paramhdr_t *)addip_hdr->params, - (void *)asconf_ack->chunk_end, - &err_param)) - return sctp_sf_violation_paramlen(ep, asoc, type, - (void *)&err_param, commands); - if (last_asconf) { addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr; sent_serial = ntohl(addip_hdr->serial); @@ -3727,16 +3655,6 @@ sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep, void *arg, sctp_cmd_seq_t *commands) { - struct sctp_chunk *chunk = arg; - - /* Make sure that the chunk has a valid length. - * Since we don't know the chunk type, we use a general - * chunkhdr structure to make a comparison. - */ - if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) - return sctp_sf_violation_chunklen(ep, asoc, type, arg, - commands); - SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk); return SCTP_DISPOSITION_DISCARD; } @@ -3792,13 +3710,6 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep, void *arg, sctp_cmd_seq_t *commands) { - struct sctp_chunk *chunk = arg; - - /* Make sure that the chunk has a valid length. */ - if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) - return sctp_sf_violation_chunklen(ep, asoc, type, arg, - commands); - return SCTP_DISPOSITION_VIOLATION; } @@ -3806,14 +3717,12 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep, * Common function to handle a protocol violation. 
  */
 static sctp_disposition_t sctp_sf_abort_violation(
-				     const struct sctp_endpoint *ep,
 				     const struct sctp_association *asoc,
 				     void *arg,
 				     sctp_cmd_seq_t *commands,
 				     const __u8 *payload,
 				     const size_t paylen)
 {
-	struct sctp_packet *packet = NULL;
 	struct sctp_chunk *chunk = arg;
 	struct sctp_chunk *abort = NULL;
 
@@ -3822,51 +3731,30 @@ static sctp_disposition_t sctp_sf_abort_violation(
 	if (!abort)
 		goto nomem;
 
-	if (asoc) {
-		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
-		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+	SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
 
-		if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
-			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
-					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
-			sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-					SCTP_ERROR(ECONNREFUSED));
-			sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
-					SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
-		} else {
-			sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-					SCTP_ERROR(ECONNABORTED));
-			sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
-					SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
-			SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
-		}
+	if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
+		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+				SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ECONNREFUSED));
+		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
+				SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
 	} else {
-		packet = sctp_ootb_pkt_new(asoc, chunk);
-
-		if (!packet)
-			goto nomem_pkt;
-
-		if (sctp_test_T_bit(abort))
-			packet->vtag = ntohl(chunk->sctp_hdr->vtag);
-
-		abort->skb->sk = ep->base.sk;
-
-		sctp_packet_append_chunk(packet, abort);
-
-		sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
-				SCTP_PACKET(packet));
-
-		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+			SCTP_ERROR(ECONNABORTED));
+		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+			SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
+		SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
 	}
 
-	sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
+	sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
 
 	SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
 	return SCTP_DISPOSITION_ABORT;
 
-nomem_pkt:
-	sctp_chunk_free(abort);
 nomem:
 	return SCTP_DISPOSITION_NOMEM;
 }
 
@@ -3899,24 +3787,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
 {
 	char err_str[]="The following chunk had invalid length:";
 
-	return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
-					sizeof(err_str));
-}
-
-/*
- * Handle a protocol violation when the parameter length is invalid.
- * "Invalid" length is identified as smaller then the minimal length a
- * given parameter can be.
- */
-static sctp_disposition_t sctp_sf_violation_paramlen(
-				     const struct sctp_endpoint *ep,
-				     const struct sctp_association *asoc,
-				     const sctp_subtype_t type,
-				     void *arg,
-				     sctp_cmd_seq_t *commands) {
-	char err_str[] = "The following parameter had invalid length:";
-
-	return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
+	return sctp_sf_abort_violation(asoc, arg, commands, err_str,
 					sizeof(err_str));
 }
 
@@ -3935,31 +3806,10 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
 {
 	char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:";
 
-	return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
+	return sctp_sf_abort_violation(asoc, arg, commands, err_str,
 					sizeof(err_str));
 }
 
-/* Handle protocol violation of an invalid chunk bundling. For example,
- * when we have an association and we recieve bundled INIT-ACK, or
- * SHUDOWN-COMPLETE, our peer is clearly violationg the "MUST NOT bundle"
- * statement from the specs. Additinally, there might be an attacker
- * on the path and we may not want to continue this communication.
- */
-static sctp_disposition_t sctp_sf_violation_chunk(
-				     const struct sctp_endpoint *ep,
-				     const struct sctp_association *asoc,
-				     const sctp_subtype_t type,
-				     void *arg,
-				     sctp_cmd_seq_t *commands)
-{
-	char err_str[]="The following chunk violates protocol:";
-
-	if (!asoc)
-		return sctp_sf_violation(ep, asoc, type, arg, commands);
-
-	return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
-				sizeof(err_str));
-}
 /***************************************************************************
  * These are the state functions for handling primitive (Section 10) events.
 ***************************************************************************/
@@ -5326,22 +5176,7 @@ static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc
 	 * association exists, otherwise, use the peer's vtag.
 	 */
 	if (asoc) {
-		/* Special case the INIT-ACK as there is no peer's vtag
-		 * yet.
-		 */
-		switch(chunk->chunk_hdr->type) {
-		case SCTP_CID_INIT_ACK:
-		{
-			sctp_initack_chunk_t *initack;
-
-			initack = (sctp_initack_chunk_t *)chunk->chunk_hdr;
-			vtag = ntohl(initack->init_hdr.init_tag);
-			break;
-		}
-		default:
-			vtag = asoc->peer.i.init_tag;
-			break;
-		}
+		vtag = asoc->peer.i.init_tag;
 	} else {
 		/* Special case the INIT and stale COOKIE_ECHO as there is no
 		 * vtag yet.
diff --git a/trunk/net/sctp/sm_statetable.c b/trunk/net/sctp/sm_statetable.c
index ddb0ba3974b0..70a91ece3c49 100644
--- a/trunk/net/sctp/sm_statetable.c
+++ b/trunk/net/sctp/sm_statetable.c
@@ -110,7 +110,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 	/* SCTP_STATE_EMPTY */ \
 	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
+	TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
 	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -173,7 +173,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 	/* SCTP_STATE_EMPTY */ \
 	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
+	TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
 	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -194,7 +194,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 	/* SCTP_STATE_EMPTY */ \
 	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
+	TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
 	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -216,7 +216,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 	/* SCTP_STATE_EMPTY */ \
 	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
+	TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
 	TYPE_SCTP_FUNC(sctp_sf_violation), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -258,7 +258,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 	/* SCTP_STATE_EMPTY */ \
 	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
+	TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
 	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -300,7 +300,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 	/* SCTP_STATE_EMPTY */ \
 	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
+	TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
 	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -499,7 +499,7 @@ static const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_
 	/* SCTP_STATE_EMPTY */ \
 	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
+	TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
 	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -528,7 +528,7 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
 	/* SCTP_STATE_EMPTY */
 	TYPE_SCTP_FUNC(sctp_sf_ootb),
 	/* SCTP_STATE_CLOSED */
-	TYPE_SCTP_FUNC(sctp_sf_ootb),
+	TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8),
 	/* SCTP_STATE_COOKIE_WAIT */
 	TYPE_SCTP_FUNC(sctp_sf_unk_chunk),
 	/* SCTP_STATE_COOKIE_ECHOED */
diff --git a/trunk/net/socket.c b/trunk/net/socket.c
index b09eb9036a17..7d44453dfae1 100644
--- a/trunk/net/socket.c
+++ b/trunk/net/socket.c
@@ -777,6 +777,9 @@ static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	if (pos != 0)
 		return -ESPIPE;
 
+	if (iocb->ki_left == 0)	/* Match SYS5 behaviour */
+		return 0;
+
 	x = alloc_sock_iocb(iocb, &siocb);
 	if (!x)
 		return -ENOMEM;
diff --git a/trunk/net/wireless/core.c b/trunk/net/wireless/core.c
index 9771451eae21..7eabd55417a5 100644
--- a/trunk/net/wireless/core.c
+++ b/trunk/net/wireless/core.c
@@ -213,7 +213,7 @@ static int cfg80211_init(void)
 out_fail_sysfs:
 	return err;
 }
-subsys_initcall(cfg80211_init);
+module_init(cfg80211_init);
 
 static void cfg80211_exit(void)
 {
diff --git a/trunk/net/wireless/sysfs.c b/trunk/net/wireless/sysfs.c
index 2d5d2255a27c..88aaacd9f822 100644
--- a/trunk/net/wireless/sysfs.c
+++ b/trunk/net/wireless/sysfs.c
@@ -52,14 +52,12 @@ static void wiphy_dev_release(struct device *dev)
 	cfg80211_dev_free(rdev);
 }
 
-#ifdef CONFIG_HOTPLUG
 static int wiphy_uevent(struct device *dev, char **envp,
 			int num_envp, char *buf, int size)
 {
 	/* TODO, we probably need stuff here */
 	return 0;
 }
-#endif
 
 struct class ieee80211_class = {
 	.name = "ieee80211",
diff --git a/trunk/sound/core/memalloc.c b/trunk/sound/core/memalloc.c
index 9b5656d8bcca..f057430db0d0 100644
--- a/trunk/sound/core/memalloc.c
+++ b/trunk/sound/core/memalloc.c
@@ -27,7 +27,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -482,54 +481,53 @@ static void free_all_reserved_pages(void)
 
 #define SND_MEM_PROC_FILE	"driver/snd-page-alloc"
 static struct proc_dir_entry *snd_mem_proc;
 
-static int snd_mem_proc_read(struct seq_file *seq, void *offset)
+static int snd_mem_proc_read(char *page, char **start, off_t off,
+			     int count, int *eof, void *data)
 {
+	int len = 0;
 	long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
 	struct snd_mem_list *mem;
 	int devno;
 	static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" };
 
 	mutex_lock(&list_mutex);
-	seq_printf(seq, "pages : %li bytes (%li pages per %likB)\n",
-		   pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
+	len += snprintf(page + len, count - len,
+			"pages : %li bytes (%li pages per %likB)\n",
+			pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
 	devno = 0;
 	list_for_each_entry(mem, &mem_list_head, list) {
 		devno++;
-		seq_printf(seq, "buffer %d : ID %08x : type %s\n",
-			   devno, mem->id, types[mem->buffer.dev.type]);
-		seq_printf(seq, " addr = 0x%lx, size = %d bytes\n",
-			   (unsigned long)mem->buffer.addr,
-			   (int)mem->buffer.bytes);
+		len += snprintf(page + len, count - len,
+				"buffer %d : ID %08x : type %s\n",
+				devno, mem->id, types[mem->buffer.dev.type]);
+		len += snprintf(page + len, count - len,
+				" addr = 0x%lx, size = %d bytes\n",
+				(unsigned long)mem->buffer.addr, (int)mem->buffer.bytes);
 	}
 	mutex_unlock(&list_mutex);
-	return 0;
-}
-
-static int snd_mem_proc_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, snd_mem_proc_read, NULL);
+	return len;
 }
 
 /* FIXME: for pci only - other bus? */
 #ifdef CONFIG_PCI
 #define gettoken(bufp) strsep(bufp, " \t\n")
 
-static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
-				  size_t count, loff_t * ppos)
+static int snd_mem_proc_write(struct file *file, const char __user *buffer,
+			      unsigned long count, void *data)
 {
 	char buf[128];
 	char *token, *p;
 
-	if (count > sizeof(buf) - 1)
-		return -EINVAL;
+	if (count > ARRAY_SIZE(buf) - 1)
+		count = ARRAY_SIZE(buf) - 1;
 	if (copy_from_user(buf, buffer, count))
 		return -EFAULT;
-	buf[count] = '\0';
+	buf[ARRAY_SIZE(buf) - 1] = '\0';
 
 	p = buf;
 	token = gettoken(&p);
 	if (! token || *token == '#')
-		return count;
+		return (int)count;
 	if (strcmp(token, "add") == 0) {
 		char *endp;
 		int vendor, device, size, buffers;
@@ -550,7 +548,7 @@ static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
 		    (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
 		    buffers > 4) {
 			printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
-			return count;
+			return (int)count;
 		}
 		vendor &= 0xffff;
 		device &= 0xffff;
@@ -562,7 +560,7 @@ static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
 			if (pci_set_dma_mask(pci, mask) < 0 ||
 			    pci_set_consistent_dma_mask(pci, mask) < 0) {
 				printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
-				return count;
+				return (int)count;
 			}
 		}
 		for (i = 0; i < buffers; i++) {
@@ -572,7 +570,7 @@ static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
 					     size, &dmab) < 0) {
 				printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
 				pci_dev_put(pci);
-				return count;
+				return (int)count;
 			}
 			snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
 		}
@@ -598,21 +596,9 @@ static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
 		free_all_reserved_pages();
 	else
 		printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
-	return count;
+	return (int)count;
 }
 #endif /* CONFIG_PCI */
-
-static const struct file_operations snd_mem_proc_fops = {
-	.owner = THIS_MODULE,
-	.open = snd_mem_proc_open,
-	.read = seq_read,
-#ifdef CONFIG_PCI
-	.write = snd_mem_proc_write,
-#endif
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
 #endif /* CONFIG_PROC_FS */
 
 /*
@@ -623,8 +609,12 @@ static int __init snd_mem_init(void)
 {
 #ifdef CONFIG_PROC_FS
 	snd_mem_proc = create_proc_entry(SND_MEM_PROC_FILE, 0644, NULL);
-	if (snd_mem_proc)
-		snd_mem_proc->proc_fops = &snd_mem_proc_fops;
+	if (snd_mem_proc) {
+		snd_mem_proc->read_proc = snd_mem_proc_read;
+#ifdef CONFIG_PCI
+		snd_mem_proc->write_proc = snd_mem_proc_write;
+#endif
+	}
 #endif
 	return 0;
 }
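
The memalloc.c hunks above switch /proc/driver/snd-page-alloc from the seq_file interface to the legacy read_proc/write_proc callbacks. Below is a minimal, self-contained sketch of that legacy registration pattern, assuming a kernel of the same vintage where create_proc_entry() and the ->read_proc hook still exist; it is illustrative only and not part of the patch, and the entry name "driver/example" plus the handler names are hypothetical.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>

/* Hypothetical read_proc handler: format at most 'count' bytes into the
 * page buffer supplied by procfs and return the number of bytes written,
 * mirroring how snd_mem_proc_read() accumulates 'len' above.
 */
static int example_read_proc(char *page, char **start, off_t off,
			     int count, int *eof, void *data)
{
	int len = 0;

	len += snprintf(page + len, count - len, "example value: %d\n", 42);
	*eof = 1;	/* everything fits in a single read */
	return len;
}

static int __init example_proc_init(void)
{
	struct proc_dir_entry *entry;

	/* Same registration pattern as the snd_mem_init() hunk above. */
	entry = create_proc_entry("driver/example", 0444, NULL);
	if (entry)
		entry->read_proc = example_read_proc;
	return 0;
}

static void __exit example_proc_exit(void)
{
	remove_proc_entry("driver/example", NULL);
}

module_init(example_proc_init);
module_exit(example_proc_exit);
MODULE_LICENSE("GPL");

Note that later mainline kernels removed ->read_proc entirely; the seq_file approach shown in the lines this patch deletes is the direction the proc interface eventually standardized on, so the sketch only mirrors the older API used in this tree.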