diff --git a/[refs] b/[refs] index f082b7b92b5b..92f37bde0698 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 633331f389c2d9c631371d09a54626a5e4749452 +refs/heads/master: 6cd5a86b56ec8fc8651c043bdb05ea0c662fb704 diff --git a/trunk/Documentation/DocBook/kgdb.tmpl b/trunk/Documentation/DocBook/kgdb.tmpl index 028a8444d95e..97618bed4d65 100644 --- a/trunk/Documentation/DocBook/kgdb.tmpl +++ b/trunk/Documentation/DocBook/kgdb.tmpl @@ -72,7 +72,7 @@ kgdb is a source level debugger for linux kernel. It is used along with gdb to debug a linux kernel. The expectation is that gdb can be used to "break in" to the kernel to inspect memory, variables - and look through call stack information similar to what an + and look through a call stack information similar to what an application developer would use gdb for. It is possible to place breakpoints in kernel code and perform some limited execution stepping. @@ -93,10 +93,8 @@ Compiling a kernel - To enable CONFIG_KGDB you should first turn on - "Prompt for development and/or incomplete code/drivers" - (CONFIG_EXPERIMENTAL) in "General setup", then under the - "Kernel debugging" select "KGDB: kernel debugging with remote gdb". + To enable CONFIG_KGDB, look under the "Kernel debugging" + and then select "KGDB: kernel debugging with remote gdb". Next you should choose one or more I/O drivers to interconnect debugging diff --git a/trunk/Documentation/filesystems/Locking b/trunk/Documentation/filesystems/Locking index 8b22d7d8b991..c2992bc54f2f 100644 --- a/trunk/Documentation/filesystems/Locking +++ b/trunk/Documentation/filesystems/Locking @@ -92,6 +92,7 @@ prototypes: void (*destroy_inode)(struct inode *); void (*dirty_inode) (struct inode *); int (*write_inode) (struct inode *, int); + void (*put_inode) (struct inode *); void (*drop_inode) (struct inode *); void (*delete_inode) (struct inode *); void (*put_super) (struct super_block *); @@ -114,6 +115,7 @@ alloc_inode: no no no destroy_inode: no dirty_inode: no (must not sleep) write_inode: no +put_inode: no drop_inode: no !!!inode_lock!!! delete_inode: no put_super: yes yes no diff --git a/trunk/Documentation/filesystems/vfs.txt b/trunk/Documentation/filesystems/vfs.txt index b7522c6cbae3..81e5be6e6e35 100644 --- a/trunk/Documentation/filesystems/vfs.txt +++ b/trunk/Documentation/filesystems/vfs.txt @@ -205,6 +205,7 @@ struct super_operations { void (*dirty_inode) (struct inode *); int (*write_inode) (struct inode *, int); + void (*put_inode) (struct inode *); void (*drop_inode) (struct inode *); void (*delete_inode) (struct inode *); void (*put_super) (struct super_block *); @@ -245,6 +246,9 @@ or bottom half). inode to disc. The second parameter indicates whether the write should be synchronous or not, not all filesystems check this flag. + put_inode: called when the VFS inode is removed from the inode + cache. + drop_inode: called when the last access to the inode is dropped, with the inode_lock spinlock held. diff --git a/trunk/Documentation/kbuild/kconfig-language.txt b/trunk/Documentation/kbuild/kconfig-language.txt index c412c245848f..00b950d1c193 100644 --- a/trunk/Documentation/kbuild/kconfig-language.txt +++ b/trunk/Documentation/kbuild/kconfig-language.txt @@ -377,3 +377,27 @@ config FOO limits FOO to module (=m) or disabled (=n). 
+ +Build limited by a third config symbol which may be =y or =m +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +A common idiom that we see (and sometimes have problems with) is this: + +When option C in B (module or subsystem) uses interfaces from A (module +or subsystem), and both A and B are tristate (could be =y or =m if they +were independent of each other, but they aren't), then we need to limit +C such that it cannot be built statically if A is built as a loadable +module. (C already depends on B, so there is no dependency issue to +take care of here.) + +If A is linked statically into the kernel image, C can be built +statically or as loadable module(s). However, if A is built as loadable +module(s), then C must be restricted to loadable module(s) also. This +can be expressed in kconfig language as: + +config C + depends on A = y || A = B + +or for real examples, use this command in a kernel tree: + +$ find . -name Kconfig\* | xargs grep -ns "depends on.*=.*||.*=" | grep -v orig + diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt index cdd5b934f43e..a3c35446e755 100644 --- a/trunk/Documentation/kernel-parameters.txt +++ b/trunk/Documentation/kernel-parameters.txt @@ -1094,6 +1094,9 @@ and is between 256 and 4096 characters. It is defined in the file mac5380= [HW,SCSI] Format: ,,,, + mac53c9x= [HW,SCSI] Format: + ,,,,,,, + machvec= [IA64] Force the use of a particular machine-vector (machvec) in a generic kernel. Example: machvec=hpzx1_swiotlb @@ -1522,8 +1525,6 @@ and is between 256 and 4096 characters. It is defined in the file This is normally done in pci_enable_device(), so this option is a temporary workaround for broken drivers that don't call it. - skip_isa_align [X86] do not align io start addr, so can - handle more pci cards firmware [ARM] Do not re-enumerate the bus but instead just use the configuration from the bootloader. This is currently used on diff --git a/trunk/Documentation/s390/CommonIO b/trunk/Documentation/s390/CommonIO index bf0baa19ec24..8fbc0a852870 100644 --- a/trunk/Documentation/s390/CommonIO +++ b/trunk/Documentation/s390/CommonIO @@ -8,6 +8,17 @@ Command line parameters Enable logging of debug information in case of ccw device timeouts. + +* cio_msg = yes | no + + Determines whether information on found devices and sensed device + characteristics should be shown during startup or when new devices are + found, i. e. messages of the types "Detected device 0.0.4711 on subchannel + 0.0.0042" and "SenseID: Device 0.0.4711 reports: ...". + + Default is off. + + * cio_ignore = {all} | { | } | {! | !} diff --git a/trunk/Documentation/scheduler/sched-design.txt b/trunk/Documentation/scheduler/sched-design.txt new file mode 100644 index 000000000000..1605bf0cba8b --- /dev/null +++ b/trunk/Documentation/scheduler/sched-design.txt @@ -0,0 +1,165 @@ + Goals, Design and Implementation of the + new ultra-scalable O(1) scheduler + + + This is an edited version of an email Ingo Molnar sent to + lkml on 4 Jan 2002. It describes the goals, design, and + implementation of Ingo's new ultra-scalable O(1) scheduler. + Last Updated: 18 April 2002. + + +Goal +==== + +The main goal of the new scheduler is to keep all the good things we know +and love about the current Linux scheduler: + + - good interactive performance even during high load: if the user + types or clicks then the system must react instantly and must execute + the user tasks smoothly, even during considerable background load. 
+ + - good scheduling/wakeup performance with 1-2 runnable processes. + + - fairness: no process should stay without any timeslice for any + unreasonable amount of time. No process should get an unjustly high + amount of CPU time. + + - priorities: less important tasks can be started with lower priority, + more important tasks with higher priority. + + - SMP efficiency: no CPU should stay idle if there is work to do. + + - SMP affinity: processes which run on one CPU should stay affine to + that CPU. Processes should not bounce between CPUs too frequently. + + - plus additional scheduler features: RT scheduling, CPU binding. + +and the goal is also to add a few new things: + + - fully O(1) scheduling. Are you tired of the recalculation loop + blowing the L1 cache away every now and then? Do you think the goodness + loop is taking a bit too long to finish if there are lots of runnable + processes? This new scheduler takes no prisoners: wakeup(), schedule(), + the timer interrupt are all O(1) algorithms. There is no recalculation + loop. There is no goodness loop either. + + - 'perfect' SMP scalability. With the new scheduler there is no 'big' + runqueue_lock anymore - it's all per-CPU runqueues and locks - two + tasks on two separate CPUs can wake up, schedule and context-switch + completely in parallel, without any interlocking. All + scheduling-relevant data is structured for maximum scalability. + + - better SMP affinity. The old scheduler has a particular weakness that + causes the random bouncing of tasks between CPUs if/when higher + priority/interactive tasks, this was observed and reported by many + people. The reason is that the timeslice recalculation loop first needs + every currently running task to consume its timeslice. But when this + happens on eg. an 8-way system, then this property starves an + increasing number of CPUs from executing any process. Once the last + task that has a timeslice left has finished using up that timeslice, + the recalculation loop is triggered and other CPUs can start executing + tasks again - after having idled around for a number of timer ticks. + The more CPUs, the worse this effect. + + Furthermore, this same effect causes the bouncing effect as well: + whenever there is such a 'timeslice squeeze' of the global runqueue, + idle processors start executing tasks which are not affine to that CPU. + (because the affine tasks have finished off their timeslices already.) + + The new scheduler solves this problem by distributing timeslices on a + per-CPU basis, without having any global synchronization or + recalculation. + + - batch scheduling. A significant proportion of computing-intensive tasks + benefit from batch-scheduling, where timeslices are long and processes + are roundrobin scheduled. The new scheduler does such batch-scheduling + of the lowest priority tasks - so nice +19 jobs will get + 'batch-scheduled' automatically. With this scheduler, nice +19 jobs are + in essence SCHED_IDLE, from an interactiveness point of view. + + - handle extreme loads more smoothly, without breakdown and scheduling + storms. + + - O(1) RT scheduling. For those RT folks who are paranoid about the + O(nr_running) property of the goodness loop and the recalculation loop. + + - run fork()ed children before the parent. 
Andrea has pointed out the + advantages of this a few months ago, but patches for this feature + do not work with the old scheduler as well as they should, + because idle processes often steal the new child before the fork()ing + CPU gets to execute it. + + +Design +====== + +The core of the new scheduler contains the following mechanisms: + + - *two* priority-ordered 'priority arrays' per CPU. There is an 'active' + array and an 'expired' array. The active array contains all tasks that + are affine to this CPU and have timeslices left. The expired array + contains all tasks which have used up their timeslices - but this array + is kept sorted as well. The active and expired array is not accessed + directly, it's accessed through two pointers in the per-CPU runqueue + structure. If all active tasks are used up then we 'switch' the two + pointers and from now on the ready-to-go (former-) expired array is the + active array - and the empty active array serves as the new collector + for expired tasks. + + - there is a 64-bit bitmap cache for array indices. Finding the highest + priority task is thus a matter of two x86 BSFL bit-search instructions. + +the split-array solution enables us to have an arbitrary number of active +and expired tasks, and the recalculation of timeslices can be done +immediately when the timeslice expires. Because the arrays are always +access through the pointers in the runqueue, switching the two arrays can +be done very quickly. + +this is a hybride priority-list approach coupled with roundrobin +scheduling and the array-switch method of distributing timeslices. + + - there is a per-task 'load estimator'. + +one of the toughest things to get right is good interactive feel during +heavy system load. While playing with various scheduler variants i found +that the best interactive feel is achieved not by 'boosting' interactive +tasks, but by 'punishing' tasks that want to use more CPU time than there +is available. This method is also much easier to do in an O(1) fashion. + +to establish the actual 'load' the task contributes to the system, a +complex-looking but pretty accurate method is used: there is a 4-entry +'history' ringbuffer of the task's activities during the last 4 seconds. +This ringbuffer is operated without much overhead. The entries tell the +scheduler a pretty accurate load-history of the task: has it used up more +CPU time or less during the past N seconds. [the size '4' and the interval +of 4x 1 seconds was found by lots of experimentation - this part is +flexible and can be changed in both directions.] + +the penalty a task gets for generating more load than the CPU can handle +is a priority decrease - there is a maximum amount to this penalty +relative to their static priority, so even fully CPU-bound tasks will +observe each other's priorities, and will share the CPU accordingly. + +the SMP load-balancer can be extended/switched with additional parallel +computing and cache hierarchy concepts: NUMA scheduling, multi-core CPUs +can be supported easily by changing the load-balancer. Right now it's +tuned for my SMP systems. + +i skipped the prev->mm == next->mm advantage - no workload i know of shows +any sensitivity to this. It can be added back by sacrificing O(1) +schedule() [the current and one-lower priority list can be searched for a +that->mm == current->mm condition], but costs a fair number of cycles +during a number of important workloads, so i wanted to avoid this as much +as possible. 
+ +- the SMP idle-task startup code was still racy and the new scheduler +triggered this. So i streamlined the idle-setup code a bit. We do not call +into schedule() before all processors have started up fully and all idle +threads are in place. + +- the patch also cleans up a number of aspects of sched.c - moves code +into other areas of the kernel where it's appropriate, and simplifies +certain code paths and data constructs. As a result, the new scheduler's +code is smaller than the old one. + + Ingo diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index f5583dc7ea39..abe27871ad6a 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -2112,10 +2112,12 @@ L: netdev@vger.kernel.org S: Maintained INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/ixgb/ixgbe) -P: Jeff Kirsher -M: jeffrey.t.kirsher@intel.com +P: Auke Kok +M: auke-jan.h.kok@intel.com P: Jesse Brandeburg M: jesse.brandeburg@intel.com +P: Jeff Kirsher +M: jeffrey.t.kirsher@intel.com P: Bruce Allan M: bruce.w.allan@intel.com P: John Ronciak diff --git a/trunk/arch/arm/mach-ep93xx/core.c b/trunk/arch/arm/mach-ep93xx/core.c index 1d7bca6aa441..8bc187240542 100644 --- a/trunk/arch/arm/mach-ep93xx/core.c +++ b/trunk/arch/arm/mach-ep93xx/core.c @@ -280,7 +280,7 @@ static int ep93xx_gpio_irq_type(unsigned int irq, unsigned int type) const int port = gpio >> 3; const int port_mask = 1 << (gpio & 7); - gpio_direction_input(gpio); + gpio_direction_output(gpio, gpio_get_value(gpio)); switch (type) { case IRQT_RISING: diff --git a/trunk/arch/arm/mach-ns9xxx/irq.c b/trunk/arch/arm/mach-ns9xxx/irq.c index ca85d24cf39f..36e5835e6097 100644 --- a/trunk/arch/arm/mach-ns9xxx/irq.c +++ b/trunk/arch/arm/mach-ns9xxx/irq.c @@ -62,7 +62,7 @@ static struct irq_chip ns9xxx_chip = { #if 0 #define handle_irq handle_level_irq #else -static void handle_prio_irq(unsigned int irq, struct irq_desc *desc) +void handle_prio_irq(unsigned int irq, struct irq_desc *desc) { unsigned int cpu = smp_processor_id(); struct irqaction *action; @@ -70,35 +70,27 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc) spin_lock(&desc->lock); - BUG_ON(desc->status & IRQ_INPROGRESS); + if (unlikely(desc->status & IRQ_INPROGRESS)) + goto out_unlock; desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); kstat_cpu(cpu).irqs[irq]++; action = desc->action; if (unlikely(!action || (desc->status & IRQ_DISABLED))) - goto out_mask; + goto out_unlock; desc->status |= IRQ_INPROGRESS; spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); - /* XXX: There is no direct way to access noirqdebug, so check - * unconditionally for spurious irqs... - * Maybe this function should go to kernel/irq/chip.c? */ - note_interrupt(irq, desc, action_ret); - spin_lock(&desc->lock); desc->status &= ~IRQ_INPROGRESS; + if (!(desc->status & IRQ_DISABLED) && desc->chip->ack) + desc->chip->ack(irq); - if (desc->status & IRQ_DISABLED) -out_mask: - desc->chip->mask(irq); - - /* ack unconditionally to unmask lower prio irqs */ - desc->chip->ack(irq); - +out_unlock: spin_unlock(&desc->lock); } #define handle_irq handle_prio_irq diff --git a/trunk/arch/arm/mach-orion5x/addr-map.c b/trunk/arch/arm/mach-orion5x/addr-map.c index e63fb05dc893..9608503d67f5 100644 --- a/trunk/arch/arm/mach-orion5x/addr-map.c +++ b/trunk/arch/arm/mach-orion5x/addr-map.c @@ -34,7 +34,11 @@ * Non-CPU Masters address decoding -- * Unlike the CPU, we setup the access from Orion's master interfaces to DDR * banks only (the typical use case). - * Setup access for each master to DDR is issued by platform device setup. 
+ * Setup access for each master to DDR is issued by common.c. + * + * Note: although orion_setbits() and orion_clrbits() are not atomic + * no locking is necessary here since code in this file is only called + * at boot time when there is no concurrency issues. */ /* @@ -44,6 +48,10 @@ #define TARGET_DEV_BUS 1 #define TARGET_PCI 3 #define TARGET_PCIE 4 +#define ATTR_DDR_CS(n) (((n) ==0) ? 0xe : \ + ((n) == 1) ? 0xd : \ + ((n) == 2) ? 0xb : \ + ((n) == 3) ? 0x7 : 0xf) #define ATTR_PCIE_MEM 0x59 #define ATTR_PCIE_IO 0x51 #define ATTR_PCIE_WA 0x79 @@ -53,12 +61,17 @@ #define ATTR_DEV_CS1 0x1d #define ATTR_DEV_CS2 0x1b #define ATTR_DEV_BOOT 0xf +#define WIN_EN 1 /* * Helpers to get DDR bank info */ -#define DDR_BASE_CS(n) ORION5X_DDR_REG(0x1500 + ((n) << 3)) -#define DDR_SIZE_CS(n) ORION5X_DDR_REG(0x1504 + ((n) << 3)) +#define DDR_BASE_CS(n) ORION5X_DDR_REG(0x1500 + ((n) * 8)) +#define DDR_SIZE_CS(n) ORION5X_DDR_REG(0x1504 + ((n) * 8)) +#define DDR_MAX_CS 4 +#define DDR_REG_TO_SIZE(reg) (((reg) | 0xffffff) + 1) +#define DDR_REG_TO_BASE(reg) ((reg) & 0xff000000) +#define DDR_BANK_EN 1 /* * CPU Address Decode Windows registers @@ -68,6 +81,17 @@ #define CPU_WIN_REMAP_LO(n) ORION5X_BRIDGE_REG(0x008 | ((n) << 4)) #define CPU_WIN_REMAP_HI(n) ORION5X_BRIDGE_REG(0x00c | ((n) << 4)) +/* + * Gigabit Ethernet Address Decode Windows registers + */ +#define ETH_WIN_BASE(win) ORION5X_ETH_REG(0x200 + ((win) * 8)) +#define ETH_WIN_SIZE(win) ORION5X_ETH_REG(0x204 + ((win) * 8)) +#define ETH_WIN_REMAP(win) ORION5X_ETH_REG(0x280 + ((win) * 4)) +#define ETH_WIN_EN ORION5X_ETH_REG(0x290) +#define ETH_WIN_PROT ORION5X_ETH_REG(0x294) +#define ETH_MAX_WIN 6 +#define ETH_MAX_REMAP_WIN 4 + struct mbus_dram_target_info orion5x_mbus_dram_info; @@ -178,3 +202,39 @@ void __init orion5x_setup_pcie_wa_win(u32 base, u32 size) { setup_cpu_win(7, base, size, TARGET_PCIE, ATTR_PCIE_WA, -1); } + +void __init orion5x_setup_eth_wins(void) +{ + int i; + + /* + * First, disable and clear windows + */ + for (i = 0; i < ETH_MAX_WIN; i++) { + orion5x_write(ETH_WIN_BASE(i), 0); + orion5x_write(ETH_WIN_SIZE(i), 0); + orion5x_setbits(ETH_WIN_EN, 1 << i); + orion5x_clrbits(ETH_WIN_PROT, 0x3 << (i * 2)); + if (i < ETH_MAX_REMAP_WIN) + orion5x_write(ETH_WIN_REMAP(i), 0); + } + + /* + * Setup windows for DDR banks. 
+ */ + for (i = 0; i < DDR_MAX_CS; i++) { + u32 base, size; + size = orion5x_read(DDR_SIZE_CS(i)); + base = orion5x_read(DDR_BASE_CS(i)); + if (size & DDR_BANK_EN) { + base = DDR_REG_TO_BASE(base); + size = DDR_REG_TO_SIZE(size); + orion5x_write(ETH_WIN_SIZE(i), (size-1) & 0xffff0000); + orion5x_write(ETH_WIN_BASE(i), (base & 0xffff0000) | + (ATTR_DDR_CS(i) << 8) | + TARGET_DDR); + orion5x_clrbits(ETH_WIN_EN, 1 << i); + orion5x_setbits(ETH_WIN_PROT, 0x3 << (i * 2)); + } + } +} diff --git a/trunk/arch/arm/mach-orion5x/common.c b/trunk/arch/arm/mach-orion5x/common.c index 4f13fd037f04..968deb58be01 100644 --- a/trunk/arch/arm/mach-orion5x/common.c +++ b/trunk/arch/arm/mach-orion5x/common.c @@ -190,11 +190,6 @@ static struct platform_device orion5x_ehci1 = { * (The Orion and Discovery (MV643xx) families use the same Ethernet driver) ****************************************************************************/ -struct mv643xx_eth_shared_platform_data orion5x_eth_shared_data = { - .dram = &orion5x_mbus_dram_info, - .t_clk = ORION5X_TCLK, -}; - static struct resource orion5x_eth_shared_resources[] = { { .start = ORION5X_ETH_PHYS_BASE + 0x2000, @@ -206,9 +201,6 @@ static struct resource orion5x_eth_shared_resources[] = { static struct platform_device orion5x_eth_shared = { .name = MV643XX_ETH_SHARED_NAME, .id = 0, - .dev = { - .platform_data = &orion5x_eth_shared_data, - }, .num_resources = 1, .resource = orion5x_eth_shared_resources, }; @@ -231,9 +223,7 @@ static struct platform_device orion5x_eth = { void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data) { - eth_data->shared = &orion5x_eth_shared; orion5x_eth.dev.platform_data = eth_data; - platform_device_register(&orion5x_eth_shared); platform_device_register(&orion5x_eth); } @@ -370,6 +360,7 @@ void __init orion5x_init(void) * Setup Orion address map */ orion5x_setup_cpu_mbus_bridge(); + orion5x_setup_eth_wins(); /* * Register devices. diff --git a/trunk/arch/arm/mach-orion5x/common.h b/trunk/arch/arm/mach-orion5x/common.h index bd0f05de6e18..14adf8d1a54a 100644 --- a/trunk/arch/arm/mach-orion5x/common.h +++ b/trunk/arch/arm/mach-orion5x/common.h @@ -22,6 +22,7 @@ void orion5x_setup_dev0_win(u32 base, u32 size); void orion5x_setup_dev1_win(u32 base, u32 size); void orion5x_setup_dev2_win(u32 base, u32 size); void orion5x_setup_pcie_wa_win(u32 base, u32 size); +void orion5x_setup_eth_wins(void); /* * Shared code used internally by other Orion core functions. 
diff --git a/trunk/arch/arm/mach-pxa/Makefile b/trunk/arch/arm/mach-pxa/Makefile index 0e6d05bb81aa..6a830853aa6a 100644 --- a/trunk/arch/arm/mach-pxa/Makefile +++ b/trunk/arch/arm/mach-pxa/Makefile @@ -5,13 +5,6 @@ # Common support (must be linked before board specific support) obj-y += clock.o devices.o generic.o irq.o dma.o \ time.o gpio.o -obj-$(CONFIG_PM) += pm.o sleep.o standby.o -obj-$(CONFIG_CPU_FREQ) += cpu-pxa.o - -# Generic drivers that other drivers may depend upon -obj-$(CONFIG_PXA_SSP) += ssp.o - -# SoC-specific code obj-$(CONFIG_PXA25x) += mfp-pxa2xx.o pxa25x.o obj-$(CONFIG_PXA27x) += mfp-pxa2xx.o pxa27x.o obj-$(CONFIG_PXA3xx) += mfp-pxa3xx.o pxa3xx.o smemc.o @@ -55,6 +48,11 @@ led-$(CONFIG_MACH_TRIZEPS4) += leds-trizeps4.o obj-$(CONFIG_LEDS) += $(led-y) +# Misc features +obj-$(CONFIG_PM) += pm.o sleep.o standby.o +obj-$(CONFIG_CPU_FREQ) += cpu-pxa.o +obj-$(CONFIG_PXA_SSP) += ssp.o + ifeq ($(CONFIG_PCI),y) obj-$(CONFIG_MACH_ARMCORE) += cm-x270-pci.o endif diff --git a/trunk/arch/arm/mach-pxa/corgi.c b/trunk/arch/arm/mach-pxa/corgi.c index b757dd756655..259ca821e464 100644 --- a/trunk/arch/arm/mach-pxa/corgi.c +++ b/trunk/arch/arm/mach-pxa/corgi.c @@ -493,6 +493,8 @@ static struct platform_device *devices[] __initdata = { static void corgi_poweroff(void) { + RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR; + if (!machine_is_corgi()) /* Green LED off tells the bootloader to halt */ reset_scoop_gpio(&corgiscoop_device.dev, CORGI_SCP_LED_GREEN); @@ -501,6 +503,8 @@ static void corgi_poweroff(void) static void corgi_restart(char mode) { + RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR; + if (!machine_is_corgi()) /* Green LED on tells the bootloader to reboot */ set_scoop_gpio(&corgiscoop_device.dev, CORGI_SCP_LED_GREEN); diff --git a/trunk/arch/arm/mach-pxa/cpu-pxa.c b/trunk/arch/arm/mach-pxa/cpu-pxa.c index fb9ba1ab2826..4b21479332ae 100644 --- a/trunk/arch/arm/mach-pxa/cpu-pxa.c +++ b/trunk/arch/arm/mach-pxa/cpu-pxa.c @@ -49,216 +49,125 @@ MODULE_PARM_DESC(freq_debug, "Set the debug messages to on=1/off=0"); #define freq_debug 0 #endif -static unsigned int pxa27x_maxfreq; -module_param(pxa27x_maxfreq, uint, 0); -MODULE_PARM_DESC(pxa27x_maxfreq, "Set the pxa27x maxfreq in MHz" - "(typically 624=>pxa270, 416=>pxa271, 520=>pxa272)"); - typedef struct { unsigned int khz; unsigned int membus; unsigned int cccr; unsigned int div2; - unsigned int cclkcfg; } pxa_freqs_t; /* Define the refresh period in mSec for the SDRAM and the number of rows */ -#define SDRAM_TREF 64 /* standard 64ms SDRAM */ -#define SDRAM_ROWS 4096 /* 64MB=8192 32MB=4096 */ +#define SDRAM_TREF 64 /* standard 64ms SDRAM */ +#define SDRAM_ROWS 4096 /* 64MB=8192 32MB=4096 */ +#define MDREFR_DRI(x) (((x) * SDRAM_TREF) / (SDRAM_ROWS * 32)) -#define CCLKCFG_TURBO 0x1 -#define CCLKCFG_FCS 0x2 -#define CCLKCFG_HALFTURBO 0x4 -#define CCLKCFG_FASTBUS 0x8 -#define MDREFR_DB2_MASK (MDREFR_K2DB2 | MDREFR_K1DB2) -#define MDREFR_DRI_MASK 0xFFF +#define CCLKCFG_TURBO 0x1 +#define CCLKCFG_FCS 0x2 +#define PXA25x_MIN_FREQ 99500 +#define PXA25x_MAX_FREQ 398100 +#define MDREFR_DB2_MASK (MDREFR_K2DB2 | MDREFR_K1DB2) +#define MDREFR_DRI_MASK 0xFFF -/* - * PXA255 definitions - */ -/* Use the run mode frequencies for the CPUFREQ_POLICY_PERFORMANCE policy */ -#define CCLKCFG CCLKCFG_TURBO | CCLKCFG_FCS +/* Use the run mode frequencies for the CPUFREQ_POLICY_PERFORMANCE policy */ static pxa_freqs_t pxa255_run_freqs[] = { - /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */ - { 99500, 99500, 0x121, 1, CCLKCFG}, /* 99, 99, 50, 50 */ 
- {132700, 132700, 0x123, 1, CCLKCFG}, /* 133, 133, 66, 66 */ - {199100, 99500, 0x141, 0, CCLKCFG}, /* 199, 199, 99, 99 */ - {265400, 132700, 0x143, 1, CCLKCFG}, /* 265, 265, 133, 66 */ - {331800, 165900, 0x145, 1, CCLKCFG}, /* 331, 331, 166, 83 */ - {398100, 99500, 0x161, 0, CCLKCFG}, /* 398, 398, 196, 99 */ + /* CPU MEMBUS CCCR DIV2*/ + { 99500, 99500, 0x121, 1}, /* run= 99, turbo= 99, PXbus=50, SDRAM=50 */ + {132700, 132700, 0x123, 1}, /* run=133, turbo=133, PXbus=66, SDRAM=66 */ + {199100, 99500, 0x141, 0}, /* run=199, turbo=199, PXbus=99, SDRAM=99 */ + {265400, 132700, 0x143, 1}, /* run=265, turbo=265, PXbus=133, SDRAM=66 */ + {331800, 165900, 0x145, 1}, /* run=331, turbo=331, PXbus=166, SDRAM=83 */ + {398100, 99500, 0x161, 0}, /* run=398, turbo=398, PXbus=196, SDRAM=99 */ + {0,} }; +#define NUM_RUN_FREQS ARRAY_SIZE(pxa255_run_freqs) + +static struct cpufreq_frequency_table pxa255_run_freq_table[NUM_RUN_FREQS+1]; /* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */ static pxa_freqs_t pxa255_turbo_freqs[] = { - /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */ - { 99500, 99500, 0x121, 1, CCLKCFG}, /* 99, 99, 50, 50 */ - {199100, 99500, 0x221, 0, CCLKCFG}, /* 99, 199, 50, 99 */ - {298500, 99500, 0x321, 0, CCLKCFG}, /* 99, 287, 50, 99 */ - {298600, 99500, 0x1c1, 0, CCLKCFG}, /* 199, 287, 99, 99 */ - {398100, 99500, 0x241, 0, CCLKCFG}, /* 199, 398, 99, 99 */ -}; - -#define NUM_PXA25x_RUN_FREQS ARRAY_SIZE(pxa255_run_freqs) -#define NUM_PXA25x_TURBO_FREQS ARRAY_SIZE(pxa255_turbo_freqs) - -static struct cpufreq_frequency_table - pxa255_run_freq_table[NUM_PXA25x_RUN_FREQS+1]; -static struct cpufreq_frequency_table - pxa255_turbo_freq_table[NUM_PXA25x_TURBO_FREQS+1]; - -/* - * PXA270 definitions - * - * For the PXA27x: - * Control variables are A, L, 2N for CCCR; B, HT, T for CLKCFG. - * - * A = 0 => memory controller clock from table 3-7, - * A = 1 => memory controller clock = system bus clock - * Run mode frequency = 13 MHz * L - * Turbo mode frequency = 13 MHz * L * N - * System bus frequency = 13 MHz * L / (B + 1) - * - * In CCCR: - * A = 1 - * L = 16 oscillator to run mode ratio - * 2N = 6 2 * (turbo mode to run mode ratio) - * - * In CCLKCFG: - * B = 1 Fast bus mode - * HT = 0 Half-Turbo mode - * T = 1 Turbo mode - * - * For now, just support some of the combinations in table 3-7 of - * PXA27x Processor Family Developer's Manual to simplify frequency - * change sequences. - */ -#define PXA27x_CCCR(A, L, N2) (A << 25 | N2 << 7 | L) -#define CCLKCFG2(B, HT, T) \ - (CCLKCFG_FCS | \ - ((B) ? CCLKCFG_FASTBUS : 0) | \ - ((HT) ? CCLKCFG_HALFTURBO : 0) | \ - ((T) ? 
CCLKCFG_TURBO : 0)) - -static pxa_freqs_t pxa27x_freqs[] = { - {104000, 104000, PXA27x_CCCR(1, 8, 2), 0, CCLKCFG2(1, 0, 1)}, - {156000, 104000, PXA27x_CCCR(1, 8, 6), 0, CCLKCFG2(1, 1, 1)}, - {208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1)}, - {312000, 208000, PXA27x_CCCR(1, 16, 3), 1, CCLKCFG2(1, 0, 1)}, - {416000, 208000, PXA27x_CCCR(1, 16, 4), 1, CCLKCFG2(1, 0, 1)}, - {520000, 208000, PXA27x_CCCR(1, 16, 5), 1, CCLKCFG2(1, 0, 1)}, - {624000, 208000, PXA27x_CCCR(1, 16, 6), 1, CCLKCFG2(1, 0, 1)} + /* CPU MEMBUS CCCR DIV2*/ + { 99500, 99500, 0x121, 1}, /* run=99, turbo= 99, PXbus=50, SDRAM=50 */ + {199100, 99500, 0x221, 0}, /* run=99, turbo=199, PXbus=50, SDRAM=99 */ + {298500, 99500, 0x321, 0}, /* run=99, turbo=287, PXbus=50, SDRAM=99 */ + {298600, 99500, 0x1c1, 0}, /* run=199, turbo=287, PXbus=99, SDRAM=99 */ + {398100, 99500, 0x241, 0}, /* run=199, turbo=398, PXbus=99, SDRAM=99 */ + {0,} }; +#define NUM_TURBO_FREQS ARRAY_SIZE(pxa255_turbo_freqs) -#define NUM_PXA27x_FREQS ARRAY_SIZE(pxa27x_freqs) -static struct cpufreq_frequency_table - pxa27x_freq_table[NUM_PXA27x_FREQS+1]; +static struct cpufreq_frequency_table pxa255_turbo_freq_table[NUM_TURBO_FREQS+1]; extern unsigned get_clk_frequency_khz(int info); -static void find_freq_tables(struct cpufreq_policy *policy, - struct cpufreq_frequency_table **freq_table, - pxa_freqs_t **pxa_freqs) -{ - if (cpu_is_pxa25x()) { - if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { - *pxa_freqs = pxa255_run_freqs; - *freq_table = pxa255_run_freq_table; - } else if (policy->policy == CPUFREQ_POLICY_POWERSAVE) { - *pxa_freqs = pxa255_turbo_freqs; - *freq_table = pxa255_turbo_freq_table; - } else { - printk("CPU PXA: Unknown policy found. " - "Using CPUFREQ_POLICY_PERFORMANCE\n"); - *pxa_freqs = pxa255_run_freqs; - *freq_table = pxa255_run_freq_table; - } - } - if (cpu_is_pxa27x()) { - *pxa_freqs = pxa27x_freqs; - *freq_table = pxa27x_freq_table; - } -} - -static void pxa27x_guess_max_freq(void) -{ - if (!pxa27x_maxfreq) { - pxa27x_maxfreq = 416000; - printk(KERN_INFO "PXA CPU 27x max frequency not defined " - "(pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n", - pxa27x_maxfreq); - } else { - pxa27x_maxfreq *= 1000; - } -} - -static u32 mdrefr_dri(unsigned int freq) -{ - u32 dri = 0; - - if (cpu_is_pxa25x()) - dri = ((freq * SDRAM_TREF) / (SDRAM_ROWS * 32)); - if (cpu_is_pxa27x()) - dri = ((freq * SDRAM_TREF) / (SDRAM_ROWS - 31)) / 32; - return dri; -} - /* find a valid frequency point */ static int pxa_verify_policy(struct cpufreq_policy *policy) { struct cpufreq_frequency_table *pxa_freqs_table; - pxa_freqs_t *pxa_freqs; int ret; - find_freq_tables(policy, &pxa_freqs_table, &pxa_freqs); + if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { + pxa_freqs_table = pxa255_run_freq_table; + } else if (policy->policy == CPUFREQ_POLICY_POWERSAVE) { + pxa_freqs_table = pxa255_turbo_freq_table; + } else { + printk("CPU PXA: Unknown policy found. 
" + "Using CPUFREQ_POLICY_PERFORMANCE\n"); + pxa_freqs_table = pxa255_run_freq_table; + } + ret = cpufreq_frequency_table_verify(policy, pxa_freqs_table); if (freq_debug) pr_debug("Verified CPU policy: %dKhz min to %dKhz max\n", - policy->min, policy->max); + policy->min, policy->max); return ret; } -static unsigned int pxa_cpufreq_get(unsigned int cpu) -{ - return get_clk_frequency_khz(0); -} - static int pxa_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) + unsigned int target_freq, + unsigned int relation) { struct cpufreq_frequency_table *pxa_freqs_table; pxa_freqs_t *pxa_freq_settings; struct cpufreq_freqs freqs; unsigned int idx; unsigned long flags; - unsigned int new_freq_cpu, new_freq_mem; - unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg; + unsigned int unused, preset_mdrefr, postset_mdrefr; + void *ramstart = phys_to_virt(0xa0000000); /* Get the current policy */ - find_freq_tables(policy, &pxa_freqs_table, &pxa_freq_settings); + if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { + pxa_freq_settings = pxa255_run_freqs; + pxa_freqs_table = pxa255_run_freq_table; + } else if (policy->policy == CPUFREQ_POLICY_POWERSAVE) { + pxa_freq_settings = pxa255_turbo_freqs; + pxa_freqs_table = pxa255_turbo_freq_table; + } else { + printk("CPU PXA: Unknown policy found. " + "Using CPUFREQ_POLICY_PERFORMANCE\n"); + pxa_freq_settings = pxa255_run_freqs; + pxa_freqs_table = pxa255_run_freq_table; + } /* Lookup the next frequency */ if (cpufreq_frequency_table_target(policy, pxa_freqs_table, - target_freq, relation, &idx)) { + target_freq, relation, &idx)) { return -EINVAL; } - new_freq_cpu = pxa_freq_settings[idx].khz; - new_freq_mem = pxa_freq_settings[idx].membus; freqs.old = policy->cur; - freqs.new = new_freq_cpu; + freqs.new = pxa_freq_settings[idx].khz; freqs.cpu = policy->cpu; if (freq_debug) - pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, " - "(SDRAM %d Mhz)\n", - freqs.new / 1000, (pxa_freq_settings[idx].div2) ? - (new_freq_mem / 2000) : (new_freq_mem / 1000)); + pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n", + freqs.new / 1000, (pxa_freq_settings[idx].div2) ? + (pxa_freq_settings[idx].membus / 2000) : + (pxa_freq_settings[idx].membus / 1000)); /* * Tell everyone what we're about to do... @@ -268,16 +177,16 @@ static int pxa_set_target(struct cpufreq_policy *policy, cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); /* Calculate the next MDREFR. If we're slowing down the SDRAM clock - * we need to preset the smaller DRI before the change. If we're - * speeding up we need to set the larger DRI value after the change. + * we need to preset the smaller DRI before the change. If we're speeding + * up we need to set the larger DRI value after the change. */ preset_mdrefr = postset_mdrefr = MDREFR; - if ((MDREFR & MDREFR_DRI_MASK) > mdrefr_dri(new_freq_mem)) { - preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK); - preset_mdrefr |= mdrefr_dri(new_freq_mem); + if ((MDREFR & MDREFR_DRI_MASK) > MDREFR_DRI(pxa_freq_settings[idx].membus)) { + preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK) | + MDREFR_DRI(pxa_freq_settings[idx].membus); } - postset_mdrefr = - (postset_mdrefr & ~MDREFR_DRI_MASK) | mdrefr_dri(new_freq_mem); + postset_mdrefr = (postset_mdrefr & ~MDREFR_DRI_MASK) | + MDREFR_DRI(pxa_freq_settings[idx].membus); /* If we're dividing the memory clock by two for the SDRAM clock, this * must be set prior to the change. 
Clearing the divide must be done @@ -292,27 +201,26 @@ static int pxa_set_target(struct cpufreq_policy *policy, local_irq_save(flags); - /* Set new the CCCR and prepare CCLKCFG */ + /* Set new the CCCR */ CCCR = pxa_freq_settings[idx].cccr; - cclkcfg = pxa_freq_settings[idx].cclkcfg; asm volatile(" \n\ ldr r4, [%1] /* load MDREFR */ \n\ b 2f \n\ - .align 5 \n\ + .align 5 \n\ 1: \n\ - str %3, [%1] /* preset the MDREFR */ \n\ + str %4, [%1] /* preset the MDREFR */ \n\ mcr p14, 0, %2, c6, c0, 0 /* set CCLKCFG[FCS] */ \n\ - str %4, [%1] /* postset the MDREFR */ \n\ + str %5, [%1] /* postset the MDREFR */ \n\ \n\ b 3f \n\ 2: b 1b \n\ 3: nop \n\ " - : "=&r" (unused) - : "r" (&MDREFR), "r" (cclkcfg), - "r" (preset_mdrefr), "r" (postset_mdrefr) - : "r4", "r5"); + : "=&r" (unused) + : "r" (&MDREFR), "r" (CCLKCFG_TURBO|CCLKCFG_FCS), "r" (ramstart), + "r" (preset_mdrefr), "r" (postset_mdrefr) + : "r4", "r5"); local_irq_restore(flags); /* @@ -325,57 +233,38 @@ static int pxa_set_target(struct cpufreq_policy *policy, return 0; } -static __init int pxa_cpufreq_init(struct cpufreq_policy *policy) +static unsigned int pxa_cpufreq_get(unsigned int cpu) { - int i; - unsigned int freq; + return get_clk_frequency_khz(0); +} - /* try to guess pxa27x cpu */ - if (cpu_is_pxa27x()) - pxa27x_guess_max_freq(); +static int pxa_cpufreq_init(struct cpufreq_policy *policy) +{ + int i; /* set default policy and cpuinfo */ policy->governor = CPUFREQ_DEFAULT_GOVERNOR; - if (cpu_is_pxa25x()) - policy->policy = CPUFREQ_POLICY_PERFORMANCE; + policy->policy = CPUFREQ_POLICY_PERFORMANCE; + policy->cpuinfo.max_freq = PXA25x_MAX_FREQ; + policy->cpuinfo.min_freq = PXA25x_MIN_FREQ; policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ - policy->cur = get_clk_frequency_khz(0); /* current freq */ + policy->cur = get_clk_frequency_khz(0); /* current freq */ policy->min = policy->max = policy->cur; - /* Generate pxa25x the run cpufreq_frequency_table struct */ - for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) { + /* Generate the run cpufreq_frequency_table struct */ + for (i = 0; i < NUM_RUN_FREQS; i++) { pxa255_run_freq_table[i].frequency = pxa255_run_freqs[i].khz; pxa255_run_freq_table[i].index = i; } - pxa255_run_freq_table[i].frequency = CPUFREQ_TABLE_END; - /* Generate pxa25x the turbo cpufreq_frequency_table struct */ - for (i = 0; i < NUM_PXA25x_TURBO_FREQS; i++) { - pxa255_turbo_freq_table[i].frequency = - pxa255_turbo_freqs[i].khz; + pxa255_run_freq_table[i].frequency = CPUFREQ_TABLE_END; + /* Generate the turbo cpufreq_frequency_table struct */ + for (i = 0; i < NUM_TURBO_FREQS; i++) { + pxa255_turbo_freq_table[i].frequency = pxa255_turbo_freqs[i].khz; pxa255_turbo_freq_table[i].index = i; } pxa255_turbo_freq_table[i].frequency = CPUFREQ_TABLE_END; - /* Generate the pxa27x cpufreq_frequency_table struct */ - for (i = 0; i < NUM_PXA27x_FREQS; i++) { - freq = pxa27x_freqs[i].khz; - if (freq > pxa27x_maxfreq) - break; - pxa27x_freq_table[i].frequency = freq; - pxa27x_freq_table[i].index = i; - } - pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END; - - /* - * Set the policy's minimum and maximum frequencies from the tables - * just constructed. This sets cpuinfo.mxx_freq, min and max. 
- */ - if (cpu_is_pxa25x()) - cpufreq_frequency_table_cpuinfo(policy, pxa255_run_freq_table); - else if (cpu_is_pxa27x()) - cpufreq_frequency_table_cpuinfo(policy, pxa27x_freq_table); - printk(KERN_INFO "PXA CPU frequency change support initialized\n"); return 0; @@ -386,25 +275,26 @@ static struct cpufreq_driver pxa_cpufreq_driver = { .target = pxa_set_target, .init = pxa_cpufreq_init, .get = pxa_cpufreq_get, - .name = "PXA2xx", + .name = "PXA25x", }; static int __init pxa_cpu_init(void) { int ret = -ENODEV; - if (cpu_is_pxa25x() || cpu_is_pxa27x()) + if (cpu_is_pxa25x()) ret = cpufreq_register_driver(&pxa_cpufreq_driver); return ret; } static void __exit pxa_cpu_exit(void) { - cpufreq_unregister_driver(&pxa_cpufreq_driver); + if (cpu_is_pxa25x()) + cpufreq_unregister_driver(&pxa_cpufreq_driver); } -MODULE_AUTHOR("Intrinsyc Software Inc."); -MODULE_DESCRIPTION("CPU frequency changing driver for the PXA architecture"); +MODULE_AUTHOR ("Intrinsyc Software Inc."); +MODULE_DESCRIPTION ("CPU frequency changing driver for the PXA architecture"); MODULE_LICENSE("GPL"); module_init(pxa_cpu_init); module_exit(pxa_cpu_exit); diff --git a/trunk/arch/arm/mach-pxa/lubbock.c b/trunk/arch/arm/mach-pxa/lubbock.c index 7b9bdd0c6665..0993f4d1a0bc 100644 --- a/trunk/arch/arm/mach-pxa/lubbock.c +++ b/trunk/arch/arm/mach-pxa/lubbock.c @@ -396,7 +396,7 @@ static struct pxafb_mach_info sharp_lm8v31 = { .cmap_inverse = 0, .cmap_static = 0, .lcd_conn = LCD_COLOR_DSTN_16BPP | LCD_PCLK_EDGE_FALL | - LCD_AC_BIAS_FREQ(255), + LCD_AC_BIAS_FREQ(255); }; #define MMC_POLL_RATE msecs_to_jiffies(1000) diff --git a/trunk/arch/arm/mach-pxa/pm.c b/trunk/arch/arm/mach-pxa/pm.c index 7d4debbdcca3..ec1bbf333a3a 100644 --- a/trunk/arch/arm/mach-pxa/pm.c +++ b/trunk/arch/arm/mach-pxa/pm.c @@ -42,17 +42,20 @@ int pxa_pm_enter(suspend_state_t state) if (state != PM_SUSPEND_STANDBY) { pxa_cpu_pm_fns->save(sleep_save); /* before sleeping, calculate and save a checksum */ - for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++) + for (i = 0; i < pxa_cpu_pm_fns->save_size - 1; i++) sleep_save_checksum += sleep_save[i]; } + /* Clear reset status */ + RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR; + /* *** go zzz *** */ pxa_cpu_pm_fns->enter(state); cpu_init(); if (state != PM_SUSPEND_STANDBY) { /* after sleeping, validate the checksum */ - for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++) + for (i = 0; i < pxa_cpu_pm_fns->save_size - 1; i++) checksum += sleep_save[i]; /* if invalid, display message and wait for a hardware reset */ @@ -98,8 +101,7 @@ static int __init pxa_pm_init(void) return -EINVAL; } - sleep_save = kmalloc(pxa_cpu_pm_fns->save_count * sizeof(unsigned long), - GFP_KERNEL); + sleep_save = kmalloc(pxa_cpu_pm_fns->save_size, GFP_KERNEL); if (!sleep_save) { printk(KERN_ERR "failed to alloc memory for pm save\n"); return -ENOMEM; diff --git a/trunk/arch/arm/mach-pxa/poodle.c b/trunk/arch/arm/mach-pxa/poodle.c index 0b30f25cff3c..ca5ac196b47b 100644 --- a/trunk/arch/arm/mach-pxa/poodle.c +++ b/trunk/arch/arm/mach-pxa/poodle.c @@ -326,11 +326,13 @@ static struct platform_device *devices[] __initdata = { static void poodle_poweroff(void) { + RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR; arm_machine_restart('h'); } static void poodle_restart(char mode) { + RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR; arm_machine_restart('h'); } diff --git a/trunk/arch/arm/mach-pxa/pxa25x.c b/trunk/arch/arm/mach-pxa/pxa25x.c index e5b417d14bb0..d9b5450aee5b 100644 --- a/trunk/arch/arm/mach-pxa/pxa25x.c +++ 
b/trunk/arch/arm/mach-pxa/pxa25x.c @@ -150,7 +150,9 @@ static struct clk pxa25x_clks[] = { * More ones like CP and general purpose register values are preserved * with the stack pointer in sleep.S. */ -enum { SLEEP_SAVE_PGSR0, SLEEP_SAVE_PGSR1, SLEEP_SAVE_PGSR2, +enum { SLEEP_SAVE_START = 0, + + SLEEP_SAVE_PGSR0, SLEEP_SAVE_PGSR1, SLEEP_SAVE_PGSR2, SLEEP_SAVE_GAFR0_L, SLEEP_SAVE_GAFR0_U, SLEEP_SAVE_GAFR1_L, SLEEP_SAVE_GAFR1_U, @@ -160,7 +162,7 @@ enum { SLEEP_SAVE_PGSR0, SLEEP_SAVE_PGSR1, SLEEP_SAVE_PGSR2, SLEEP_SAVE_CKEN, - SLEEP_SAVE_COUNT + SLEEP_SAVE_SIZE }; @@ -198,9 +200,6 @@ static void pxa25x_cpu_pm_restore(unsigned long *sleep_save) static void pxa25x_cpu_pm_enter(suspend_state_t state) { - /* Clear reset status */ - RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR; - switch (state) { case PM_SUSPEND_MEM: /* set resume return address */ @@ -211,7 +210,7 @@ static void pxa25x_cpu_pm_enter(suspend_state_t state) } static struct pxa_cpu_pm_fns pxa25x_cpu_pm_fns = { - .save_count = SLEEP_SAVE_COUNT, + .save_size = SLEEP_SAVE_SIZE, .valid = suspend_valid_only_mem, .save = pxa25x_cpu_pm_save, .restore = pxa25x_cpu_pm_restore, diff --git a/trunk/arch/arm/mach-pxa/pxa27x.c b/trunk/arch/arm/mach-pxa/pxa27x.c index 7e945836e129..7a2449dd0fd4 100644 --- a/trunk/arch/arm/mach-pxa/pxa27x.c +++ b/trunk/arch/arm/mach-pxa/pxa27x.c @@ -181,7 +181,9 @@ static struct clk pxa27x_clks[] = { * More ones like CP and general purpose register values are preserved * with the stack pointer in sleep.S. */ -enum { SLEEP_SAVE_PGSR0, SLEEP_SAVE_PGSR1, SLEEP_SAVE_PGSR2, SLEEP_SAVE_PGSR3, +enum { SLEEP_SAVE_START = 0, + + SLEEP_SAVE_PGSR0, SLEEP_SAVE_PGSR1, SLEEP_SAVE_PGSR2, SLEEP_SAVE_PGSR3, SLEEP_SAVE_GAFR0_L, SLEEP_SAVE_GAFR0_U, SLEEP_SAVE_GAFR1_L, SLEEP_SAVE_GAFR1_U, @@ -196,7 +198,7 @@ enum { SLEEP_SAVE_PGSR0, SLEEP_SAVE_PGSR1, SLEEP_SAVE_PGSR2, SLEEP_SAVE_PGSR3, SLEEP_SAVE_PWER, SLEEP_SAVE_PCFR, SLEEP_SAVE_PRER, SLEEP_SAVE_PFER, SLEEP_SAVE_PKWR, - SLEEP_SAVE_COUNT + SLEEP_SAVE_SIZE }; void pxa27x_cpu_pm_save(unsigned long *sleep_save) @@ -249,9 +251,6 @@ void pxa27x_cpu_pm_enter(suspend_state_t state) /* Clear edge-detect status register. 
*/ PEDR = 0xDF12FE1B; - /* Clear reset status */ - RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR; - switch (state) { case PM_SUSPEND_STANDBY: pxa_cpu_standby(); @@ -270,7 +269,7 @@ static int pxa27x_cpu_pm_valid(suspend_state_t state) } static struct pxa_cpu_pm_fns pxa27x_cpu_pm_fns = { - .save_count = SLEEP_SAVE_COUNT, + .save_size = SLEEP_SAVE_SIZE, .save = pxa27x_cpu_pm_save, .restore = pxa27x_cpu_pm_restore, .valid = pxa27x_cpu_pm_valid, diff --git a/trunk/arch/arm/mach-pxa/pxa3xx.c b/trunk/arch/arm/mach-pxa/pxa3xx.c index 644550bfa330..b6a6f5fcc77a 100644 --- a/trunk/arch/arm/mach-pxa/pxa3xx.c +++ b/trunk/arch/arm/mach-pxa/pxa3xx.c @@ -256,11 +256,12 @@ static unsigned long wakeup_src; #define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x #define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x] -enum { SLEEP_SAVE_CKENA, +enum { SLEEP_SAVE_START = 0, + SLEEP_SAVE_CKENA, SLEEP_SAVE_CKENB, SLEEP_SAVE_ACCR, - SLEEP_SAVE_COUNT, + SLEEP_SAVE_SIZE, }; static void pxa3xx_cpu_pm_save(unsigned long *sleep_save) @@ -375,7 +376,7 @@ static int pxa3xx_cpu_pm_valid(suspend_state_t state) } static struct pxa_cpu_pm_fns pxa3xx_cpu_pm_fns = { - .save_count = SLEEP_SAVE_COUNT, + .save_size = SLEEP_SAVE_SIZE, .save = pxa3xx_cpu_pm_save, .restore = pxa3xx_cpu_pm_restore, .valid = pxa3xx_cpu_pm_valid, diff --git a/trunk/arch/arm/mach-pxa/spitz.c b/trunk/arch/arm/mach-pxa/spitz.c index e7d0fcd9b43f..62a02c3927c5 100644 --- a/trunk/arch/arm/mach-pxa/spitz.c +++ b/trunk/arch/arm/mach-pxa/spitz.c @@ -529,6 +529,8 @@ static struct platform_device *devices[] __initdata = { static void spitz_poweroff(void) { + RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR; + pxa_gpio_mode(SPITZ_GPIO_ON_RESET | GPIO_OUT); GPSR(SPITZ_GPIO_ON_RESET) = GPIO_bit(SPITZ_GPIO_ON_RESET); diff --git a/trunk/arch/arm/mach-pxa/spitz_pm.c b/trunk/arch/arm/mach-pxa/spitz_pm.c index 23f050feb208..7a7f5f947cc5 100644 --- a/trunk/arch/arm/mach-pxa/spitz_pm.c +++ b/trunk/arch/arm/mach-pxa/spitz_pm.c @@ -119,6 +119,9 @@ static void spitz_presuspend(void) /* nRESET_OUT Disable */ PSLR |= PSLR_SL_ROD; + /* Clear reset status */ + RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR; + /* Stop 3.6MHz and drive HIGH to PCMCIA and CS */ PCFR = PCFR_GPR_EN | PCFR_OPDE; } diff --git a/trunk/arch/arm/mach-pxa/tosa.c b/trunk/arch/arm/mach-pxa/tosa.c index c2cbd66db814..6458f6d371d9 100644 --- a/trunk/arch/arm/mach-pxa/tosa.c +++ b/trunk/arch/arm/mach-pxa/tosa.c @@ -467,6 +467,8 @@ static struct platform_device *devices[] __initdata = { static void tosa_poweroff(void) { + RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR; + pxa_gpio_mode(TOSA_GPIO_ON_RESET | GPIO_OUT); GPSR(TOSA_GPIO_ON_RESET) = GPIO_bit(TOSA_GPIO_ON_RESET); diff --git a/trunk/arch/arm/mach-sa1100/pm.c b/trunk/arch/arm/mach-sa1100/pm.c index 1693d447a224..246c573e7252 100644 --- a/trunk/arch/arm/mach-sa1100/pm.c +++ b/trunk/arch/arm/mach-sa1100/pm.c @@ -43,18 +43,20 @@ extern void sa1100_cpu_resume(void); * More ones like CP and general purpose register values are preserved * on the stack and then the stack pointer is stored last in sleep.S. 
*/ -enum { SLEEP_SAVE_GPDR, SLEEP_SAVE_GAFR, +enum { SLEEP_SAVE_SP = 0, + + SLEEP_SAVE_GPDR, SLEEP_SAVE_GAFR, SLEEP_SAVE_PPDR, SLEEP_SAVE_PPSR, SLEEP_SAVE_PPAR, SLEEP_SAVE_PSDR, SLEEP_SAVE_Ser1SDCR0, - SLEEP_SAVE_COUNT + SLEEP_SAVE_SIZE }; static int sa11x0_pm_enter(suspend_state_t state) { - unsigned long gpio, sleep_save[SLEEP_SAVE_COUNT]; + unsigned long gpio, sleep_save[SLEEP_SAVE_SIZE]; gpio = GPLR; diff --git a/trunk/arch/arm/plat-s3c24xx/clock.c b/trunk/arch/arm/plat-s3c24xx/clock.c index 3ac8d8d781b3..d84167fb33b1 100644 --- a/trunk/arch/arm/plat-s3c24xx/clock.c +++ b/trunk/arch/arm/plat-s3c24xx/clock.c @@ -411,7 +411,7 @@ static int s3c24xx_clkout_setparent(struct clk *clk, struct clk *parent) clk->parent = parent; - if (clk == &s3c24xx_clkout0) + if (clk == &s3c24xx_dclk0) mask = S3C2410_MISCCR_CLK0_MASK; else { source <<= 4; @@ -437,7 +437,7 @@ struct clk s3c24xx_dclk0 = { struct clk s3c24xx_dclk1 = { .name = "dclk1", .id = -1, - .ctrlbit = S3C2410_DCLKCON_DCLK1EN, + .ctrlbit = S3C2410_DCLKCON_DCLK0EN, .enable = s3c24xx_dclk_enable, .set_parent = s3c24xx_dclk_setparent, .set_rate = s3c24xx_set_dclk_rate, diff --git a/trunk/arch/blackfin/Kconfig b/trunk/arch/blackfin/Kconfig index fd5708523f2e..795d0ac67c21 100644 --- a/trunk/arch/blackfin/Kconfig +++ b/trunk/arch/blackfin/Kconfig @@ -832,7 +832,6 @@ config BANK_0 config BANK_1 hex "Bank 1" default 0x7BB0 - default 0x5558 if BF54x config BANK_2 hex "Bank 2" @@ -964,23 +963,22 @@ endchoice endmenu +if (BF537 || BF533 || BF54x) + menu "CPU Frequency scaling" source "drivers/cpufreq/Kconfig" -config CPU_VOLTAGE - bool "CPU Voltage scaling" - depends on EXPERIMENTAL - depends on CPU_FREQ +config CPU_FREQ + bool default n help - Say Y here if you want CPU voltage scaling according to the CPU frequency. - This option violates the PLL BYPASS recommendation in the Blackfin Processor - manuals. There is a theoretical risk that during VDDINT transitions - the PLL may unlock. - + If you want to enable this option, you should select the + DPMC driver from Character Devices. endmenu +endif + source "net/Kconfig" source "drivers/Kconfig" diff --git a/trunk/arch/blackfin/kernel/asm-offsets.c b/trunk/arch/blackfin/kernel/asm-offsets.c index 881afe9082c7..721f15f3cebf 100644 --- a/trunk/arch/blackfin/kernel/asm-offsets.c +++ b/trunk/arch/blackfin/kernel/asm-offsets.c @@ -56,6 +56,9 @@ int main(void) /* offsets into the thread struct */ DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); DEFINE(THREAD_USP, offsetof(struct thread_struct, usp)); + DEFINE(THREAD_SR, offsetof(struct thread_struct, seqstat)); + DEFINE(PT_SR, offsetof(struct thread_struct, seqstat)); + DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0)); DEFINE(THREAD_PC, offsetof(struct thread_struct, pc)); DEFINE(KERNEL_STACK_SIZE, THREAD_SIZE); diff --git a/trunk/arch/blackfin/kernel/fixed_code.S b/trunk/arch/blackfin/kernel/fixed_code.S index 4b03ba025488..5ed47228a390 100644 --- a/trunk/arch/blackfin/kernel/fixed_code.S +++ b/trunk/arch/blackfin/kernel/fixed_code.S @@ -1,6 +1,6 @@ /* * This file contains sequences of code that will be copied to a - * fixed location, defined in . The interrupt + * fixed location, defined in . The interrupt * handlers ensure that these sequences appear to be atomic when * executed from userspace. 
* These are aligned to 16 bytes, so that we have some space to replace diff --git a/trunk/arch/blackfin/kernel/module.c b/trunk/arch/blackfin/kernel/module.c index 14a42848f37f..8b9fe29d03f4 100644 --- a/trunk/arch/blackfin/kernel/module.c +++ b/trunk/arch/blackfin/kernel/module.c @@ -160,13 +160,6 @@ int module_frob_arch_sections(Elf_Ehdr * hdr, Elf_Shdr * sechdrs, char *secstrings, struct module *mod) { - /* - * XXX: sechdrs are vmalloced in kernel/module.c - * and would be vfreed just after module is loaded, - * so we hack to keep the only information we needed - * in mod->arch to correctly free L1 I/D sram later. - * NOTE: this breaks the semantic of mod->arch structure. - */ Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum; void *dest = NULL; @@ -174,8 +167,8 @@ module_frob_arch_sections(Elf_Ehdr * hdr, Elf_Shdr * sechdrs, if ((strcmp(".l1.text", secstrings + s->sh_name) == 0) || ((strcmp(".text", secstrings + s->sh_name) == 0) && (hdr->e_flags & FLG_CODE_IN_L1) && (s->sh_size > 0))) { + mod->arch.text_l1 = s; dest = l1_inst_sram_alloc(s->sh_size); - mod->arch.text_l1 = dest; if (dest == NULL) { printk(KERN_ERR "module %s: L1 instruction memory allocation failed\n", @@ -189,8 +182,8 @@ module_frob_arch_sections(Elf_Ehdr * hdr, Elf_Shdr * sechdrs, if ((strcmp(".l1.data", secstrings + s->sh_name) == 0) || ((strcmp(".data", secstrings + s->sh_name) == 0) && (hdr->e_flags & FLG_DATA_IN_L1) && (s->sh_size > 0))) { + mod->arch.data_a_l1 = s; dest = l1_data_sram_alloc(s->sh_size); - mod->arch.data_a_l1 = dest; if (dest == NULL) { printk(KERN_ERR "module %s: L1 data memory allocation failed\n", @@ -204,8 +197,8 @@ module_frob_arch_sections(Elf_Ehdr * hdr, Elf_Shdr * sechdrs, if (strcmp(".l1.bss", secstrings + s->sh_name) == 0 || ((strcmp(".bss", secstrings + s->sh_name) == 0) && (hdr->e_flags & FLG_DATA_IN_L1) && (s->sh_size > 0))) { + mod->arch.bss_a_l1 = s; dest = l1_data_sram_alloc(s->sh_size); - mod->arch.bss_a_l1 = dest; if (dest == NULL) { printk(KERN_ERR "module %s: L1 data memory allocation failed\n", @@ -217,8 +210,8 @@ module_frob_arch_sections(Elf_Ehdr * hdr, Elf_Shdr * sechdrs, s->sh_addr = (unsigned long)dest; } if (strcmp(".l1.data.B", secstrings + s->sh_name) == 0) { + mod->arch.data_b_l1 = s; dest = l1_data_B_sram_alloc(s->sh_size); - mod->arch.data_b_l1 = dest; if (dest == NULL) { printk(KERN_ERR "module %s: L1 data memory allocation failed\n", @@ -230,8 +223,8 @@ module_frob_arch_sections(Elf_Ehdr * hdr, Elf_Shdr * sechdrs, s->sh_addr = (unsigned long)dest; } if (strcmp(".l1.bss.B", secstrings + s->sh_name) == 0) { + mod->arch.bss_b_l1 = s; dest = l1_data_B_sram_alloc(s->sh_size); - mod->arch.bss_b_l1 = dest; if (dest == NULL) { printk(KERN_ERR "module %s: L1 data memory allocation failed\n", @@ -423,14 +416,14 @@ module_finalize(const Elf_Ehdr * hdr, void module_arch_cleanup(struct module *mod) { - if (mod->arch.text_l1) - l1_inst_sram_free((void *)mod->arch.text_l1); - if (mod->arch.data_a_l1) - l1_data_sram_free((void *)mod->arch.data_a_l1); - if (mod->arch.bss_a_l1) - l1_data_sram_free((void *)mod->arch.bss_a_l1); - if (mod->arch.data_b_l1) - l1_data_B_sram_free((void *)mod->arch.data_b_l1); - if (mod->arch.bss_b_l1) - l1_data_B_sram_free((void *)mod->arch.bss_b_l1); + if ((mod->arch.text_l1) && (mod->arch.text_l1->sh_addr)) + l1_inst_sram_free((void *)mod->arch.text_l1->sh_addr); + if ((mod->arch.data_a_l1) && (mod->arch.data_a_l1->sh_addr)) + l1_data_sram_free((void *)mod->arch.data_a_l1->sh_addr); + if ((mod->arch.bss_a_l1) && (mod->arch.bss_a_l1->sh_addr)) + 
l1_data_sram_free((void *)mod->arch.bss_a_l1->sh_addr); + if ((mod->arch.data_b_l1) && (mod->arch.data_b_l1->sh_addr)) + l1_data_B_sram_free((void *)mod->arch.data_b_l1->sh_addr); + if ((mod->arch.bss_b_l1) && (mod->arch.bss_b_l1->sh_addr)) + l1_data_B_sram_free((void *)mod->arch.bss_b_l1->sh_addr); + } diff --git a/trunk/arch/blackfin/kernel/process.c b/trunk/arch/blackfin/kernel/process.c index 53c2cd255441..be9fdd00d7cb 100644 --- a/trunk/arch/blackfin/kernel/process.c +++ b/trunk/arch/blackfin/kernel/process.c @@ -245,7 +245,7 @@ unsigned long get_wchan(struct task_struct *p) void finish_atomic_sections (struct pt_regs *regs) { - int __user *up0 = (int __user *)regs->p0; + int __user *up0 = (int __user *)&regs->p0; if (regs->pc < ATOMIC_SEQS_START || regs->pc >= ATOMIC_SEQS_END) return; diff --git a/trunk/arch/blackfin/kernel/ptrace.c b/trunk/arch/blackfin/kernel/ptrace.c index f51ab088098e..b4f062c172c6 100644 --- a/trunk/arch/blackfin/kernel/ptrace.c +++ b/trunk/arch/blackfin/kernel/ptrace.c @@ -185,8 +185,8 @@ void ptrace_disable(struct task_struct *child) { unsigned long tmp; /* make sure the single step bit is not set. */ - tmp = get_reg(child, PT_SYSCFG) & ~TRACE_BITS; - put_reg(child, PT_SYSCFG, tmp); + tmp = get_reg(child, PT_SR) & ~(TRACE_BITS << 16); + put_reg(child, PT_SR, tmp); } long arch_ptrace(struct task_struct *child, long request, long addr, long data) diff --git a/trunk/arch/blackfin/kernel/signal.c b/trunk/arch/blackfin/kernel/signal.c index dbc3bbf846be..cb9d883d493c 100644 --- a/trunk/arch/blackfin/kernel/signal.c +++ b/trunk/arch/blackfin/kernel/signal.c @@ -42,9 +42,6 @@ #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) -/* Location of the trace bit in SYSCFG. */ -#define TRACE_BITS 0x0001 - struct fdpic_func_descriptor { unsigned long text; unsigned long GOT; @@ -228,16 +225,6 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info, regs->r1 = (unsigned long)(&frame->info); regs->r2 = (unsigned long)(&frame->uc); - /* - * Clear the trace flag when entering the signal handler, but - * notify any tracer that was single-stepping it. The tracer - * may want to single-step inside the handler too. 
- */ - if (regs->syscfg & TRACE_BITS) { - regs->syscfg &= ~TRACE_BITS; - ptrace_notify(SIGTRAP); - } - return 0; give_sigsegv: diff --git a/trunk/arch/blackfin/kernel/time-ts.c b/trunk/arch/blackfin/kernel/time-ts.c index e887efc86c29..4482c47c09e5 100644 --- a/trunk/arch/blackfin/kernel/time-ts.c +++ b/trunk/arch/blackfin/kernel/time-ts.c @@ -60,7 +60,7 @@ static inline unsigned long long cycles_2_ns(cycle_t cyc) static cycle_t read_cycles(void) { - return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod); + return get_cycles(); } unsigned long long sched_clock(void) @@ -117,7 +117,7 @@ static void bfin_timer_set_mode(enum clock_event_mode mode, break; } case CLOCK_EVT_MODE_ONESHOT: - bfin_write_TSCALE(TIME_SCALE - 1); + bfin_write_TSCALE(0); bfin_write_TCOUNT(0); bfin_write_TCNTL(TMPWR | TMREN); CSYNC(); @@ -183,14 +183,10 @@ irqreturn_t timer_interrupt(int irq, void *dev_id) static int __init bfin_clockevent_init(void) { - unsigned long timer_clk; - - timer_clk = get_cclk() / TIME_SCALE; - setup_irq(IRQ_CORETMR, &bfin_timer_irq); bfin_timer_init(); - clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift); + clockevent_bfin.mult = div_sc(get_cclk(), NSEC_PER_SEC, clockevent_bfin.shift); clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin); clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin); clockevents_register_device(&clockevent_bfin); diff --git a/trunk/arch/blackfin/mach-bf527/boards/ezkit.c b/trunk/arch/blackfin/mach-bf527/boards/ezkit.c index 8aa49f804228..583d53811f03 100644 --- a/trunk/arch/blackfin/mach-bf527/boards/ezkit.c +++ b/trunk/arch/blackfin/mach-bf527/boards/ezkit.c @@ -32,14 +32,12 @@ #include #include #include -#include #include #include #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) #include #endif #include -#include #include #include #include @@ -52,7 +50,6 @@ #include #include #include -#include #include /* @@ -174,46 +171,6 @@ static struct platform_device bf52x_t350mcqb_device = { }; #endif -#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) -static struct mtd_partition ezkit_partitions[] = { - { - .name = "Bootloader", - .size = 0x40000, - .offset = 0, - }, { - .name = "Kernel", - .size = 0x1C0000, - .offset = MTDPART_OFS_APPEND, - }, { - .name = "RootFS", - .size = MTDPART_SIZ_FULL, - .offset = MTDPART_OFS_APPEND, - } -}; - -static struct physmap_flash_data ezkit_flash_data = { - .width = 2, - .parts = ezkit_partitions, - .nr_parts = ARRAY_SIZE(ezkit_partitions), -}; - -static struct resource ezkit_flash_resource = { - .start = 0x20000000, - .end = 0x203fffff, - .flags = IORESOURCE_MEM, -}; - -static struct platform_device ezkit_flash_device = { - .name = "physmap-flash", - .id = 0, - .dev = { - .platform_data = &ezkit_flash_data, - }, - .num_resources = 1, - .resource = &ezkit_flash_resource, -}; -#endif - #if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) static struct mtd_partition partition_info[] = { { @@ -463,7 +420,11 @@ static struct mtd_partition bfin_spi_flash_partitions[] = { .offset = 0, .mask_flags = MTD_CAP_ROM }, { - .name = "linux kernel", + .name = "kernel", + .size = 0xe0000, + .offset = MTDPART_OFS_APPEND, + }, { + .name = "file system", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } @@ -473,7 +434,7 @@ static struct flash_platform_data bfin_spi_flash_data = { .name = "m25p80", .parts = bfin_spi_flash_partitions, .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), - .type = "m25p16", + 
.type = "m25p64", }; /* SPI flash chip (m25p64) */ @@ -794,24 +755,6 @@ static struct platform_device i2c_bfin_twi_device = { }; #endif -#ifdef CONFIG_I2C_BOARDINFO -static struct i2c_board_info __initdata bfin_i2c_board_info[] = { -#if defined(CONFIG_TWI_LCD) || defined(CONFIG_TWI_LCD_MODULE) - { - I2C_BOARD_INFO("pcf8574_lcd", 0x22), - .type = "pcf8574_lcd", - }, -#endif -#if defined(CONFIG_TWI_KEYPAD) || defined(CONFIG_TWI_KEYPAD_MODULE) - { - I2C_BOARD_INFO("pcf8574_keypad", 0x27), - .type = "pcf8574_keypad", - .irq = IRQ_PF8, - }, -#endif -}; -#endif - #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", @@ -896,32 +839,7 @@ static struct platform_device bfin_gpios_device = { .resource = &bfin_gpios_resources, }; -static const unsigned int cclk_vlev_datasheet[] = -{ - VRPAIR(VLEV_100, 400000000), - VRPAIR(VLEV_105, 426000000), - VRPAIR(VLEV_110, 500000000), - VRPAIR(VLEV_115, 533000000), - VRPAIR(VLEV_120, 600000000), -}; - -static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { - .tuple_tab = cclk_vlev_datasheet, - .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), - .vr_settling_time = 25 /* us */, -}; - -static struct platform_device bfin_dpmc = { - .name = "bfin dpmc", - .dev = { - .platform_data = &bfin_dmpc_vreg_data, - }, -}; - static struct platform_device *stamp_devices[] __initdata = { - - &bfin_dpmc, - #if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) &bf5xx_nand_device, #endif @@ -1003,22 +921,12 @@ static struct platform_device *stamp_devices[] __initdata = { &bfin_device_gpiokeys, #endif -#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) - &ezkit_flash_device, -#endif - &bfin_gpios_device, }; static int __init stamp_init(void) { printk(KERN_INFO "%s(): registering device resources\n", __func__); - -#ifdef CONFIG_I2C_BOARDINFO - i2c_register_board_info(0, bfin_i2c_board_info, - ARRAY_SIZE(bfin_i2c_board_info)); -#endif - platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices)); #if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) spi_register_board_info(bfin_spi_board_info, diff --git a/trunk/arch/blackfin/mach-bf533/boards/cm_bf533.c b/trunk/arch/blackfin/mach-bf533/boards/cm_bf533.c index ed2b0b8f5dc9..a03149c72681 100644 --- a/trunk/arch/blackfin/mach-bf533/boards/cm_bf533.c +++ b/trunk/arch/blackfin/mach-bf533/boards/cm_bf533.c @@ -33,15 +33,12 @@ #include #include #include -#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) #include -#endif #include #include #include #include #include -#include /* * Name the Board for the /proc/cpuinfo @@ -344,37 +341,7 @@ static struct platform_device bfin_pata_device = { }; #endif -static const unsigned int cclk_vlev_datasheet[] = -{ - VRPAIR(VLEV_085, 250000000), - VRPAIR(VLEV_090, 376000000), - VRPAIR(VLEV_095, 426000000), - VRPAIR(VLEV_100, 426000000), - VRPAIR(VLEV_105, 476000000), - VRPAIR(VLEV_110, 476000000), - VRPAIR(VLEV_115, 476000000), - VRPAIR(VLEV_120, 600000000), - VRPAIR(VLEV_125, 600000000), - VRPAIR(VLEV_130, 600000000), -}; - -static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { - .tuple_tab = cclk_vlev_datasheet, - .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), - .vr_settling_time = 25 /* us */, -}; - -static struct platform_device bfin_dpmc = { - .name = "bfin dpmc", - .dev = { - .platform_data = &bfin_dmpc_vreg_data, - }, -}; - static struct platform_device *cm_bf533_devices[] __initdata = { - - 
&bfin_dpmc, - #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) &bfin_uart_device, #endif diff --git a/trunk/arch/blackfin/mach-bf533/boards/ezkit.c b/trunk/arch/blackfin/mach-bf533/boards/ezkit.c index 9d28415163ea..08a7943949ae 100644 --- a/trunk/arch/blackfin/mach-bf533/boards/ezkit.c +++ b/trunk/arch/blackfin/mach-bf533/boards/ezkit.c @@ -42,7 +42,6 @@ #include #include #include -#include /* * Name the Board for the /proc/cpuinfo @@ -351,37 +350,7 @@ static struct platform_device i2c_gpio_device = { }; #endif -static const unsigned int cclk_vlev_datasheet[] = -{ - VRPAIR(VLEV_085, 250000000), - VRPAIR(VLEV_090, 376000000), - VRPAIR(VLEV_095, 426000000), - VRPAIR(VLEV_100, 426000000), - VRPAIR(VLEV_105, 476000000), - VRPAIR(VLEV_110, 476000000), - VRPAIR(VLEV_115, 476000000), - VRPAIR(VLEV_120, 600000000), - VRPAIR(VLEV_125, 600000000), - VRPAIR(VLEV_130, 600000000), -}; - -static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { - .tuple_tab = cclk_vlev_datasheet, - .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), - .vr_settling_time = 25 /* us */, -}; - -static struct platform_device bfin_dpmc = { - .name = "bfin dpmc", - .dev = { - .platform_data = &bfin_dmpc_vreg_data, - }, -}; - static struct platform_device *ezkit_devices[] __initdata = { - - &bfin_dpmc, - #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) &smc91x_device, #endif diff --git a/trunk/arch/blackfin/mach-bf533/boards/stamp.c b/trunk/arch/blackfin/mach-bf533/boards/stamp.c index 7fd35fb32fd5..024f418ae543 100644 --- a/trunk/arch/blackfin/mach-bf533/boards/stamp.c +++ b/trunk/arch/blackfin/mach-bf533/boards/stamp.c @@ -45,7 +45,6 @@ #include #include #include -#include /* * Name the Board for the /proc/cpuinfo @@ -517,37 +516,7 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = { }; #endif -static const unsigned int cclk_vlev_datasheet[] = -{ - VRPAIR(VLEV_085, 250000000), - VRPAIR(VLEV_090, 376000000), - VRPAIR(VLEV_095, 426000000), - VRPAIR(VLEV_100, 426000000), - VRPAIR(VLEV_105, 476000000), - VRPAIR(VLEV_110, 476000000), - VRPAIR(VLEV_115, 476000000), - VRPAIR(VLEV_120, 600000000), - VRPAIR(VLEV_125, 600000000), - VRPAIR(VLEV_130, 600000000), -}; - -static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { - .tuple_tab = cclk_vlev_datasheet, - .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), - .vr_settling_time = 25 /* us */, -}; - -static struct platform_device bfin_dpmc = { - .name = "bfin dpmc", - .dev = { - .platform_data = &bfin_dmpc_vreg_data, - }, -}; - static struct platform_device *stamp_devices[] __initdata = { - - &bfin_dpmc, - #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) &rtc_device, #endif diff --git a/trunk/arch/blackfin/mach-bf537/boards/cm_bf537.c b/trunk/arch/blackfin/mach-bf537/boards/cm_bf537.c index 73f2142875e2..d8a23cd9b9ed 100644 --- a/trunk/arch/blackfin/mach-bf537/boards/cm_bf537.c +++ b/trunk/arch/blackfin/mach-bf537/boards/cm_bf537.c @@ -35,15 +35,12 @@ #include #include #include -#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) #include -#endif #include #include #include #include #include -#include /* * Name the Board for the /proc/cpuinfo @@ -431,37 +428,7 @@ static struct platform_device bfin_pata_device = { }; #endif -static const unsigned int cclk_vlev_datasheet[] = -{ - VRPAIR(VLEV_085, 250000000), - VRPAIR(VLEV_090, 376000000), - VRPAIR(VLEV_095, 426000000), - VRPAIR(VLEV_100, 426000000), - VRPAIR(VLEV_105, 476000000), - VRPAIR(VLEV_110, 476000000), - VRPAIR(VLEV_115, 476000000), - 
VRPAIR(VLEV_120, 500000000), - VRPAIR(VLEV_125, 533000000), - VRPAIR(VLEV_130, 600000000), -}; - -static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { - .tuple_tab = cclk_vlev_datasheet, - .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), - .vr_settling_time = 25 /* us */, -}; - -static struct platform_device bfin_dpmc = { - .name = "bfin dpmc", - .dev = { - .platform_data = &bfin_dmpc_vreg_data, - }, -}; - static struct platform_device *cm_bf537_devices[] __initdata = { - - &bfin_dpmc, - #if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE) &hitachi_fb_device, #endif diff --git a/trunk/arch/blackfin/mach-bf537/boards/stamp.c b/trunk/arch/blackfin/mach-bf537/boards/stamp.c index 9a756d1f3d73..d3727b7c2d7d 100644 --- a/trunk/arch/blackfin/mach-bf537/boards/stamp.c +++ b/trunk/arch/blackfin/mach-bf537/boards/stamp.c @@ -47,7 +47,6 @@ #include #include #include -#include #include /* @@ -818,37 +817,7 @@ static struct platform_device bfin_pata_device = { }; #endif -static const unsigned int cclk_vlev_datasheet[] = -{ - VRPAIR(VLEV_085, 250000000), - VRPAIR(VLEV_090, 376000000), - VRPAIR(VLEV_095, 426000000), - VRPAIR(VLEV_100, 426000000), - VRPAIR(VLEV_105, 476000000), - VRPAIR(VLEV_110, 476000000), - VRPAIR(VLEV_115, 476000000), - VRPAIR(VLEV_120, 500000000), - VRPAIR(VLEV_125, 533000000), - VRPAIR(VLEV_130, 600000000), -}; - -static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { - .tuple_tab = cclk_vlev_datasheet, - .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), - .vr_settling_time = 25 /* us */, -}; - -static struct platform_device bfin_dpmc = { - .name = "bfin dpmc", - .dev = { - .platform_data = &bfin_dmpc_vreg_data, - }, -}; - static struct platform_device *stamp_devices[] __initdata = { - - &bfin_dpmc, - #if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) &bfin_pcmcia_cf_device, #endif diff --git a/trunk/arch/blackfin/mach-bf548/boards/cm_bf548.c b/trunk/arch/blackfin/mach-bf548/boards/cm_bf548.c index 3b74f96d3590..e3e8479fffb5 100644 --- a/trunk/arch/blackfin/mach-bf548/boards/cm_bf548.c +++ b/trunk/arch/blackfin/mach-bf548/boards/cm_bf548.c @@ -36,9 +36,7 @@ #include #include #include -#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) #include -#endif #include #include #include @@ -46,7 +44,6 @@ #include #include #include -#include #include #include @@ -593,38 +590,7 @@ static struct platform_device bfin_device_gpiokeys = { }; #endif -static const unsigned int cclk_vlev_datasheet[] = -{ -/* - * Internal VLEV BF54XSBBC1533 - ****temporarily using these values until data sheet is updated - */ - VRPAIR(VLEV_085, 150000000), - VRPAIR(VLEV_090, 250000000), - VRPAIR(VLEV_110, 276000000), - VRPAIR(VLEV_115, 301000000), - VRPAIR(VLEV_120, 525000000), - VRPAIR(VLEV_125, 550000000), - VRPAIR(VLEV_130, 600000000), -}; - -static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { - .tuple_tab = cclk_vlev_datasheet, - .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), - .vr_settling_time = 25 /* us */, -}; - -static struct platform_device bfin_dpmc = { - .name = "bfin dpmc", - .dev = { - .platform_data = &bfin_dmpc_vreg_data, - }, -}; - static struct platform_device *cm_bf548_devices[] __initdata = { - - &bfin_dpmc, - #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) &rtc_device, #endif diff --git a/trunk/arch/blackfin/mach-bf548/boards/ezkit.c b/trunk/arch/blackfin/mach-bf548/boards/ezkit.c index d1682bb37509..b00f68ac6bc9 100644 --- a/trunk/arch/blackfin/mach-bf548/boards/ezkit.c +++ 
b/trunk/arch/blackfin/mach-bf548/boards/ezkit.c @@ -46,7 +46,6 @@ #include #include #include -#include #include #include #include @@ -690,38 +689,7 @@ static struct platform_device bfin_gpios_device = { .resource = &bfin_gpios_resources, }; -static const unsigned int cclk_vlev_datasheet[] = -{ -/* - * Internal VLEV BF54XSBBC1533 - ****temporarily using these values until data sheet is updated - */ - VRPAIR(VLEV_085, 150000000), - VRPAIR(VLEV_090, 250000000), - VRPAIR(VLEV_110, 276000000), - VRPAIR(VLEV_115, 301000000), - VRPAIR(VLEV_120, 525000000), - VRPAIR(VLEV_125, 550000000), - VRPAIR(VLEV_130, 600000000), -}; - -static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { - .tuple_tab = cclk_vlev_datasheet, - .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), - .vr_settling_time = 25 /* us */, -}; - -static struct platform_device bfin_dpmc = { - .name = "bfin dpmc", - .dev = { - .platform_data = &bfin_dmpc_vreg_data, - }, -}; - static struct platform_device *ezkit_devices[] __initdata = { - - &bfin_dpmc, - #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) &rtc_device, #endif diff --git a/trunk/arch/blackfin/mach-bf561/boards/cm_bf561.c b/trunk/arch/blackfin/mach-bf561/boards/cm_bf561.c index 466ef5929a25..9fd580952fd8 100644 --- a/trunk/arch/blackfin/mach-bf561/boards/cm_bf561.c +++ b/trunk/arch/blackfin/mach-bf561/boards/cm_bf561.c @@ -33,15 +33,12 @@ #include #include #include -#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) #include -#endif #include #include #include #include #include -#include /* * Name the Board for the /proc/cpuinfo @@ -342,37 +339,8 @@ static struct platform_device bfin_pata_device = { }; #endif -static const unsigned int cclk_vlev_datasheet[] = -{ - VRPAIR(VLEV_085, 250000000), - VRPAIR(VLEV_090, 300000000), - VRPAIR(VLEV_095, 313000000), - VRPAIR(VLEV_100, 350000000), - VRPAIR(VLEV_105, 400000000), - VRPAIR(VLEV_110, 444000000), - VRPAIR(VLEV_115, 450000000), - VRPAIR(VLEV_120, 475000000), - VRPAIR(VLEV_125, 500000000), - VRPAIR(VLEV_130, 600000000), -}; - -static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { - .tuple_tab = cclk_vlev_datasheet, - .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), - .vr_settling_time = 25 /* us */, -}; - -static struct platform_device bfin_dpmc = { - .name = "bfin dpmc", - .dev = { - .platform_data = &bfin_dmpc_vreg_data, - }, -}; - static struct platform_device *cm_bf561_devices[] __initdata = { - &bfin_dpmc, - #if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE) &hitachi_fb_device, #endif diff --git a/trunk/arch/blackfin/mach-bf561/boards/ezkit.c b/trunk/arch/blackfin/mach-bf561/boards/ezkit.c index 61d8f7648b24..0d74b7d99209 100644 --- a/trunk/arch/blackfin/mach-bf561/boards/ezkit.c +++ b/trunk/arch/blackfin/mach-bf561/boards/ezkit.c @@ -39,7 +39,6 @@ #include #include #include -#include /* * Name the Board for the /proc/cpuinfo @@ -444,37 +443,7 @@ static struct platform_device i2c_gpio_device = { }; #endif -static const unsigned int cclk_vlev_datasheet[] = -{ - VRPAIR(VLEV_085, 250000000), - VRPAIR(VLEV_090, 300000000), - VRPAIR(VLEV_095, 313000000), - VRPAIR(VLEV_100, 350000000), - VRPAIR(VLEV_105, 400000000), - VRPAIR(VLEV_110, 444000000), - VRPAIR(VLEV_115, 450000000), - VRPAIR(VLEV_120, 475000000), - VRPAIR(VLEV_125, 500000000), - VRPAIR(VLEV_130, 600000000), -}; - -static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { - .tuple_tab = cclk_vlev_datasheet, - .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), - .vr_settling_time = 25 /* us */, -}; 
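[Editorial aside, not part of the patch: the cclk_vlev_datasheet[] tables dropped in these board files pack a core-clock ceiling and a VLEV voltage code into a single unsigned int per entry, and the "bfin dpmc" platform driver removed further down in this diff walks the table to pick the lowest voltage that still supports a requested CCLK. Below is a minimal standalone sketch of that packing and lookup. The bit layout of VRPAIR() shown here is an assumption inferred from the lookup in the dpmc.c hunk (low 16 bits = freq >> 16, high 16 bits = VLEV code), and the VLEV codes used are placeholders, not datasheet values.]

	#include <stdio.h>

	/* Assumed layout, inferred from bfin_get_vlev() in the dpmc.c hunk below:
	 * high 16 bits hold the VLEV code, low 16 bits hold the CCLK ceiling >> 16. */
	#define VRPAIR(vlev, freq)	(((vlev) << 16) | ((freq) >> 16))

	#define VLEV_MAX	0x90	/* placeholder highest VLEV code, illustration only */

	static const unsigned int tuple_tab[] = {
		VRPAIR(0x60, 250000000),	/* hypothetical VLEV codes */
		VRPAIR(0x70, 476000000),
		VRPAIR(VLEV_MAX, 600000000),
	};

	/* Return the first (lowest) voltage whose CCLK ceiling covers the requested
	 * frequency, falling back to the highest level (mirrors bfin_get_vlev()). */
	static unsigned int get_vlev(unsigned int freq)
	{
		unsigned int i;

		freq >>= 16;
		for (i = 0; i < sizeof(tuple_tab) / sizeof(tuple_tab[0]); i++)
			if (freq <= (tuple_tab[i] & 0xFFFF))
				return tuple_tab[i] >> 16;
		return VLEV_MAX;
	}

	int main(void)
	{
		printf("VLEV code for 400 MHz: 0x%x\n", get_vlev(400000000));
		return 0;
	}
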
- -static struct platform_device bfin_dpmc = { - .name = "bfin dpmc", - .dev = { - .platform_data = &bfin_dmpc_vreg_data, - }, -}; - static struct platform_device *ezkit_devices[] __initdata = { - - &bfin_dpmc, - #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) &smc91x_device, #endif diff --git a/trunk/arch/blackfin/mach-common/Makefile b/trunk/arch/blackfin/mach-common/Makefile index 422bfee34adc..393081e9b680 100644 --- a/trunk/arch/blackfin/mach-common/Makefile +++ b/trunk/arch/blackfin/mach-common/Makefile @@ -6,6 +6,5 @@ obj-y := \ cache.o cacheinit.o entry.o \ interrupt.o lock.o irqpanic.o arch_checks.o ints-priority.o -obj-$(CONFIG_PM) += pm.o dpmc_modes.o -obj-$(CONFIG_CPU_FREQ) += cpufreq.o -obj-$(CONFIG_CPU_VOLTAGE) += dpmc.o +obj-$(CONFIG_PM) += pm.o dpmc.o +obj-$(CONFIG_CPU_FREQ) += cpufreq.o diff --git a/trunk/arch/blackfin/mach-common/cpufreq.c b/trunk/arch/blackfin/mach-common/cpufreq.c index 75cdad291e88..ed81e00d20e1 100644 --- a/trunk/arch/blackfin/mach-common/cpufreq.c +++ b/trunk/arch/blackfin/mach-common/cpufreq.c @@ -62,14 +62,6 @@ static struct bfin_dpm_state { unsigned int tscale; /* change the divider on the core timer interrupt */ } dpm_state_table[3]; -/* - normalized to maximum frequncy offset for CYCLES, - used in time-ts cycles clock source, but could be used - somewhere also. - */ -unsigned long long __bfin_cycles_off; -unsigned int __bfin_cycles_mod; - /**************************************************************************/ static unsigned int bfin_getfreq(unsigned int cpu) @@ -88,7 +80,6 @@ static int bfin_target(struct cpufreq_policy *policy, unsigned int index, plldiv, tscale; unsigned long flags, cclk_hz; struct cpufreq_freqs freqs; - cycles_t cycles; if (cpufreq_frequency_table_target(policy, bfin_freq_table, target_freq, relation, &index)) @@ -110,14 +101,8 @@ static int bfin_target(struct cpufreq_policy *policy, bfin_write_PLL_DIV(plldiv); /* we have to adjust the core timer, because it is using cclk */ bfin_write_TSCALE(tscale); - cycles = get_cycles(); SSYNC(); - cycles += 10; /* ~10 cycles we loose after get_cycles() */ - __bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index); - __bfin_cycles_mod = index; local_irq_restore(flags); - /* TODO: just test case for cycles clock source, remove later */ - pr_debug("cpufreq: done\n"); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); return 0; @@ -134,13 +119,22 @@ static int __init __bfin_cpu_init(struct cpufreq_policy *policy) unsigned long cclk, sclk, csel, min_cclk; int index; +#ifdef CONFIG_CYCLES_CLOCKSOURCE +/* + * Clocksource CYCLES is still CONTINUOUS but not longer MONOTONIC in case we enable + * CPU frequency scaling, since CYCLES runs off Core Clock. + */ + printk(KERN_WARNING "CPU frequency scaling not supported: Clocksource not suitable\n" + return -ENODEV; +#endif + if (policy->cpu != 0) return -EINVAL; cclk = get_cclk(); sclk = get_sclk(); -#if ANOMALY_05000273 || (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_DCACHE)) +#if ANOMALY_05000273 min_cclk = sclk * 2; #else min_cclk = sclk; diff --git a/trunk/arch/blackfin/mach-common/dpmc_modes.S b/trunk/arch/blackfin/mach-common/dpmc.S similarity index 82% rename from trunk/arch/blackfin/mach-common/dpmc_modes.S rename to trunk/arch/blackfin/mach-common/dpmc.S index b7981d31c392..9d45aa3265b1 100644 --- a/trunk/arch/blackfin/mach-common/dpmc_modes.S +++ b/trunk/arch/blackfin/mach-common/dpmc.S @@ -1,7 +1,30 @@ /* - * Copyright 2004-2008 Analog Devices Inc. 
+ * File: arch/blackfin/mach-common/dpmc.S + * Based on: + * Author: LG Soft India * - * Licensed under the GPL-2 or later. + * Created: ? + * Description: Watchdog Timer APIs + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include diff --git a/trunk/arch/blackfin/mach-common/dpmc.c b/trunk/arch/blackfin/mach-common/dpmc.c deleted file mode 100644 index 02c7efd1bcf4..000000000000 --- a/trunk/arch/blackfin/mach-common/dpmc.c +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright 2008 Analog Devices Inc. - * - * Licensed under the GPL-2 or later. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#define DRIVER_NAME "bfin dpmc" - -#define dprintk(msg...) \ - cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, DRIVER_NAME, msg) - -struct bfin_dpmc_platform_data *pdata; - -/** - * bfin_set_vlev - Update VLEV field in VR_CTL Reg. - * Avoid BYPASS sequence - */ -static void bfin_set_vlev(unsigned int vlev) -{ - unsigned pll_lcnt; - - pll_lcnt = bfin_read_PLL_LOCKCNT(); - - bfin_write_PLL_LOCKCNT(1); - bfin_write_VR_CTL((bfin_read_VR_CTL() & ~VLEV) | vlev); - bfin_write_PLL_LOCKCNT(pll_lcnt); -} - -/** - * bfin_get_vlev - Get CPU specific VLEV from platform device data - */ -static unsigned int bfin_get_vlev(unsigned int freq) -{ - int i; - - if (!pdata) - goto err_out; - - freq >>= 16; - - for (i = 0; i < pdata->tabsize; i++) - if (freq <= (pdata->tuple_tab[i] & 0xFFFF)) - return pdata->tuple_tab[i] >> 16; - -err_out: - printk(KERN_WARNING "DPMC: No suitable CCLK VDDINT voltage pair found\n"); - return VLEV_120; -} - -#ifdef CONFIG_CPU_FREQ -static int -vreg_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) -{ - struct cpufreq_freqs *freq = data; - - if (val == CPUFREQ_PRECHANGE && freq->old < freq->new) { - bfin_set_vlev(bfin_get_vlev(freq->new)); - udelay(pdata->vr_settling_time); /* Wait until Volatge settled */ - - } else if (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) - bfin_set_vlev(bfin_get_vlev(freq->new)); - - return 0; -} - -static struct notifier_block vreg_cpufreq_notifier_block = { - .notifier_call = vreg_cpufreq_notifier -}; -#endif /* CONFIG_CPU_FREQ */ - -/** - * bfin_dpmc_probe - - * - */ -static int __devinit bfin_dpmc_probe(struct platform_device *pdev) -{ - if (pdev->dev.platform_data) - pdata = pdev->dev.platform_data; - else - return -EINVAL; - - return cpufreq_register_notifier(&vreg_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); -} - -/** - * bfin_dpmc_remove - - */ -static int __devexit bfin_dpmc_remove(struct platform_device *pdev) -{ - pdata = NULL; - return cpufreq_unregister_notifier(&vreg_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); -} - 
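[Editorial aside, not part of the patch: the dpmc.c driver deleted here hooks the cpufreq transition notifier chain so that VDDINT is raised before a frequency increase (PRECHANGE) and lowered only after a decrease has completed (POSTCHANGE). The fragment below restates that ordering as a self-contained sketch; cpufreq_register_notifier(), CPUFREQ_TRANSITION_NOTIFIER and struct cpufreq_freqs are real kernel interfaces, while set_core_voltage() is a hypothetical stand-in for bfin_set_vlev()/bfin_get_vlev().]

	#include <linux/cpufreq.h>
	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/notifier.h>

	/* Hypothetical helper: program the regulator for the new frequency. */
	static void set_core_voltage(unsigned int new_freq)
	{
		/* would select and write a VLEV value here */
	}

	static int vreg_notifier(struct notifier_block *nb, unsigned long val, void *data)
	{
		struct cpufreq_freqs *freq = data;

		if (val == CPUFREQ_PRECHANGE && freq->new > freq->old)
			set_core_voltage(freq->new);	/* step voltage up before speeding up */
		else if (val == CPUFREQ_POSTCHANGE && freq->new < freq->old)
			set_core_voltage(freq->new);	/* step voltage down after slowing down */
		return 0;
	}

	static struct notifier_block vreg_nb = {
		.notifier_call = vreg_notifier,
	};

	static int __init vreg_init(void)
	{
		return cpufreq_register_notifier(&vreg_nb, CPUFREQ_TRANSITION_NOTIFIER);
	}
	module_init(vreg_init);
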
-struct platform_driver bfin_dpmc_device_driver = { - .probe = bfin_dpmc_probe, - .remove = __devexit_p(bfin_dpmc_remove), - .driver = { - .name = DRIVER_NAME, - } -}; - -/** - * bfin_dpmc_init - Init driver - */ -static int __init bfin_dpmc_init(void) -{ - return platform_driver_register(&bfin_dpmc_device_driver); -} -module_init(bfin_dpmc_init); - -/** - * bfin_dpmc_exit - break down driver - */ -static void __exit bfin_dpmc_exit(void) -{ - platform_driver_unregister(&bfin_dpmc_device_driver); -} -module_exit(bfin_dpmc_exit); - -MODULE_AUTHOR("Michael Hennerich "); -MODULE_DESCRIPTION("cpu power management driver for Blackfin"); -MODULE_LICENSE("GPL"); diff --git a/trunk/arch/blackfin/mach-common/entry.S b/trunk/arch/blackfin/mach-common/entry.S index 038f70e0be65..f2fb87e9a46e 100644 --- a/trunk/arch/blackfin/mach-common/entry.S +++ b/trunk/arch/blackfin/mach-common/entry.S @@ -151,62 +151,26 @@ ENTRY(_ex_soft_bp) ENDPROC(_ex_soft_bp) ENTRY(_ex_single_step) - /* If we just returned from an interrupt, the single step event is - for the RTI instruction. */ r7 = retx; r6 = reti; cc = r7 == r6; - if cc jump _bfin_return_from_exception; - - /* If we were in user mode, do the single step normally. */ - p5.l = lo(IPEND); - p5.h = hi(IPEND); - r6 = [p5]; - r7 = 0xffe0 (z); - r7 = r7 & r6; - cc = r7 == 0; - if !cc jump 1f; - - /* Single stepping only a single instruction, so clear the trace - * bit here. */ + if cc jump _bfin_return_from_exception r7 = syscfg; bitclr (r7, 0); syscfg = R7; - jump _ex_trap_c; - -1: - /* - * We were in an interrupt handler. By convention, all of them save - * SYSCFG with their first instruction, so by checking whether our - * RETX points at the entry point, we can determine whether to allow - * a single step, or whether to clear SYSCFG. - * - * First, find out the interrupt level and the event vector for it. - */ - p5.l = lo(EVT0); - p5.h = hi(EVT0); - p5 += -4; -2: - r7 = rot r7 by -1; - p5 += 4; - if !cc jump 2b; - /* What we actually do is test for the _second_ instruction in the - * IRQ handler. That way, if there are insns following the restore - * of SYSCFG after leaving the handler, we will not turn off SYSCFG - * for them. */ - - r7 = [p5]; - r7 += 2; - r6 = RETX; - cc = R7 == R6; - if !cc jump _bfin_return_from_exception; - - r7 = syscfg; - bitclr (r7, 0); - syscfg = R7; - - /* Fall through to _bfin_return_from_exception. */ + p5.l = lo(IPEND); + p5.h = hi(IPEND); + r6 = [p5]; + cc = bittst(r6, 5); + if !cc jump _ex_trap_c; + p4.l = lo(EVT5); + p4.h = hi(EVT5); + r6.h = _exception_to_level5; + r6.l = _exception_to_level5; + r7 = [p4]; + cc = r6 == r7; + if !cc jump _ex_trap_c; ENDPROC(_ex_single_step) ENTRY(_bfin_return_from_exception) @@ -270,26 +234,20 @@ ENTRY(_ex_trap_c) p5.l = _saved_icplb_fault_addr; [p5] = r7; - p4.l = _excpt_saved_stuff; - p4.h = _excpt_saved_stuff; - + p4.l = __retx; + p4.h = __retx; r6 = retx; [p4] = r6; - - r6 = SYSCFG; - [p4 + 4] = r6; - BITCLR(r6, 0); - SYSCFG = r6; - - /* Disable all interrupts, but make sure level 5 is enabled so - * we can switch to that level. Save the old mask. */ - cli r6; - [p4 + 8] = r6; - p4.l = lo(SAFE_USER_INSTRUCTION); p4.h = hi(SAFE_USER_INSTRUCTION); retx = p4; + /* Disable all interrupts, but make sure level 5 is enabled so + * we can switch to that level. Save the old mask. 
*/ + cli r6; + p4.l = _excpt_saved_imask; + p4.h = _excpt_saved_imask; + [p4] = r6; r6 = 0x3f; sti r6; @@ -337,11 +295,6 @@ ENTRY(_double_fault) */ SAVE_ALL_SYS - /* The dumping functions expect the return address in the RETI - * slot. */ - r6 = retx; - [sp + PT_PC] = r6; - r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */ SP += -12; call _double_fault_c; @@ -354,17 +307,16 @@ ENDPROC(_double_fault) ENTRY(_exception_to_level5) SAVE_ALL_SYS - p4.l = _excpt_saved_stuff; - p4.h = _excpt_saved_stuff; + p4.l = __retx; + p4.h = __retx; r6 = [p4]; [sp + PT_PC] = r6; - r6 = [p4 + 4]; - [sp + PT_SYSCFG] = r6; - /* Restore interrupt mask. We haven't pushed RETI, so this * doesn't enable interrupts until we return from this handler. */ - r6 = [p4 + 8]; + p4.l = _excpt_saved_imask; + p4.h = _excpt_saved_imask; + r6 = [p4]; sti r6; /* Restore the hardware error vector. */ @@ -1392,14 +1344,7 @@ ENTRY(_sys_call_table) .rept NR_syscalls-(.-_sys_call_table)/4 .long _sys_ni_syscall .endr - - /* - * Used to save the real RETX, IMASK and SYSCFG when temporarily - * storing safe values across the transition from exception to IRQ5. - */ -_excpt_saved_stuff: - .long 0; - .long 0; +_excpt_saved_imask: .long 0; _exception_stack: @@ -1413,3 +1358,7 @@ _exception_stack_top: _last_cplb_fault_retx: .long 0; #endif + /* Used to save the real RETX when temporarily storing a safe + * return address. */ +__retx: + .long 0; diff --git a/trunk/arch/cris/kernel/sys_cris.c b/trunk/arch/cris/kernel/sys_cris.c index a79fbd87021b..8b9984197edc 100644 --- a/trunk/arch/cris/kernel/sys_cris.c +++ b/trunk/arch/cris/kernel/sys_cris.c @@ -27,6 +27,25 @@ #include #include +/* + * sys_pipe() is the normal C calling standard for creating + * a pipe. It's not the way Unix traditionally does this, though. + */ +asmlinkage int sys_pipe(unsigned long __user * fildes) +{ + int fd[2]; + int error; + + lock_kernel(); + error = do_pipe(fd); + unlock_kernel(); + if (!error) { + if (copy_to_user(fildes, fd, 2*sizeof(int))) + error = -EFAULT; + } + return error; +} + /* common code for old and new mmaps */ static inline long do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, diff --git a/trunk/arch/m32r/kernel/sys_m32r.c b/trunk/arch/m32r/kernel/sys_m32r.c index 305ac852bbed..6d7a80fdad48 100644 --- a/trunk/arch/m32r/kernel/sys_m32r.c +++ b/trunk/arch/m32r/kernel/sys_m32r.c @@ -76,6 +76,26 @@ asmlinkage int sys_tas(int __user *addr) return oldval; } +/* + * sys_pipe() is the normal C calling standard for creating + * a pipe. It's not the way Unix traditionally does this, though. + */ +asmlinkage int +sys_pipe(unsigned long r0, unsigned long r1, unsigned long r2, + unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, struct pt_regs regs) +{ + int fd[2]; + int error; + + error = do_pipe(fd); + if (!error) { + if (copy_to_user((void __user *)r0, fd, 2*sizeof(int))) + error = -EFAULT; + } + return error; +} + asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) diff --git a/trunk/arch/m68k/kernel/traps.c b/trunk/arch/m68k/kernel/traps.c index 75b8340b254b..fd4858e2dd63 100644 --- a/trunk/arch/m68k/kernel/traps.c +++ b/trunk/arch/m68k/kernel/traps.c @@ -468,26 +468,15 @@ static inline void access_error040(struct frame *fp) * (if do_page_fault didn't fix the mapping, * the writeback won't do good) */ -disable_wb: #ifdef DEBUG printk(".. 
disabling wb2\n"); #endif if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr) fp->un.fmt7.wb2s &= ~WBV_040; - if (fp->un.fmt7.wb3a == fp->un.fmt7.faddr) - fp->un.fmt7.wb3s &= ~WBV_040; } - } else { - /* In case of a bus error we either kill the process or expect - * the kernel to catch the fault, which then is also responsible - * for cleaning up the mess. - */ - current->thread.signo = SIGBUS; - current->thread.faddr = fp->un.fmt7.faddr; - if (send_fault_sig(&fp->ptregs) >= 0) - printk("68040 bus error (ssw=%x, faddr=%lx)\n", ssw, - fp->un.fmt7.faddr); - goto disable_wb; + } else if (send_fault_sig(&fp->ptregs) > 0) { + printk("68040 access error, ssw=%x\n", ssw); + trap_c(fp); } do_040writebacks(fp); diff --git a/trunk/arch/m68k/mac/config.c b/trunk/arch/m68k/mac/config.c index ad3e3bacae39..735a49b4b936 100644 --- a/trunk/arch/m68k/mac/config.c +++ b/trunk/arch/m68k/mac/config.c @@ -48,6 +48,9 @@ struct mac_booter_data mac_bi_data; int mac_bisize = sizeof mac_bi_data; +struct mac_hw_present mac_hw_present; +EXPORT_SYMBOL(mac_hw_present); + /* New m68k bootinfo stuff and videobase */ extern int m68k_num_memory; @@ -814,6 +817,27 @@ void __init mac_identify(void) m68k_ramdisk.addr, m68k_ramdisk.size); #endif + /* + * TODO: set the various fields in macintosh_config->hw_present here! + */ + switch (macintosh_config->scsi_type) { + case MAC_SCSI_OLD: + MACHW_SET(MAC_SCSI_80); + break; + case MAC_SCSI_QUADRA: + case MAC_SCSI_QUADRA2: + case MAC_SCSI_QUADRA3: + MACHW_SET(MAC_SCSI_96); + if ((macintosh_config->ident == MAC_MODEL_Q900) || + (macintosh_config->ident == MAC_MODEL_Q950)) + MACHW_SET(MAC_SCSI_96_2); + break; + default: + printk(KERN_WARNING "config.c: wtf: unknown scsi, using 53c80\n"); + MACHW_SET(MAC_SCSI_80); + break; + } + iop_init(); via_init(); oss_init(); diff --git a/trunk/arch/mn10300/Kconfig b/trunk/arch/mn10300/Kconfig index e856218da90d..6a6409adc564 100644 --- a/trunk/arch/mn10300/Kconfig +++ b/trunk/arch/mn10300/Kconfig @@ -186,6 +186,17 @@ config PREEMPT Say Y here if you are building a kernel for a desktop, embedded or real-time system. Say N if you are unsure. +config PREEMPT_BKL + bool "Preempt The Big Kernel Lock" + depends on PREEMPT + default y + help + This option reduces the latency of the kernel by making the + big kernel lock preemptible. + + Say Y here if you are building a kernel for a desktop system. + Say N if you are unsure. + config MN10300_CURRENT_IN_E2 bool "Hold current task address in E2 register" default y diff --git a/trunk/arch/powerpc/boot/dts/sequoia.dts b/trunk/arch/powerpc/boot/dts/sequoia.dts index 72d67564bdfc..a1ae4d6ec990 100644 --- a/trunk/arch/powerpc/boot/dts/sequoia.dts +++ b/trunk/arch/powerpc/boot/dts/sequoia.dts @@ -342,14 +342,9 @@ /* Outbound ranges, one memory and one IO, * later cannot be changed. 
Chip supports a second * IO range but we don't use it for now - * From the 440EPx user manual: - * PCI 1 Memory 1 8000 0000 1 BFFF FFFF 1GB - * I/O 1 E800 0000 1 E800 FFFF 64KB - * I/O 1 E880 0000 1 EBFF FFFF 56MB */ - ranges = <02000000 0 80000000 1 80000000 0 40000000 - 01000000 0 00000000 1 e8000000 0 00010000 - 01000000 0 00000000 1 e8800000 0 03800000>; + ranges = <02000000 0 80000000 1 80000000 0 10000000 + 01000000 0 00000000 1 e8000000 0 00100000>; /* Inbound 2GB range starting at 0 */ dma-ranges = <42000000 0 0 0 0 0 80000000>; diff --git a/trunk/arch/powerpc/kernel/btext.c b/trunk/arch/powerpc/kernel/btext.c index d8f0329b1344..9f9377745490 100644 --- a/trunk/arch/powerpc/kernel/btext.c +++ b/trunk/arch/powerpc/kernel/btext.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include diff --git a/trunk/arch/powerpc/kernel/cputable.c b/trunk/arch/powerpc/kernel/cputable.c index 81738a4b3c3a..36080d4d1922 100644 --- a/trunk/arch/powerpc/kernel/cputable.c +++ b/trunk/arch/powerpc/kernel/cputable.c @@ -1208,18 +1208,6 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_4xx, .platform = "ppc405", }, - { /* default match */ - .pvr_mask = 0x00000000, - .pvr_value = 0x00000000, - .cpu_name = "(generic 40x PPC)", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - } #endif /* CONFIG_40x */ #ifdef CONFIG_44x @@ -1433,18 +1421,8 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_440A, .platform = "ppc440", }, - { /* default match */ - .pvr_mask = 0x00000000, - .pvr_value = 0x00000000, - .cpu_name = "(generic 44x PPC)", - .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER_BOOKE, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc440", - } #endif /* CONFIG_44x */ +#ifdef CONFIG_FSL_BOOKE #ifdef CONFIG_E200 { /* e200z5 */ .pvr_mask = 0xfff00000, @@ -1473,19 +1451,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_e200, .platform = "ppc5554", }, - { /* default match */ - .pvr_mask = 0x00000000, - .pvr_value = 0x00000000, - .cpu_name = "(generic E200 PPC)", - .cpu_features = CPU_FTRS_E200, - .cpu_user_features = COMMON_USER_BOOKE | - PPC_FEATURE_HAS_EFP_SINGLE | - PPC_FEATURE_UNIFIED_CACHE, - .dcache_bsize = 32, - .machine_check = machine_check_e200, - .platform = "ppc5554", -#endif /* CONFIG_E200 */ -#ifdef CONFIG_E500 +#elif defined(CONFIG_E500) { /* e500 */ .pvr_mask = 0xffff0000, .pvr_value = 0x80200000, @@ -1521,19 +1487,20 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_e500, .platform = "ppc8548", }, +#endif +#endif +#if !CLASSIC_PPC { /* default match */ .pvr_mask = 0x00000000, .pvr_value = 0x00000000, - .cpu_name = "(generic E500 PPC)", - .cpu_features = CPU_FTRS_E500, - .cpu_user_features = COMMON_USER_BOOKE | - PPC_FEATURE_HAS_SPE_COMP | - PPC_FEATURE_HAS_EFP_SINGLE_COMP, + .cpu_name = "(generic PPC)", + .cpu_features = CPU_FTRS_GENERIC_32, + .cpu_user_features = PPC_FEATURE_32, .icache_bsize = 32, .dcache_bsize = 32, - .machine_check = machine_check_e500, .platform = "powerpc", -#endif /* CONFIG_E500 */ + } +#endif /* !CLASSIC_PPC */ #endif /* CONFIG_PPC32 */ }; diff --git a/trunk/arch/powerpc/kernel/head_44x.S b/trunk/arch/powerpc/kernel/head_44x.S index c2b9dc4fce5d..b84ec6a2fc94 100644 --- 
a/trunk/arch/powerpc/kernel/head_44x.S +++ b/trunk/arch/powerpc/kernel/head_44x.S @@ -653,14 +653,7 @@ finish_tlb_load: rlwimi r10, r11, 0, 26, 26 /* UX = HWEXEC & USER */ rlwimi r12, r10, 0, 26, 31 /* Insert static perms */ - - /* - * Clear U0-U3 and WL1 IL1I IL1D IL2I IL2D bits which are added - * on newer 440 cores like the 440x6 used on AMCC 460EX/460GT (see - * include/asm-powerpc/pgtable-ppc32.h for details). - */ - rlwinm r12, r12, 0, 20, 10 - + rlwinm r12, r12, 0, 20, 15 /* Clear U0-U3 */ tlbwe r12, r13, PPC44x_TLB_ATTRIB /* Write ATTRIB */ /* Done...restore registers and get out of here. diff --git a/trunk/arch/powerpc/kernel/head_64.S b/trunk/arch/powerpc/kernel/head_64.S index 25e84c0e1166..024805e1747d 100644 --- a/trunk/arch/powerpc/kernel/head_64.S +++ b/trunk/arch/powerpc/kernel/head_64.S @@ -1517,6 +1517,10 @@ _INIT_STATIC(start_here_multiplatform) addi r2,r2,0x4000 add r2,r2,r26 + /* Set initial ptr to current */ + LOAD_REG_IMMEDIATE(r4, init_task) + std r4,PACACURRENT(r13) + /* Do very early kernel initializations, including initial hash table, * stab and slb setup before we turn on relocation. */ diff --git a/trunk/arch/powerpc/kernel/isa-bridge.c b/trunk/arch/powerpc/kernel/isa-bridge.c index 4d5731b2429a..289af348978d 100644 --- a/trunk/arch/powerpc/kernel/isa-bridge.c +++ b/trunk/arch/powerpc/kernel/isa-bridge.c @@ -108,6 +108,9 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node, if (size > 0x10000) size = 0x10000; + printk(KERN_ERR "no ISA IO ranges or unexpected isa range, " + "mapping 64k\n"); + __ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE, size, _PAGE_NO_CACHE|_PAGE_GUARDED); return; diff --git a/trunk/arch/powerpc/kernel/setup_64.c b/trunk/arch/powerpc/kernel/setup_64.c index 098fd96a394a..25e3fd8606ab 100644 --- a/trunk/arch/powerpc/kernel/setup_64.c +++ b/trunk/arch/powerpc/kernel/setup_64.c @@ -170,8 +170,6 @@ void __init setup_paca(int cpu) void __init early_setup(unsigned long dt_ptr) { - /* -------- printk is _NOT_ safe to use here ! ------- */ - /* Fill in any unititialised pacas */ initialise_pacas(); @@ -181,14 +179,12 @@ void __init early_setup(unsigned long dt_ptr) /* Assume we're on cpu 0 for now. Don't write to the paca yet! 
*/ setup_paca(0); - /* Initialize lockdep early or else spinlocks will blow */ - lockdep_init(); - - /* -------- printk is now safe to use ------- */ - /* Enable early debugging if any specified (see udbg.h) */ udbg_early_init(); + /* Initialize lockdep early or else spinlocks will blow */ + lockdep_init(); + DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr); /* diff --git a/trunk/arch/powerpc/kvm/booke_guest.c b/trunk/arch/powerpc/kvm/booke_guest.c index 712d89a28c46..6d9884a6884a 100644 --- a/trunk/arch/powerpc/kvm/booke_guest.c +++ b/trunk/arch/powerpc/kvm/booke_guest.c @@ -49,7 +49,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "inst_emu", VCPU_STAT(emulated_inst_exits) }, { "dec", VCPU_STAT(dec_exits) }, { "ext_intr", VCPU_STAT(ext_intr_exits) }, - { "halt_wakeup", VCPU_STAT(halt_wakeup) }, { NULL } }; @@ -339,11 +338,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, } break; - case BOOKE_INTERRUPT_FP_UNAVAIL: - kvmppc_queue_exception(vcpu, exit_nr); - r = RESUME_GUEST; - break; - case BOOKE_INTERRUPT_DATA_STORAGE: vcpu->arch.dear = vcpu->arch.fault_dear; vcpu->arch.esr = vcpu->arch.fault_esr; diff --git a/trunk/arch/powerpc/kvm/powerpc.c b/trunk/arch/powerpc/kvm/powerpc.c index 777e0f34e0ea..bad40bd2d3ac 100644 --- a/trunk/arch/powerpc/kvm/powerpc.c +++ b/trunk/arch/powerpc/kvm/powerpc.c @@ -36,12 +36,13 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) int kvm_cpu_has_interrupt(struct kvm_vcpu *v) { - return !!(v->arch.pending_exceptions); + /* XXX implement me */ + return 0; } int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) { - return !(v->arch.msr & MSR_WE); + return 1; } @@ -213,11 +214,6 @@ static void kvmppc_decrementer_func(unsigned long data) struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER); - - if (waitqueue_active(&vcpu->wq)) { - wake_up_interruptible(&vcpu->wq); - vcpu->stat.halt_wakeup++; - } } int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) @@ -343,8 +339,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int r; sigset_t sigsaved; - vcpu_load(vcpu); - if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); @@ -369,20 +363,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); - vcpu_put(vcpu); - return r; } int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL); - - if (waitqueue_active(&vcpu->wq)) { - wake_up_interruptible(&vcpu->wq); - vcpu->stat.halt_wakeup++; - } - return 0; } diff --git a/trunk/arch/powerpc/lib/Makefile b/trunk/arch/powerpc/lib/Makefile index f1d2cdc5331b..4bb023f4c869 100644 --- a/trunk/arch/powerpc/lib/Makefile +++ b/trunk/arch/powerpc/lib/Makefile @@ -23,4 +23,3 @@ obj-$(CONFIG_SMP) += locks.o endif obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o -obj-$(CONFIG_HAS_IOMEM) += devres.o diff --git a/trunk/arch/powerpc/lib/devres.c b/trunk/arch/powerpc/lib/devres.c deleted file mode 100644 index 292115d98ea9..000000000000 --- a/trunk/arch/powerpc/lib/devres.c +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (C) 2008 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#include /* devres_*(), devm_ioremap_release() */ -#include /* ioremap_flags() */ -#include /* EXPORT_SYMBOL() */ - -/** - * devm_ioremap_prot - Managed ioremap_flags() - * @dev: Generic device to remap IO address for - * @offset: BUS offset to map - * @size: Size of map - * @flags: Page flags - * - * Managed ioremap_prot(). Map is automatically unmapped on driver - * detach. - */ -void __iomem *devm_ioremap_prot(struct device *dev, resource_size_t offset, - size_t size, unsigned long flags) -{ - void __iomem **ptr, *addr; - - ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL); - if (!ptr) - return NULL; - - addr = ioremap_flags(offset, size, flags); - if (addr) { - *ptr = addr; - devres_add(dev, ptr); - } else - devres_free(ptr); - - return addr; -} -EXPORT_SYMBOL(devm_ioremap_prot); diff --git a/trunk/arch/powerpc/platforms/cell/interrupt.c b/trunk/arch/powerpc/platforms/cell/interrupt.c index 5bf7df146022..04f74f9f9ab6 100644 --- a/trunk/arch/powerpc/platforms/cell/interrupt.c +++ b/trunk/arch/powerpc/platforms/cell/interrupt.c @@ -35,7 +35,6 @@ #include #include #include -#include #include #include @@ -232,54 +231,6 @@ static int iic_host_match(struct irq_host *h, struct device_node *node) "IBM,CBEA-Internal-Interrupt-Controller"); } -extern int noirqdebug; - -static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) -{ - const unsigned int cpu = smp_processor_id(); - - spin_lock(&desc->lock); - - desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); - - /* - * If we're currently running this IRQ, or its disabled, - * we shouldn't process the IRQ. Mark it pending, handle - * the necessary masking and go out - */ - if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || - !desc->action)) { - desc->status |= IRQ_PENDING; - goto out_eoi; - } - - kstat_cpu(cpu).irqs[irq]++; - - /* Mark the IRQ currently in progress.*/ - desc->status |= IRQ_INPROGRESS; - - do { - struct irqaction *action = desc->action; - irqreturn_t action_ret; - - if (unlikely(!action)) - goto out_eoi; - - desc->status &= ~IRQ_PENDING; - spin_unlock(&desc->lock); - action_ret = handle_IRQ_event(irq, action); - if (!noirqdebug) - note_interrupt(irq, desc, action_ret); - spin_lock(&desc->lock); - - } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); - - desc->status &= ~IRQ_INPROGRESS; -out_eoi: - desc->chip->eoi(irq); - spin_unlock(&desc->lock); -} - static int iic_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { @@ -289,10 +240,10 @@ static int iic_host_map(struct irq_host *h, unsigned int virq, break; case IIC_IRQ_TYPE_IOEXC: set_irq_chip_and_handler(virq, &iic_ioexc_chip, - handle_iic_irq); + handle_fasteoi_irq); break; default: - set_irq_chip_and_handler(virq, &iic_chip, handle_iic_irq); + set_irq_chip_and_handler(virq, &iic_chip, handle_fasteoi_irq); } return 0; } diff --git a/trunk/arch/powerpc/platforms/cell/spu_base.c b/trunk/arch/powerpc/platforms/cell/spu_base.c index 70c660121ec4..6bab44b7716b 100644 --- a/trunk/arch/powerpc/platforms/cell/spu_base.c +++ b/trunk/arch/powerpc/platforms/cell/spu_base.c @@ -141,10 +141,6 @@ static void spu_restart_dma(struct spu *spu) if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags)) out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND); - else { - set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags); - mb(); - } } static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb) @@ -230,13 +226,11 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr) return 
0; } - spu->class_1_dar = ea; - spu->class_1_dsisr = dsisr; - - spu->stop_callback(spu, 1); + spu->class_0_pending = 0; + spu->dar = ea; + spu->dsisr = dsisr; - spu->class_1_dar = 0; - spu->class_1_dsisr = 0; + spu->stop_callback(spu); return 0; } @@ -324,15 +318,11 @@ spu_irq_class_0(int irq, void *data) stat = spu_int_stat_get(spu, 0) & mask; spu->class_0_pending |= stat; - spu->class_0_dsisr = spu_mfc_dsisr_get(spu); - spu->class_0_dar = spu_mfc_dar_get(spu); + spu->dsisr = spu_mfc_dsisr_get(spu); + spu->dar = spu_mfc_dar_get(spu); spin_unlock(&spu->register_lock); - spu->stop_callback(spu, 0); - - spu->class_0_pending = 0; - spu->class_0_dsisr = 0; - spu->class_0_dar = 0; + spu->stop_callback(spu); spu_int_stat_clear(spu, 0, stat); @@ -373,9 +363,6 @@ spu_irq_class_1(int irq, void *data) if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR) ; - spu->class_1_dsisr = 0; - spu->class_1_dar = 0; - return stat ? IRQ_HANDLED : IRQ_NONE; } @@ -409,10 +396,10 @@ spu_irq_class_2(int irq, void *data) spu->ibox_callback(spu); if (stat & CLASS2_SPU_STOP_INTR) - spu->stop_callback(spu, 2); + spu->stop_callback(spu); if (stat & CLASS2_SPU_HALT_INTR) - spu->stop_callback(spu, 2); + spu->stop_callback(spu); if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR) spu->mfc_callback(spu); diff --git a/trunk/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/trunk/arch/powerpc/platforms/cell/spu_priv1_mmio.c index 906a0a2a9fe1..67fa7247b80a 100644 --- a/trunk/arch/powerpc/platforms/cell/spu_priv1_mmio.c +++ b/trunk/arch/powerpc/platforms/cell/spu_priv1_mmio.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include @@ -76,19 +75,8 @@ static u64 int_stat_get(struct spu *spu, int class) static void cpu_affinity_set(struct spu *spu, int cpu) { - u64 target; - u64 route; - - if (nr_cpus_node(spu->node)) { - cpumask_t spumask = node_to_cpumask(spu->node); - cpumask_t cpumask = node_to_cpumask(cpu_to_node(cpu)); - - if (!cpus_intersects(spumask, cpumask)) - return; - } - - target = iic_get_target_id(cpu); - route = target << 48 | target << 32 | target << 16; + u64 target = iic_get_target_id(cpu); + u64 route = target << 48 | target << 32 | target << 16; out_be64(&spu->priv1->int_route_RW, route); } diff --git a/trunk/arch/powerpc/platforms/cell/spufs/fault.c b/trunk/arch/powerpc/platforms/cell/spufs/fault.c index f093a581ac74..e46d300e21a5 100644 --- a/trunk/arch/powerpc/platforms/cell/spufs/fault.c +++ b/trunk/arch/powerpc/platforms/cell/spufs/fault.c @@ -83,18 +83,13 @@ int spufs_handle_class0(struct spu_context *ctx) return 0; if (stat & CLASS0_DMA_ALIGNMENT_INTR) - spufs_handle_event(ctx, ctx->csa.class_0_dar, - SPE_EVENT_DMA_ALIGNMENT); + spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_DMA_ALIGNMENT); if (stat & CLASS0_INVALID_DMA_COMMAND_INTR) - spufs_handle_event(ctx, ctx->csa.class_0_dar, - SPE_EVENT_INVALID_DMA); + spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_INVALID_DMA); if (stat & CLASS0_SPU_ERROR_INTR) - spufs_handle_event(ctx, ctx->csa.class_0_dar, - SPE_EVENT_SPE_ERROR); - - ctx->csa.class_0_pending = 0; + spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_SPE_ERROR); return -EIO; } @@ -124,8 +119,8 @@ int spufs_handle_class1(struct spu_context *ctx) * in time, we can still expect to get the same fault * the immediately after the context restore. 
*/ - ea = ctx->csa.class_1_dar; - dsisr = ctx->csa.class_1_dsisr; + ea = ctx->csa.dar; + dsisr = ctx->csa.dsisr; if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))) return 0; @@ -163,7 +158,7 @@ int spufs_handle_class1(struct spu_context *ctx) * time slicing will not preempt the context while the page fault * handler is running. Context switch code removes mappings. */ - ctx->csa.class_1_dar = ctx->csa.class_1_dsisr = 0; + ctx->csa.dar = ctx->csa.dsisr = 0; /* * If we handled the fault successfully and are in runnable diff --git a/trunk/arch/powerpc/platforms/cell/spufs/inode.c b/trunk/arch/powerpc/platforms/cell/spufs/inode.c index f407b2471855..0c32a05ab068 100644 --- a/trunk/arch/powerpc/platforms/cell/spufs/inode.c +++ b/trunk/arch/powerpc/platforms/cell/spufs/inode.c @@ -23,7 +23,6 @@ #include #include -#include #include #include #include @@ -224,7 +223,7 @@ static int spufs_dir_close(struct inode *inode, struct file *file) parent = dir->d_parent->d_inode; ctx = SPUFS_I(dir->d_inode)->i_ctx; - mutex_lock_nested(&parent->i_mutex, I_MUTEX_PARENT); + mutex_lock(&parent->i_mutex); ret = spufs_rmdir(parent, dir); mutex_unlock(&parent->i_mutex); WARN_ON(ret); @@ -619,15 +618,12 @@ long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode, mode &= ~current->fs->umask; if (flags & SPU_CREATE_GANG) - ret = spufs_create_gang(nd->path.dentry->d_inode, + return spufs_create_gang(nd->path.dentry->d_inode, dentry, nd->path.mnt, mode); else - ret = spufs_create_context(nd->path.dentry->d_inode, + return spufs_create_context(nd->path.dentry->d_inode, dentry, nd->path.mnt, flags, mode, filp); - if (ret >= 0) - fsnotify_mkdir(nd->path.dentry->d_inode, dentry); - return ret; out_dput: dput(dentry); diff --git a/trunk/arch/powerpc/platforms/cell/spufs/run.c b/trunk/arch/powerpc/platforms/cell/spufs/run.c index b7493b865812..a9c35b7b719f 100644 --- a/trunk/arch/powerpc/platforms/cell/spufs/run.c +++ b/trunk/arch/powerpc/platforms/cell/spufs/run.c @@ -11,7 +11,7 @@ #include "spufs.h" /* interrupt-level stop callback function. 
*/ -void spufs_stop_callback(struct spu *spu, int irq) +void spufs_stop_callback(struct spu *spu) { struct spu_context *ctx = spu->ctx; @@ -24,19 +24,9 @@ void spufs_stop_callback(struct spu *spu, int irq) */ if (ctx) { /* Copy exception arguments into module specific structure */ - switch(irq) { - case 0 : - ctx->csa.class_0_pending = spu->class_0_pending; - ctx->csa.class_0_dsisr = spu->class_0_dsisr; - ctx->csa.class_0_dar = spu->class_0_dar; - break; - case 1 : - ctx->csa.class_1_dsisr = spu->class_1_dsisr; - ctx->csa.class_1_dar = spu->class_1_dar; - break; - case 2 : - break; - } + ctx->csa.class_0_pending = spu->class_0_pending; + ctx->csa.dsisr = spu->dsisr; + ctx->csa.dar = spu->dar; /* ensure that the exception status has hit memory before a * thread waiting on the context's stop queue is woken */ @@ -44,6 +34,11 @@ void spufs_stop_callback(struct spu *spu, int irq) wake_up_all(&ctx->stop_wq); } + + /* Clear callback arguments from spu structure */ + spu->class_0_pending = 0; + spu->dsisr = 0; + spu->dar = 0; } int spu_stopped(struct spu_context *ctx, u32 *stat) @@ -61,11 +56,7 @@ int spu_stopped(struct spu_context *ctx, u32 *stat) if (!(*stat & SPU_STATUS_RUNNING) && (*stat & stopped)) return 1; - dsisr = ctx->csa.class_0_dsisr; - if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) - return 1; - - dsisr = ctx->csa.class_1_dsisr; + dsisr = ctx->csa.dsisr; if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) return 1; @@ -303,7 +294,7 @@ static int spu_process_callback(struct spu_context *ctx) u32 ls_pointer, npc; void __iomem *ls; long spu_ret; - int ret; + int ret, ret2; /* get syscall block from local store */ npc = ctx->ops->npc_read(ctx) & ~3; @@ -325,9 +316,11 @@ static int spu_process_callback(struct spu_context *ctx) if (spu_ret <= -ERESTARTSYS) { ret = spu_handle_restartsys(ctx, &spu_ret, &npc); } - mutex_lock(&ctx->state_mutex); + ret2 = spu_acquire(ctx); if (ret == -ERESTARTSYS) return ret; + if (ret2) + return -EINTR; } /* need to re-get the ls, as it may have changed when we released the @@ -350,14 +343,13 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event) if (mutex_lock_interruptible(&ctx->run_mutex)) return -ERESTARTSYS; + spu_enable_spu(ctx); ctx->event_return = 0; ret = spu_acquire(ctx); if (ret) goto out_unlock; - spu_enable_spu(ctx); - spu_update_sched_info(ctx); ret = spu_run_init(ctx, npc); diff --git a/trunk/arch/powerpc/platforms/cell/spufs/sched.c b/trunk/arch/powerpc/platforms/cell/spufs/sched.c index 2e411f23462b..7298e7db2c83 100644 --- a/trunk/arch/powerpc/platforms/cell/spufs/sched.c +++ b/trunk/arch/powerpc/platforms/cell/spufs/sched.c @@ -140,9 +140,6 @@ void __spu_update_sched_info(struct spu_context *ctx) * if it is timesliced or preempted. */ ctx->cpus_allowed = current->cpus_allowed; - - /* Save the current cpu id for spu interrupt routing. 
*/ - ctx->last_ran = raw_smp_processor_id(); } void spu_update_sched_info(struct spu_context *ctx) @@ -246,6 +243,7 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx) spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0); spu_restore(&ctx->csa, spu); spu->timestamp = jiffies; + spu_cpu_affinity_set(spu, raw_smp_processor_id()); spu_switch_notify(spu, ctx); ctx->state = SPU_STATE_RUNNABLE; @@ -659,8 +657,7 @@ static struct spu *find_victim(struct spu_context *ctx) victim->stats.invol_ctx_switch++; spu->stats.invol_ctx_switch++; - if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags)) - spu_add_to_rq(victim); + spu_add_to_rq(victim); mutex_unlock(&victim->state_mutex); diff --git a/trunk/arch/powerpc/platforms/cell/spufs/spufs.h b/trunk/arch/powerpc/platforms/cell/spufs/spufs.h index 454c277c1457..7312745b7540 100644 --- a/trunk/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/trunk/arch/powerpc/platforms/cell/spufs/spufs.h @@ -121,7 +121,6 @@ struct spu_context { cpumask_t cpus_allowed; int policy; int prio; - int last_ran; /* statistics */ struct { @@ -332,7 +331,7 @@ size_t spu_ibox_read(struct spu_context *ctx, u32 *data); /* irq callback funcs. */ void spufs_ibox_callback(struct spu *spu); void spufs_wbox_callback(struct spu *spu); -void spufs_stop_callback(struct spu *spu, int irq); +void spufs_stop_callback(struct spu *spu); void spufs_mfc_callback(struct spu *spu); void spufs_dma_callback(struct spu *spu, int type); diff --git a/trunk/arch/powerpc/platforms/cell/spufs/switch.c b/trunk/arch/powerpc/platforms/cell/spufs/switch.c index 3df9a36eb2f5..d2a1249d36dd 100644 --- a/trunk/arch/powerpc/platforms/cell/spufs/switch.c +++ b/trunk/arch/powerpc/platforms/cell/spufs/switch.c @@ -132,14 +132,6 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu) spu_int_mask_set(spu, 2, 0ul); eieio(); spin_unlock_irq(&spu->register_lock); - - /* - * This flag needs to be set before calling synchronize_irq so - * that the update will be visible to the relevant handlers - * via a simple load. - */ - set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags); - clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags); synchronize_irq(spu->irqs[0]); synchronize_irq(spu->irqs[1]); synchronize_irq(spu->irqs[2]); @@ -174,8 +166,9 @@ static inline void set_switch_pending(struct spu_state *csa, struct spu *spu) /* Save, Step 7: * Restore, Step 5: * Set a software context switch pending flag. - * Done above in Step 3 - disable_interrupts(). 
*/ + set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags); + mb(); } static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu) @@ -193,21 +186,20 @@ static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu) MFC_CNTL_SUSPEND_COMPLETE); /* fall through */ case MFC_CNTL_SUSPEND_COMPLETE: - if (csa) + if (csa) { csa->priv2.mfc_control_RW = - in_be64(&priv2->mfc_control_RW) | + MFC_CNTL_SUSPEND_MASK | MFC_CNTL_SUSPEND_DMA_QUEUE; + } break; case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION: out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE); POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_SUSPEND_DMA_STATUS_MASK) == MFC_CNTL_SUSPEND_COMPLETE); - if (csa) - csa->priv2.mfc_control_RW = - in_be64(&priv2->mfc_control_RW) & - ~MFC_CNTL_SUSPEND_DMA_QUEUE & - ~MFC_CNTL_SUSPEND_MASK; + if (csa) { + csa->priv2.mfc_control_RW = 0; + } break; } } @@ -257,21 +249,16 @@ static inline void save_spu_status(struct spu_state *csa, struct spu *spu) } } -static inline void save_mfc_stopped_status(struct spu_state *csa, - struct spu *spu) +static inline void save_mfc_decr(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; - const u64 mask = MFC_CNTL_DECREMENTER_RUNNING | - MFC_CNTL_DMA_QUEUES_EMPTY; /* Save, Step 12: * Read MFC_CNTL[Ds]. Update saved copy of * CSA.MFC_CNTL[Ds]. - * - * update: do the same with MFC_CNTL[Q]. */ - csa->priv2.mfc_control_RW &= ~mask; - csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask; + csa->priv2.mfc_control_RW |= + in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING; } static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu) @@ -475,9 +462,7 @@ static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu) * Restore, Step 14. * Write MFC_CNTL[Pc]=1 (purge queue). */ - out_be64(&priv2->mfc_control_RW, - MFC_CNTL_PURGE_DMA_REQUEST | - MFC_CNTL_SUSPEND_MASK); + out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST); eieio(); } @@ -740,14 +725,10 @@ static inline void set_switch_active(struct spu_state *csa, struct spu *spu) /* Save, Step 48: * Restore, Step 23. * Change the software context switch pending flag - * to context switch active. This implementation does - * not uses a switch active flag. + * to context switch active. * - * Now that we have saved the mfc in the csa, we can add in the - * restart command if an exception occurred. + * This implementation does not uses a switch active flag. */ - if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags)) - csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND; clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags); mb(); } @@ -1709,13 +1690,6 @@ static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu) eieio(); } -static inline void set_int_route(struct spu_state *csa, struct spu *spu) -{ - struct spu_context *ctx = spu->ctx; - - spu_cpu_affinity_set(spu, ctx->last_ran); -} - static inline void restore_other_spu_access(struct spu_state *csa, struct spu *spu) { @@ -1747,15 +1721,15 @@ static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu) */ out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW); eieio(); - /* - * The queue is put back into the same state that was evident prior to - * the context switch. The suspend flag is added to the saved state in - * the csa, if the operational state was suspending or suspended. In - * this case, the code that suspended the mfc is responsible for - * continuing it. 
Note that SPE faults do not change the operational - * state of the spu. + * FIXME: this is to restart a DMA that we were processing + * before the save. better remember the fault information + * in the csa instead. */ + if ((csa->priv2.mfc_control_RW & MFC_CNTL_SUSPEND_DMA_QUEUE_MASK)) { + out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND); + eieio(); + } } static inline void enable_user_access(struct spu_state *csa, struct spu *spu) @@ -1814,7 +1788,7 @@ static int quiece_spu(struct spu_state *prev, struct spu *spu) save_spu_runcntl(prev, spu); /* Step 9. */ save_mfc_sr1(prev, spu); /* Step 10. */ save_spu_status(prev, spu); /* Step 11. */ - save_mfc_stopped_status(prev, spu); /* Step 12. */ + save_mfc_decr(prev, spu); /* Step 12. */ halt_mfc_decr(prev, spu); /* Step 13. */ save_timebase(prev, spu); /* Step 14. */ remove_other_spu_access(prev, spu); /* Step 15. */ @@ -2026,7 +2000,6 @@ static void restore_csa(struct spu_state *next, struct spu *spu) check_ppuint_mb_stat(next, spu); /* Step 67. */ spu_invalidate_slbs(spu); /* Modified Step 68. */ restore_mfc_sr1(next, spu); /* Step 69. */ - set_int_route(next, spu); /* NEW */ restore_other_spu_access(next, spu); /* Step 70. */ restore_spu_runcntl(next, spu); /* Step 71. */ restore_mfc_cntl(next, spu); /* Step 72. */ diff --git a/trunk/arch/powerpc/platforms/chrp/pegasos_eth.c b/trunk/arch/powerpc/platforms/chrp/pegasos_eth.c index 130ff72d99dd..5bcc58d9a4dd 100644 --- a/trunk/arch/powerpc/platforms/chrp/pegasos_eth.c +++ b/trunk/arch/powerpc/platforms/chrp/pegasos_eth.c @@ -58,9 +58,7 @@ static struct resource mv643xx_eth0_resources[] = { static struct mv643xx_eth_platform_data eth0_pd = { - .shared = &mv643xx_eth_shared_device, .port_number = 0, - .tx_sram_addr = PEGASOS2_SRAM_BASE_ETH0, .tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE, .tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16, @@ -90,9 +88,7 @@ static struct resource mv643xx_eth1_resources[] = { }; static struct mv643xx_eth_platform_data eth1_pd = { - .shared = &mv643xx_eth_shared_device, .port_number = 1, - .tx_sram_addr = PEGASOS2_SRAM_BASE_ETH1, .tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE, .tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16, diff --git a/trunk/arch/powerpc/platforms/pseries/scanlog.c b/trunk/arch/powerpc/platforms/pseries/scanlog.c index 417eca79df69..bec3803f0618 100644 --- a/trunk/arch/powerpc/platforms/pseries/scanlog.c +++ b/trunk/arch/powerpc/platforms/pseries/scanlog.c @@ -55,6 +55,11 @@ static ssize_t scanlog_read(struct file *file, char __user *buf, dp = PDE(inode); data = (unsigned int *)dp->data; + if (!data) { + printk(KERN_ERR "scanlog: read failed no data\n"); + return -EIO; + } + if (count > RTAS_DATA_BUF_SIZE) count = RTAS_DATA_BUF_SIZE; @@ -141,6 +146,11 @@ static int scanlog_open(struct inode * inode, struct file * file) struct proc_dir_entry *dp = PDE(inode); unsigned int *data = (unsigned int *)dp->data; + if (!data) { + printk(KERN_ERR "scanlog: open failed no data\n"); + return -EIO; + } + if (data[0] != 0) { /* This imperfect test stops a second copy of the * data (or a reset while data is being copied) @@ -158,6 +168,10 @@ static int scanlog_release(struct inode * inode, struct file * file) struct proc_dir_entry *dp = PDE(inode); unsigned int *data = (unsigned int *)dp->data; + if (!data) { + printk(KERN_ERR "scanlog: release failed no data\n"); + return -EIO; + } data[0] = 0; return 0; @@ -186,11 +200,12 @@ static int __init scanlog_init(void) if (!data) goto err; - ent = proc_create_data("ppc64/rtas/scan-log-dump", S_IRUSR, NULL, - 
&scanlog_fops, data); + ent = proc_create("ppc64/rtas/scan-log-dump", S_IRUSR, NULL, + &scanlog_fops); if (!ent) goto err; + ent->data = data; proc_ppc64_scan_log_dump = ent; return 0; diff --git a/trunk/arch/powerpc/sysdev/mv64x60_dev.c b/trunk/arch/powerpc/sysdev/mv64x60_dev.c index a132e0de8ca5..41af1223e2a0 100644 --- a/trunk/arch/powerpc/sysdev/mv64x60_dev.c +++ b/trunk/arch/powerpc/sysdev/mv64x60_dev.c @@ -239,8 +239,6 @@ static int __init mv64x60_eth_device_setup(struct device_node *np, int id, memset(&pdata, 0, sizeof(pdata)); - pdata.shared = shared_pdev; - prop = of_get_property(np, "reg", NULL); if (!prop) return -ENODEV; diff --git a/trunk/arch/powerpc/sysdev/ppc4xx_pci.c b/trunk/arch/powerpc/sysdev/ppc4xx_pci.c index b4a54c52e880..1814adbd2236 100644 --- a/trunk/arch/powerpc/sysdev/ppc4xx_pci.c +++ b/trunk/arch/powerpc/sysdev/ppc4xx_pci.c @@ -1387,59 +1387,28 @@ static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port, resource_size_t size = res->end - res->start + 1; u64 sa; - if (port->endpoint) { - resource_size_t ep_addr = 0; - resource_size_t ep_size = 32 << 20; - - /* Currently we map a fixed 64MByte window to PLB address - * 0 (SDRAM). This should probably be configurable via a dts - * property. - */ - - /* Calculate window size */ - sa = (0xffffffffffffffffull << ilog2(ep_size));; - - /* Setup BAR0 */ - out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa)); - out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa) | - PCI_BASE_ADDRESS_MEM_TYPE_64); - - /* Disable BAR1 & BAR2 */ - out_le32(mbase + PECFG_BAR1MPA, 0); - out_le32(mbase + PECFG_BAR2HMPA, 0); - out_le32(mbase + PECFG_BAR2LMPA, 0); - - out_le32(mbase + PECFG_PIM01SAH, RES_TO_U32_HIGH(sa)); - out_le32(mbase + PECFG_PIM01SAL, RES_TO_U32_LOW(sa)); - - out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(ep_addr)); - out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(ep_addr)); - } else { - /* Calculate window size */ - sa = (0xffffffffffffffffull << ilog2(size));; - if (res->flags & IORESOURCE_PREFETCH) - sa |= 0x8; + /* Calculate window size */ + sa = (0xffffffffffffffffull << ilog2(size));; + if (res->flags & IORESOURCE_PREFETCH) + sa |= 0x8; - out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa)); - out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa)); + out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa)); + out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa)); - /* The setup of the split looks weird to me ... let's see - * if it works - */ - out_le32(mbase + PECFG_PIM0LAL, 0x00000000); - out_le32(mbase + PECFG_PIM0LAH, 0x00000000); - out_le32(mbase + PECFG_PIM1LAL, 0x00000000); - out_le32(mbase + PECFG_PIM1LAH, 0x00000000); - out_le32(mbase + PECFG_PIM01SAH, 0xffff0000); - out_le32(mbase + PECFG_PIM01SAL, 0x00000000); - - out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start)); - out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start)); - } + /* The setup of the split looks weird to me ... 
let's see if it works */ + out_le32(mbase + PECFG_PIM0LAL, 0x00000000); + out_le32(mbase + PECFG_PIM0LAH, 0x00000000); + out_le32(mbase + PECFG_PIM1LAL, 0x00000000); + out_le32(mbase + PECFG_PIM1LAH, 0x00000000); + out_le32(mbase + PECFG_PIM01SAH, 0xffff0000); + out_le32(mbase + PECFG_PIM01SAL, 0x00000000); /* Enable inbound mapping */ out_le32(mbase + PECFG_PIMEN, 0x1); + out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start)); + out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start)); + /* Enable I/O, Mem, and Busmaster cycles */ out_le16(mbase + PCI_COMMAND, in_le16(mbase + PCI_COMMAND) | @@ -1453,8 +1422,13 @@ static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port) const int *bus_range; int primary = 0, busses; void __iomem *mbase = NULL, *cfg_data = NULL; - const u32 *pval; - u32 val; + + /* XXX FIXME: Handle endpoint mode properly */ + if (port->endpoint) { + printk(KERN_WARNING "PCIE%d: Port in endpoint mode !\n", + port->index); + return; + } /* Check if primary bridge */ if (of_get_property(port->node, "primary", NULL)) @@ -1488,30 +1462,21 @@ static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port) hose->last_busno = hose->first_busno + busses; } - if (!port->endpoint) { - /* Only map the external config space in cfg_data for - * PCIe root-complexes. External space is 1M per bus - */ - cfg_data = ioremap(port->cfg_space.start + - (hose->first_busno + 1) * 0x100000, - busses * 0x100000); - if (cfg_data == NULL) { - printk(KERN_ERR "%s: Can't map external config space !", - port->node->full_name); - goto fail; - } - hose->cfg_data = cfg_data; - } - - /* Always map the host config space in cfg_addr. - * Internal space is 4K + /* We map the external config space in cfg_data and the host config + * space in cfg_addr. External space is 1M per bus, internal space + * is 4K */ + cfg_data = ioremap(port->cfg_space.start + + (hose->first_busno + 1) * 0x100000, + busses * 0x100000); mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000); - if (mbase == NULL) { - printk(KERN_ERR "%s: Can't map internal config space !", + if (cfg_data == NULL || mbase == NULL) { + printk(KERN_ERR "%s: Can't map config space !", port->node->full_name); goto fail; } + + hose->cfg_data = cfg_data; hose->cfg_addr = mbase; pr_debug("PCIE %s, bus %d..%d\n", port->node->full_name, @@ -1524,14 +1489,12 @@ static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port) port->hose = hose; mbase = (void __iomem *)hose->cfg_addr; - if (!port->endpoint) { - /* - * Set bus numbers on our root port - */ - out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno); - out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1); - out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno); - } + /* + * Set bus numbers on our root port + */ + out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno); + out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1); + out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno); /* * OMRs are already reset, also disable PIMs @@ -1552,49 +1515,17 @@ static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port) ppc4xx_configure_pciex_PIMs(port, hose, mbase, &dma_window); /* The root complex doesn't show up if we don't set some vendor - * and device IDs into it. The defaults below are the same bogus - * one that the initial code in arch/ppc had. This can be - * overwritten by setting the "vendor-id/device-id" properties - * in the pciex node. + * and device IDs into it. 
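The inbound-window arithmetic in ppc4xx_configure_pciex_PIMs() is easier to check in isolation. A stand-alone user-space sketch of how a power-of-two window size becomes the BAR mask words (the 256MB size and main() are made up for illustration):

    #include <stdio.h>
    #include <stdint.h>

    /* integer log2 of a power-of-two value, like the kernel's ilog2() */
    static unsigned int ilog2_u64(uint64_t v)
    {
            unsigned int r = 0;

            while (v >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            uint64_t size = 256 << 20;                 /* example: 256MB inbound window */
            uint64_t sa = ~0ULL << ilog2_u64(size);    /* 0xffffffffffffffffull << ilog2(size) */

            sa |= 0x8;      /* bit 3 marks the window prefetchable, as for IORESOURCE_PREFETCH */

            printf("BAR0 mask: high 0x%08x, low 0x%08x\n",
                   (uint32_t)(sa >> 32), (uint32_t)sa);
            return 0;
    }

The high and low halves are what RES_TO_U32_HIGH()/RES_TO_U32_LOW() feed into PECFG_BAR0HMPA and PECFG_BAR0LMPA.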
Those are the same bogus one that the + * initial code in arch/ppc add. We might want to change that. */ + out_le16(mbase + 0x200, 0xaaa0 + port->index); + out_le16(mbase + 0x202, 0xbed0 + port->index); - /* Get the (optional) vendor-/device-id from the device-tree */ - pval = of_get_property(port->node, "vendor-id", NULL); - if (pval) { - val = *pval; - } else { - if (!port->endpoint) - val = 0xaaa0 + port->index; - else - val = 0xeee0 + port->index; - } - out_le16(mbase + 0x200, val); - - pval = of_get_property(port->node, "device-id", NULL); - if (pval) { - val = *pval; - } else { - if (!port->endpoint) - val = 0xbed0 + port->index; - else - val = 0xfed0 + port->index; - } - out_le16(mbase + 0x202, val); - - if (!port->endpoint) { - /* Set Class Code to PCI-PCI bridge and Revision Id to 1 */ - out_le32(mbase + 0x208, 0x06040001); - - printk(KERN_INFO "PCIE%d: successfully set as root-complex\n", - port->index); - } else { - /* Set Class Code to Processor/PPC */ - out_le32(mbase + 0x208, 0x0b200001); - - printk(KERN_INFO "PCIE%d: successfully set as endpoint\n", - port->index); - } + /* Set Class Code to PCI-PCI bridge and Revision Id to 1 */ + out_le32(mbase + 0x208, 0x06040001); + printk(KERN_INFO "PCIE%d: successfully set as root-complex\n", + port->index); return; fail: if (hose) @@ -1611,7 +1542,6 @@ static void __init ppc4xx_probe_pciex_bridge(struct device_node *np) const u32 *pval; int portno; unsigned int dcrs; - const char *val; /* First, proceed to core initialization as we assume there's * only one PCIe core in the system @@ -1643,20 +1573,8 @@ static void __init ppc4xx_probe_pciex_bridge(struct device_node *np) } port->sdr_base = *pval; - /* Check if device_type property is set to "pci" or "pci-endpoint". - * Resulting from this setup this PCIe port will be configured - * as root-complex or as endpoint. 
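The vendor-id/device-id lookup in the lines above reduces to "use the device-tree value when present, otherwise a per-port default"; a condensed sketch of that pattern (demo_vendor_id() and its default are illustrative names, not the driver's):

    #include <linux/of.h>
    #include <linux/types.h>

    /* Return the "vendor-id" property of @np if present, else @def. */
    static u32 demo_vendor_id(struct device_node *np, u32 def)
    {
            const u32 *pval = of_get_property(np, "vendor-id", NULL);

            return pval ? *pval : def;
    }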
- */ - val = of_get_property(port->node, "device_type", NULL); - if (!strcmp(val, "pci-endpoint")) { - port->endpoint = 1; - } else if (!strcmp(val, "pci")) { - port->endpoint = 0; - } else { - printk(KERN_ERR "PCIE: missing or incorrect device_type for %s\n", - np->full_name); - return; - } + /* XXX Currently, we only support root complex mode */ + port->endpoint = 0; /* Fetch config space registers address */ if (of_address_to_resource(np, 0, &port->cfg_space)) { diff --git a/trunk/arch/powerpc/xmon/xmon.c b/trunk/arch/powerpc/xmon/xmon.c index 1702de9395ee..52c74780f403 100644 --- a/trunk/arch/powerpc/xmon/xmon.c +++ b/trunk/arch/powerpc/xmon/xmon.c @@ -2842,11 +2842,9 @@ static void dump_spu_fields(struct spu *spu) DUMP_FIELD(spu, "0x%lx", ls_size); DUMP_FIELD(spu, "0x%x", node); DUMP_FIELD(spu, "0x%lx", flags); + DUMP_FIELD(spu, "0x%lx", dar); + DUMP_FIELD(spu, "0x%lx", dsisr); DUMP_FIELD(spu, "%d", class_0_pending); - DUMP_FIELD(spu, "0x%lx", class_0_dar); - DUMP_FIELD(spu, "0x%lx", class_0_dsisr); - DUMP_FIELD(spu, "0x%lx", class_1_dar); - DUMP_FIELD(spu, "0x%lx", class_1_dsisr); DUMP_FIELD(spu, "0x%lx", irqs[0]); DUMP_FIELD(spu, "0x%lx", irqs[1]); DUMP_FIELD(spu, "0x%lx", irqs[2]); diff --git a/trunk/arch/ppc/syslib/mv64x60.c b/trunk/arch/ppc/syslib/mv64x60.c index 418f3053de52..90fe904d3614 100644 --- a/trunk/arch/ppc/syslib/mv64x60.c +++ b/trunk/arch/ppc/syslib/mv64x60.c @@ -341,7 +341,6 @@ static struct resource mv64x60_eth0_resources[] = { }; static struct mv643xx_eth_platform_data eth0_pd = { - .shared = &mv64x60_eth_shared_device; .port_number = 0, }; @@ -367,7 +366,6 @@ static struct resource mv64x60_eth1_resources[] = { }; static struct mv643xx_eth_platform_data eth1_pd = { - .shared = &mv64x60_eth_shared_device; .port_number = 1, }; @@ -393,7 +391,6 @@ static struct resource mv64x60_eth2_resources[] = { }; static struct mv643xx_eth_platform_data eth2_pd = { - .shared = &mv64x60_eth_shared_device; .port_number = 2, }; diff --git a/trunk/arch/s390/Kconfig b/trunk/arch/s390/Kconfig index 1d035082e78e..29a7940f284f 100644 --- a/trunk/arch/s390/Kconfig +++ b/trunk/arch/s390/Kconfig @@ -430,13 +430,6 @@ config CMM_IUCV Select this option to enable the special message interface to the cooperative memory management. -config PAGE_STATES - bool "Unused page notification" - help - This enables the notification of unused pages to the - hypervisor. The ESSA instruction is used to do the states - changes between a page that has content and the unused state. - config VIRT_TIMER bool "Virtual CPU timer support" help diff --git a/trunk/arch/s390/kernel/compat_wrapper.S b/trunk/arch/s390/kernel/compat_wrapper.S index d003a6e16afb..743d54f0b8db 100644 --- a/trunk/arch/s390/kernel/compat_wrapper.S +++ b/trunk/arch/s390/kernel/compat_wrapper.S @@ -121,7 +121,7 @@ sys32_ptrace_wrapper: lgfr %r3,%r3 # long llgtr %r4,%r4 # long llgfr %r5,%r5 # long - jg compat_sys_ptrace # branch to system call + jg sys_ptrace # branch to system call .globl sys32_alarm_wrapper sys32_alarm_wrapper: diff --git a/trunk/arch/s390/kernel/entry.S b/trunk/arch/s390/kernel/entry.S index 708cf9cf9a35..bdbb3bcd78a5 100644 --- a/trunk/arch/s390/kernel/entry.S +++ b/trunk/arch/s390/kernel/entry.S @@ -279,6 +279,8 @@ sysc_do_restart: st %r2,SP_R2(%r15) # store return value (change R2 on stack) sysc_return: + tm SP_PSW+1(%r15),0x01 # returning to user ? + bno BASED(sysc_restore) tm __TI_flags+3(%r9),_TIF_WORK_SVC bnz BASED(sysc_work) # there is work to do (signals etc.) 
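In the s390 entry.S hunks around this point, the "returning to user ?" test sits at the common sysc_return/io_return labels rather than inside the work handlers, so returns to kernel mode skip the TIF work checks entirely. A stand-alone C analogue of that control flow, not the real entry code (the predicates and stubs are invented):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the PSW problem-state bit and the _TIF_WORK_* mask. */
    static bool returning_to_user = true;
    static bool work_pending = true;

    static void handle_work(void)  { printf("signals / reschedule\n"); }
    static void restore_regs(void) { printf("restore registers and return\n"); }

    static void sysc_return_demo(void)
    {
            if (!returning_to_user) {       /* kernel return: no work checks */
                    restore_regs();
                    return;
            }
            if (work_pending)
                    handle_work();
            restore_regs();
    }

    int main(void)
    {
            sysc_return_demo();
            return 0;
    }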
sysc_restore: @@ -310,8 +312,6 @@ sysc_work_loop: # One of the work bits is on. Find out which one. # sysc_work: - tm SP_PSW+1(%r15),0x01 # returning to user ? - bno BASED(sysc_restore) tm __TI_flags+3(%r9),_TIF_MCCK_PENDING bo BASED(sysc_mcck_pending) tm __TI_flags+3(%r9),_TIF_NEED_RESCHED @@ -602,6 +602,12 @@ io_no_vtime: la %r2,SP_PTREGS(%r15) # address of register-save area basr %r14,%r1 # branch to standard irq handler io_return: + tm SP_PSW+1(%r15),0x01 # returning to user ? +#ifdef CONFIG_PREEMPT + bno BASED(io_preempt) # no -> check for preemptive scheduling +#else + bno BASED(io_restore) # no-> skip resched & signal +#endif tm __TI_flags+3(%r9),_TIF_WORK_INT bnz BASED(io_work) # there is work to do (signals etc.) io_restore: @@ -623,18 +629,10 @@ io_restore_trace_psw: .long 0, io_restore_trace + 0x80000000 #endif -# -# switch to kernel stack, then check the TIF bits -# -io_work: - tm SP_PSW+1(%r15),0x01 # returning to user ? -#ifndef CONFIG_PREEMPT - bno BASED(io_restore) # no-> skip resched & signal -#else - bnz BASED(io_work_user) # no -> check for preemptive scheduling - # check for preemptive scheduling +#ifdef CONFIG_PREEMPT +io_preempt: icm %r0,15,__TI_precount(%r9) - bnz BASED(io_restore) # preemption disabled + bnz BASED(io_restore) l %r1,SP_R15(%r15) s %r1,BASED(.Lc_spsize) mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) @@ -648,7 +646,10 @@ io_resume_loop: br %r1 # call schedule #endif -io_work_user: +# +# switch to kernel stack, then check the TIF bits +# +io_work: l %r1,__LC_KERNEL_STACK s %r1,BASED(.Lc_spsize) mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) diff --git a/trunk/arch/s390/kernel/entry64.S b/trunk/arch/s390/kernel/entry64.S index fee10177dbfc..5a4a7bcd2bba 100644 --- a/trunk/arch/s390/kernel/entry64.S +++ b/trunk/arch/s390/kernel/entry64.S @@ -271,6 +271,8 @@ sysc_noemu: stg %r2,SP_R2(%r15) # store return value (change R2 on stack) sysc_return: + tm SP_PSW+1(%r15),0x01 # returning to user ? + jno sysc_restore tm __TI_flags+7(%r9),_TIF_WORK_SVC jnz sysc_work # there is work to do (signals etc.) sysc_restore: @@ -302,8 +304,6 @@ sysc_work_loop: # One of the work bits is on. Find out which one. # sysc_work: - tm SP_PSW+1(%r15),0x01 # returning to user ? - jno sysc_restore tm __TI_flags+7(%r9),_TIF_MCCK_PENDING jo sysc_mcck_pending tm __TI_flags+7(%r9),_TIF_NEED_RESCHED @@ -585,6 +585,12 @@ io_no_vtime: la %r2,SP_PTREGS(%r15) # address of register-save area brasl %r14,do_IRQ # call standard irq handler io_return: + tm SP_PSW+1(%r15),0x01 # returning to user ? +#ifdef CONFIG_PREEMPT + jno io_preempt # no -> check for preemptive scheduling +#else + jno io_restore # no-> skip resched & signal +#endif tm __TI_flags+7(%r9),_TIF_WORK_INT jnz io_work # there is work to do (signals etc.) io_restore: @@ -606,41 +612,10 @@ io_restore_trace_psw: .quad 0, io_restore_trace #endif -# -# There is work todo, we need to check if we return to userspace, then -# check, if we are in SIE, if yes leave it -# -io_work: - tm SP_PSW+1(%r15),0x01 # returning to user ? -#ifndef CONFIG_PREEMPT -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) - jnz io_work_user # yes -> no need to check for SIE - la %r1, BASED(sie_opcode) # we return to kernel here - lg %r2, SP_PSW+8(%r15) - clc 0(2,%r1), 0(%r2) # is current instruction = SIE? 
- jne io_restore # no-> return to kernel - lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE - aghi %r1, 4 - stg %r1, SP_PSW+8(%r15) - j io_restore # return to kernel -#else - jno io_restore # no-> skip resched & signal -#endif -#else - jnz io_work_user # yes -> do resched & signal -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) - la %r1, BASED(sie_opcode) - lg %r2, SP_PSW+8(%r15) - clc 0(2,%r1), 0(%r2) # is current instruction = SIE? - jne 0f # no -> leave PSW alone - lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE - aghi %r1, 4 - stg %r1, SP_PSW+8(%r15) -0: -#endif - # check for preemptive scheduling +#ifdef CONFIG_PREEMPT +io_preempt: icm %r0,15,__TI_precount(%r9) - jnz io_restore # preemption is disabled + jnz io_restore # switch to kernel stack lg %r1,SP_R15(%r15) aghi %r1,-SP_SIZE @@ -654,7 +629,10 @@ io_resume_loop: jg preempt_schedule_irq #endif -io_work_user: +# +# switch to kernel stack, then check TIF bits +# +io_work: lg %r1,__LC_KERNEL_STACK aghi %r1,-SP_SIZE mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) @@ -675,11 +653,6 @@ io_work_loop: j io_restore io_work_done: -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) -sie_opcode: - .long 0xb2140000 -#endif - # # _TIF_MCCK_PENDING is set, call handler # diff --git a/trunk/arch/s390/kernel/ptrace.c b/trunk/arch/s390/kernel/ptrace.c index 35827b9bd4d1..7f4270163744 100644 --- a/trunk/arch/s390/kernel/ptrace.c +++ b/trunk/arch/s390/kernel/ptrace.c @@ -292,7 +292,8 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data) return 0; } -long arch_ptrace(struct task_struct *child, long request, long addr, long data) +static int +do_ptrace_normal(struct task_struct *child, long request, long addr, long data) { ptrace_area parea; int copied, ret; @@ -528,19 +529,35 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data) return 0; } -long compat_arch_ptrace(struct task_struct *child, compat_long_t request, - compat_ulong_t caddr, compat_ulong_t cdata) +static int +do_ptrace_emu31(struct task_struct *child, long request, long addr, long data) { - unsigned long addr = caddr; - unsigned long data = cdata; + unsigned int tmp; /* 4 bytes !! */ ptrace_area_emu31 parea; int copied, ret; switch (request) { + case PTRACE_PEEKTEXT: + case PTRACE_PEEKDATA: + /* read word at location addr. */ + copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); + if (copied != sizeof(tmp)) + return -EIO; + return put_user(tmp, (unsigned int __force __user *) data); + case PTRACE_PEEKUSR: /* read the word at location addr in the USER area. */ return peek_user_emu31(child, addr, data); + case PTRACE_POKETEXT: + case PTRACE_POKEDATA: + /* write the word at location addr. 
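The compat PEEKTEXT/PEEKDATA and POKETEXT/POKEDATA cases all follow the same access_process_vm() shape with a 4-byte temporary; a condensed kernel-style sketch of the read half (demo_peek32() is an invented name, with error handling trimmed to the essentials):

    #include <linux/sched.h>
    #include <linux/mm.h>
    #include <linux/uaccess.h>
    #include <linux/errno.h>

    /* Read one 32-bit word from the traced child and hand it to the tracer. */
    static int demo_peek32(struct task_struct *child, unsigned long addr,
                           unsigned long data)
    {
            unsigned int tmp;       /* 4 bytes, as a 31-bit tracee expects */

            if (access_process_vm(child, addr, &tmp, sizeof(tmp), 0) != sizeof(tmp))
                    return -EIO;
            return put_user(tmp, (unsigned int __user *)data);
    }

The write half mirrors it with the last access_process_vm() argument set to 1.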
*/ + tmp = data; + copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 1); + if (copied != sizeof(tmp)) + return -EIO; + return 0; + case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ return poke_user_emu31(child, addr, data); @@ -570,11 +587,82 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, copied += sizeof(unsigned int); } return 0; + case PTRACE_GETEVENTMSG: + return put_user((__u32) child->ptrace_message, + (unsigned int __force __user *) data); + case PTRACE_GETSIGINFO: + if (child->last_siginfo == NULL) + return -EINVAL; + return copy_siginfo_to_user32((compat_siginfo_t + __force __user *) data, + child->last_siginfo); + case PTRACE_SETSIGINFO: + if (child->last_siginfo == NULL) + return -EINVAL; + return copy_siginfo_from_user32(child->last_siginfo, + (compat_siginfo_t + __force __user *) data); } - return compat_ptrace_request(child, request, addr, data); + return ptrace_request(child, request, addr, data); } #endif +long arch_ptrace(struct task_struct *child, long request, long addr, long data) +{ + switch (request) { + case PTRACE_SYSCALL: + /* continue and stop at next (return from) syscall */ + case PTRACE_CONT: + /* restart after signal. */ + if (!valid_signal(data)) + return -EIO; + if (request == PTRACE_SYSCALL) + set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + else + clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + child->exit_code = data; + /* make sure the single step bit is not set. */ + user_disable_single_step(child); + wake_up_process(child); + return 0; + + case PTRACE_KILL: + /* + * make the child exit. Best I can do is send it a sigkill. + * perhaps it should be put in the status that it wants to + * exit. + */ + if (child->exit_state == EXIT_ZOMBIE) /* already dead */ + return 0; + child->exit_code = SIGKILL; + /* make sure the single step bit is not set. */ + user_disable_single_step(child); + wake_up_process(child); + return 0; + + case PTRACE_SINGLESTEP: + /* set the trap flag. */ + if (!valid_signal(data)) + return -EIO; + clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + child->exit_code = data; + user_enable_single_step(child); + /* give it a chance to run. */ + wake_up_process(child); + return 0; + + /* Do requests that differ for 31/64 bit */ + default: +#ifdef CONFIG_COMPAT + if (test_thread_flag(TIF_31BIT)) + return do_ptrace_emu31(child, request, addr, data); +#endif + return do_ptrace_normal(child, request, addr, data); + } + /* Not reached. */ + return -EIO; +} + asmlinkage void syscall_trace(struct pt_regs *regs, int entryexit) { diff --git a/trunk/arch/s390/kvm/Kconfig b/trunk/arch/s390/kvm/Kconfig index e051cad1f1e0..1761b74d639b 100644 --- a/trunk/arch/s390/kvm/Kconfig +++ b/trunk/arch/s390/kvm/Kconfig @@ -22,6 +22,7 @@ config KVM select PREEMPT_NOTIFIERS select ANON_INODES select S390_SWITCH_AMODE + select PREEMPT ---help--- Support hosting paravirtualized guest machines using the SIE virtualization capability on the mainframe. 
This should work diff --git a/trunk/arch/s390/kvm/intercept.c b/trunk/arch/s390/kvm/intercept.c index 47a0b642174c..349581a26103 100644 --- a/trunk/arch/s390/kvm/intercept.c +++ b/trunk/arch/s390/kvm/intercept.c @@ -105,9 +105,6 @@ static intercept_handler_t instruction_handlers[256] = { static int handle_noop(struct kvm_vcpu *vcpu) { switch (vcpu->arch.sie_block->icptcode) { - case 0x0: - vcpu->stat.exit_null++; - break; case 0x10: vcpu->stat.exit_external_request++; break; diff --git a/trunk/arch/s390/kvm/kvm-s390.c b/trunk/arch/s390/kvm/kvm-s390.c index 0ac36a649eba..98d1e73e01f1 100644 --- a/trunk/arch/s390/kvm/kvm-s390.c +++ b/trunk/arch/s390/kvm/kvm-s390.c @@ -31,7 +31,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "userspace_handled", VCPU_STAT(exit_userspace) }, - { "exit_null", VCPU_STAT(exit_null) }, { "exit_validity", VCPU_STAT(exit_validity) }, { "exit_stop_request", VCPU_STAT(exit_stop_request) }, { "exit_external_request", VCPU_STAT(exit_external_request) }, @@ -222,6 +221,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK; restore_fp_regs(&vcpu->arch.guest_fpregs); restore_access_regs(vcpu->arch.guest_acrs); + + if (signal_pending(current)) + atomic_set_mask(CPUSTAT_STOP_INT, + &vcpu->arch.sie_block->cpuflags); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) diff --git a/trunk/arch/s390/mm/Makefile b/trunk/arch/s390/mm/Makefile index 2a7458134544..fb988a48a754 100644 --- a/trunk/arch/s390/mm/Makefile +++ b/trunk/arch/s390/mm/Makefile @@ -5,4 +5,3 @@ obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o obj-$(CONFIG_CMM) += cmm.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o -obj-$(CONFIG_PAGE_STATES) += page-states.o diff --git a/trunk/arch/s390/mm/init.c b/trunk/arch/s390/mm/init.c index 29f3a63806b9..fa31de6ae97a 100644 --- a/trunk/arch/s390/mm/init.c +++ b/trunk/arch/s390/mm/init.c @@ -126,9 +126,6 @@ void __init mem_init(void) /* clear the zero-page */ memset(empty_zero_page, 0, PAGE_SIZE); - /* Setup guest page hinting */ - cmma_init(); - /* this will put all low memory onto the freelists */ totalram_pages += free_all_bootmem(); diff --git a/trunk/arch/s390/mm/page-states.c b/trunk/arch/s390/mm/page-states.c deleted file mode 100644 index fc0ad73ffd90..000000000000 --- a/trunk/arch/s390/mm/page-states.c +++ /dev/null @@ -1,79 +0,0 @@ -/* - * arch/s390/mm/page-states.c - * - * Copyright IBM Corp. 2008 - * - * Guest page hinting for unused pages. 
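The cmma= handler in the deleted page-states.c below uses the standard __setup() idiom for an early command-line flag; a minimal sketch of that idiom with an invented parameter name and flag (not the real cmma code, which additionally probes for the ESSA instruction and disables itself when the hypervisor lacks support):

    #include <linux/init.h>
    #include <linux/string.h>

    static int demo_flag;                   /* invented flag */

    static int __init demo_setup(char *str)
    {
            if (strcmp(str, "on") == 0 || strcmp(str, "yes") == 0)
                    demo_flag = 1;
            return 1;                       /* consume "demo_feature=..." */
    }
    __setup("demo_feature=", demo_setup);   /* e.g. boot with demo_feature=on */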
- * - * Author(s): Martin Schwidefsky - */ - -#include -#include -#include -#include -#include - -#define ESSA_SET_STABLE 1 -#define ESSA_SET_UNUSED 2 - -static int cmma_flag; - -static int __init cmma(char *str) -{ - char *parm; - parm = strstrip(str); - if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) { - cmma_flag = 1; - return 1; - } - cmma_flag = 0; - if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0) - return 1; - return 0; -} - -__setup("cmma=", cmma); - -void __init cmma_init(void) -{ - register unsigned long tmp asm("0") = 0; - register int rc asm("1") = -EOPNOTSUPP; - - if (!cmma_flag) - return; - asm volatile( - " .insn rrf,0xb9ab0000,%1,%1,0,0\n" - "0: la %0,0\n" - "1:\n" - EX_TABLE(0b,1b) - : "+&d" (rc), "+&d" (tmp)); - if (rc) - cmma_flag = 0; -} - -void arch_free_page(struct page *page, int order) -{ - int i, rc; - - if (!cmma_flag) - return; - for (i = 0; i < (1 << order); i++) - asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0" - : "=&d" (rc) - : "a" ((page_to_pfn(page) + i) << PAGE_SHIFT), - "i" (ESSA_SET_UNUSED)); -} - -void arch_alloc_page(struct page *page, int order) -{ - int i, rc; - - if (!cmma_flag) - return; - for (i = 0; i < (1 << order); i++) - asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0" - : "=&d" (rc) - : "a" ((page_to_pfn(page) + i) << PAGE_SHIFT), - "i" (ESSA_SET_STABLE)); -} diff --git a/trunk/arch/sh/Kconfig b/trunk/arch/sh/Kconfig index 8a68160079a9..6a679c3e15e8 100644 --- a/trunk/arch/sh/Kconfig +++ b/trunk/arch/sh/Kconfig @@ -448,6 +448,14 @@ config SH_DREAMCAST Select Dreamcast if configuring for a SEGA Dreamcast. More information at +config SH_MPC1211 + bool "Interface MPC1211" + depends on CPU_SUBTYPE_SH7751 && BROKEN + help + CTP/PCI-SH02 is a CPU module computer that is produced + by Interface Corporation. + More information at + config SH_SH03 bool "Interface CTP/PCI-SH03" depends on CPU_SUBTYPE_SH7751 @@ -649,7 +657,8 @@ source "arch/sh/drivers/Kconfig" endmenu config ISA_DMA_API - bool + def_bool y + depends on SH_MPC1211 menu "Kernel features" @@ -657,7 +666,7 @@ source kernel/Kconfig.hz config KEXEC bool "kexec system call (EXPERIMENTAL)" - depends on SUPERH32 && EXPERIMENTAL + depends on EXPERIMENTAL help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot @@ -674,7 +683,7 @@ config KEXEC config CRASH_DUMP bool "kernel crash dumps (EXPERIMENTAL)" - depends on SUPERH32 && EXPERIMENTAL + depends on EXPERIMENTAL help Generate crash dump after being started by kexec. 
This should be normally only set in special crash dump kernels @@ -754,7 +763,7 @@ menu "Boot options" config ZERO_PAGE_OFFSET hex "Zero page offset" - default "0x00004000" if SH_SH03 + default "0x00004000" if SH_MPC1211 || SH_SH03 default "0x00010000" if PAGE_SIZE_64KB default "0x00002000" if PAGE_SIZE_8KB default "0x00001000" diff --git a/trunk/arch/sh/Kconfig.debug b/trunk/arch/sh/Kconfig.debug index 0d2ef1e9a6fd..d9d28f9dd0db 100644 --- a/trunk/arch/sh/Kconfig.debug +++ b/trunk/arch/sh/Kconfig.debug @@ -7,7 +7,6 @@ source "lib/Kconfig.debug" config SH_STANDARD_BIOS bool "Use LinuxSH standard BIOS" - depends on SUPERH32 help Say Y here if your target has the gdb-sh-stub package from www.m17n.org (or any conforming standard LinuxSH BIOS) diff --git a/trunk/arch/sh/Makefile b/trunk/arch/sh/Makefile index 8050b03d51fc..bb06f83e6239 100644 --- a/trunk/arch/sh/Makefile +++ b/trunk/arch/sh/Makefile @@ -110,6 +110,7 @@ machdir-$(CONFIG_SH_7343_SOLUTION_ENGINE) += se/7343 machdir-$(CONFIG_SH_7721_SOLUTION_ENGINE) += se/7721 machdir-$(CONFIG_SH_HP6XX) += hp6xx machdir-$(CONFIG_SH_DREAMCAST) += dreamcast +machdir-$(CONFIG_SH_MPC1211) += mpc1211 machdir-$(CONFIG_SH_SH03) += sh03 machdir-$(CONFIG_SH_SECUREEDGE5410) += snapgear machdir-$(CONFIG_SH_RTS7751R2D) += renesas/rts7751r2d diff --git a/trunk/arch/sh/boards/mpc1211/Makefile b/trunk/arch/sh/boards/mpc1211/Makefile new file mode 100644 index 000000000000..8cd31b5d200b --- /dev/null +++ b/trunk/arch/sh/boards/mpc1211/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for the Interface (CTP/PCI/MPC-SH02) specific parts of the kernel +# + +obj-y := setup.o rtc.o + +obj-$(CONFIG_PCI) += pci.o + diff --git a/trunk/arch/sh/boards/mpc1211/pci.c b/trunk/arch/sh/boards/mpc1211/pci.c new file mode 100644 index 000000000000..23849f70f133 --- /dev/null +++ b/trunk/arch/sh/boards/mpc1211/pci.c @@ -0,0 +1,295 @@ +/* + * Low-Level PCI Support for the MPC-1211(CTP/PCI/MPC-SH02) + * + * (c) 2002-2003 Saito.K & Jeanne + * + * Dustin McIntire (dustin@sensoria.com) + * Derived from arch/i386/kernel/pci-*.c which bore the message: + * (c) 1999--2000 Martin Mares + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +static struct resource mpcpci_io_resource = { + "MPCPCI IO", + 0x00000000, + 0xffffffff, + IORESOURCE_IO +}; + +static struct resource mpcpci_mem_resource = { + "MPCPCI mem", + 0x00000000, + 0xffffffff, + IORESOURCE_MEM +}; + +static struct pci_ops pci_direct_conf1; +struct pci_channel board_pci_channels[] = { + {&pci_direct_conf1, &mpcpci_io_resource, &mpcpci_mem_resource, 0, 256}, + {NULL, NULL, NULL, 0, 0}, +}; + +/* + * Direct access to PCI hardware... 
+ */ + + +#define CONFIG_CMD(bus, devfn, where) (0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3)) + +/* + * Functions for accessing PCI configuration space with type 1 accesses + */ +static int pci_conf1_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) +{ + u32 word; + unsigned long flags; + + /* + * PCIPDR may only be accessed as 32 bit words, + * so we must do byte alignment by hand + */ + local_irq_save(flags); + writel(CONFIG_CMD(bus,devfn,where), PCIPAR); + word = readl(PCIPDR); + local_irq_restore(flags); + + switch (size) { + case 1: + switch (where & 0x3) { + case 3: + *value = (u8)(word >> 24); + break; + case 2: + *value = (u8)(word >> 16); + break; + case 1: + *value = (u8)(word >> 8); + break; + default: + *value = (u8)word; + break; + } + break; + case 2: + switch (where & 0x3) { + case 3: + *value = (u16)(word >> 24); + local_irq_save(flags); + writel(CONFIG_CMD(bus,devfn,(where+1)), PCIPAR); + word = readl(PCIPDR); + local_irq_restore(flags); + *value |= ((word & 0xff) << 8); + break; + case 2: + *value = (u16)(word >> 16); + break; + case 1: + *value = (u16)(word >> 8); + break; + default: + *value = (u16)word; + break; + } + break; + case 4: + *value = word; + break; + } + PCIDBG(4,"pci_conf1_read@0x%08x=0x%x\n", CONFIG_CMD(bus,devfn,where),*value); + return PCIBIOS_SUCCESSFUL; +} + +/* + * Since MPC-1211 only does 32bit access we'll have to do a read,mask,write operation. + * We'll allow an odd byte offset, though it should be illegal. + */ +static int pci_conf1_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) +{ + u32 word,mask = 0; + unsigned long flags; + u32 shift = (where & 3) * 8; + + if(size == 1) { + mask = ((1 << 8) - 1) << shift; // create the byte mask + } else if(size == 2){ + if(shift == 24) + return PCIBIOS_BAD_REGISTER_NUMBER; + mask = ((1 << 16) - 1) << shift; // create the word mask + } + local_irq_save(flags); + writel(CONFIG_CMD(bus,devfn,where), PCIPAR); + if(size == 4){ + writel(value, PCIPDR); + local_irq_restore(flags); + PCIDBG(4,"pci_conf1_write@0x%08x=0x%x\n", CONFIG_CMD(bus,devfn,where),value); + return PCIBIOS_SUCCESSFUL; + } + word = readl(PCIPDR); + word &= ~mask; + word |= ((value << shift) & mask); + writel(word, PCIPDR); + local_irq_restore(flags); + PCIDBG(4,"pci_conf1_write@0x%08x=0x%x\n", CONFIG_CMD(bus,devfn,where),word); + return PCIBIOS_SUCCESSFUL; +} + +#undef CONFIG_CMD + +static struct pci_ops pci_direct_conf1 = { + .read = pci_conf1_read, + .write = pci_conf1_write, +}; + +static void __devinit quirk_ali_ide_ports(struct pci_dev *dev) +{ + dev->resource[0].start = 0x1f0; + dev->resource[0].end = 0x1f7; + dev->resource[0].flags = IORESOURCE_IO; + dev->resource[1].start = 0x3f6; + dev->resource[1].end = 0x3f6; + dev->resource[1].flags = IORESOURCE_IO; + dev->resource[2].start = 0x170; + dev->resource[2].end = 0x177; + dev->resource[2].flags = IORESOURCE_IO; + dev->resource[3].start = 0x376; + dev->resource[3].end = 0x376; + dev->resource[3].flags = IORESOURCE_IO; + dev->resource[4].start = 0xf000; + dev->resource[4].end = 0xf00f; + dev->resource[4].flags = IORESOURCE_IO; +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M5229, quirk_ali_ide_ports); + +char * __devinit pcibios_setup(char *str) +{ + return str; +} + +/* + * Called after each bus is probed, but before its children + * are examined. 
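The CONFIG_CMD encoding and the by-hand byte alignment in pci_conf1_read() above are easier to see with concrete numbers; a stand-alone user-space sketch (the bus/devfn/register values and the sample data word are made up):

    #include <stdio.h>
    #include <stdint.h>

    /* Type-1 config address: enable bit, bus, devfn, dword-aligned register. */
    static uint32_t config_cmd(unsigned int bus, unsigned int devfn, unsigned int where)
    {
            return 0x80000000u | (bus << 16) | (devfn << 8) | (where & ~3u);
    }

    int main(void)
    {
            unsigned int bus = 0, devfn = (13 << 3) | 0, where = 0x2d; /* slot 13, fn 0 */
            uint32_t word = 0x11223344;     /* pretend this came back from PCIPDR */

            /* PCIPDR is 32-bit only, so a byte read picks its lane out of the word. */
            uint8_t byte = (uint8_t)(word >> ((where & 3) * 8));

            printf("PCIPAR = 0x%08x, byte at 0x%02x = 0x%02x\n",
                   config_cmd(bus, devfn, where), where, byte);
            return 0;
    }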
+ */ + +void __devinit pcibios_fixup_bus(struct pci_bus *b) +{ + pci_read_bridge_bases(b); +} + +/* + * IRQ functions + */ +static inline u8 bridge_swizzle(u8 pin, u8 slot) +{ + return (((pin-1) + slot) % 4) + 1; +} + +static inline u8 bridge_swizzle_pci_1(u8 pin, u8 slot) +{ + return (((pin-1) - slot) & 3) + 1; +} + +static u8 __init mpc1211_swizzle(struct pci_dev *dev, u8 *pinp) +{ + unsigned long flags; + u8 pin = *pinp; + u32 word; + + for ( ; dev->bus->self; dev = dev->bus->self) { + if (!pin) + continue; + + if (dev->bus->number == 1) { + local_irq_save(flags); + writel(0x80000000 | 0x2c, PCIPAR); + word = readl(PCIPDR); + local_irq_restore(flags); + word >>= 16; + + if (word == 0x0001) + pin = bridge_swizzle_pci_1(pin, PCI_SLOT(dev->devfn)); + else + pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); + } else + pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); + } + + *pinp = pin; + + return PCI_SLOT(dev->devfn); +} + +static int __init map_mpc1211_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + int irq = -1; + + /* now lookup the actual IRQ on a platform specific basis (pci-'platform'.c) */ + if (dev->bus->number == 0) { + switch (slot) { + case 13: irq = 9; break; /* USB */ + case 22: irq = 10; break; /* LAN */ + default: irq = 0; break; + } + } else { + switch (pin) { + case 0: irq = 0; break; + case 1: irq = 7; break; + case 2: irq = 9; break; + case 3: irq = 10; break; + case 4: irq = 11; break; + } + } + + if( irq < 0 ) { + PCIDBG(3, "PCI: Error mapping IRQ on device %s\n", pci_name(dev)); + return irq; + } + + PCIDBG(2, "Setting IRQ for slot %s to %d\n", pci_name(dev), irq); + + return irq; +} + +void __init pcibios_fixup_irqs(void) +{ + pci_fixup_irqs(mpc1211_swizzle, map_mpc1211_irq); +} + +void pcibios_align_resource(void *data, struct resource *res, + resource_size_t size, resource_size_t align) +{ + resource_size_t start = res->start; + + if (res->flags & IORESOURCE_IO) { + if (start >= 0x10000UL) { + if ((start & 0xffffUL) < 0x4000UL) { + start = (start & 0xffff0000UL) + 0x4000UL; + } else if ((start & 0xffffUL) >= 0xf000UL) { + start = (start & 0xffff0000UL) + 0x10000UL; + } + res->start = start; + } else { + if (start & 0x300) { + start = (start + 0x3ff) & ~0x3ff; + res->start = start; + } + } + } +} + diff --git a/trunk/arch/sh/boards/mpc1211/rtc.c b/trunk/arch/sh/boards/mpc1211/rtc.c new file mode 100644 index 000000000000..03b123a4bba4 --- /dev/null +++ b/trunk/arch/sh/boards/mpc1211/rtc.c @@ -0,0 +1,136 @@ +/* + * linux/arch/sh/kernel/rtc-mpc1211.c -- MPC-1211 on-chip RTC support + * + * Copyright (C) 2002 Saito.K & Jeanne + * + */ + +#include +#include +#include +#include +#include +#include + +unsigned long get_cmos_time(void) +{ + unsigned int year, mon, day, hour, min, sec; + + spin_lock(&rtc_lock); + + do { + sec = CMOS_READ(RTC_SECONDS); + min = CMOS_READ(RTC_MINUTES); + hour = CMOS_READ(RTC_HOURS); + day = CMOS_READ(RTC_DAY_OF_MONTH); + mon = CMOS_READ(RTC_MONTH); + year = CMOS_READ(RTC_YEAR); + } while (sec != CMOS_READ(RTC_SECONDS)); + + if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + BCD_TO_BIN(sec); + BCD_TO_BIN(min); + BCD_TO_BIN(hour); + BCD_TO_BIN(day); + BCD_TO_BIN(mon); + BCD_TO_BIN(year); + } + + spin_unlock(&rtc_lock); + + year += 1900; + if (year < 1970) + year += 100; + + return mktime(year, mon, day, hour, min, sec); +} + +void mpc1211_rtc_gettimeofday(struct timeval *tv) +{ + + tv->tv_sec = get_cmos_time(); + tv->tv_usec = 0; +} + +/* arc/i386/kernel/time.c */ +/* + * In order to set the CMOS clock precisely, set_rtc_mmss has 
to be + * called 500 ms after the second nowtime has started, because when + * nowtime is written into the registers of the CMOS clock, it will + * jump to the next second precisely 500 ms later. Check the Motorola + * MC146818A or Dallas DS12887 data sheet for details. + * + * BUG: This routine does not handle hour overflow properly; it just + * sets the minutes. Usually you'll only notice that after reboot! + */ +static int set_rtc_mmss(unsigned long nowtime) +{ + int retval = 0; + int real_seconds, real_minutes, cmos_minutes; + unsigned char save_control, save_freq_select; + + /* gets recalled with irq locally disabled */ + spin_lock(&rtc_lock); + save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */ + CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); + + save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */ + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); + + cmos_minutes = CMOS_READ(RTC_MINUTES); + if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) + BCD_TO_BIN(cmos_minutes); + + /* + * since we're only adjusting minutes and seconds, + * don't interfere with hour overflow. This avoids + * messing with unknown time zones but requires your + * RTC not to be off by more than 15 minutes + */ + real_seconds = nowtime % 60; + real_minutes = nowtime / 60; + if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) + real_minutes += 30; /* correct for half hour time zone */ + real_minutes %= 60; + + if (abs(real_minutes - cmos_minutes) < 30) { + if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + BIN_TO_BCD(real_seconds); + BIN_TO_BCD(real_minutes); + } + CMOS_WRITE(real_seconds,RTC_SECONDS); + CMOS_WRITE(real_minutes,RTC_MINUTES); + } else { + printk(KERN_WARNING + "set_rtc_mmss: can't update from %d to %d\n", + cmos_minutes, real_minutes); + retval = -1; + } + + /* The following flags have to be released exactly in this order, + * otherwise the DS12887 (popular MC146818A clone with integrated + * battery and quartz) will not reset the oscillator and will not + * update precisely 500 ms later. You won't find this mentioned in + * the Dallas Semiconductor data sheets, but who believes data + * sheets anyway ... 
-- Markus Kuhn + */ + CMOS_WRITE(save_control, RTC_CONTROL); + CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); + spin_unlock(&rtc_lock); + + return retval; +} + +int mpc1211_rtc_settimeofday(const struct timeval *tv) +{ + unsigned long nowtime = tv->tv_sec; + + return set_rtc_mmss(nowtime); +} + +void mpc1211_time_init(void) +{ + rtc_sh_get_time = mpc1211_rtc_gettimeofday; + rtc_sh_set_time = mpc1211_rtc_settimeofday; +} + diff --git a/trunk/arch/sh/boards/mpc1211/setup.c b/trunk/arch/sh/boards/mpc1211/setup.c new file mode 100644 index 000000000000..fede36361dc7 --- /dev/null +++ b/trunk/arch/sh/boards/mpc1211/setup.c @@ -0,0 +1,347 @@ +/* + * linux/arch/sh/boards/mpc1211/setup.c + * + * Copyright (C) 2002 Saito.K & Jeanne, Fujii.Y + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* ALI15X3 SMBus address offsets */ +#define SMBHSTSTS (0 + 0x3100) +#define SMBHSTCNT (1 + 0x3100) +#define SMBHSTSTART (2 + 0x3100) +#define SMBHSTCMD (7 + 0x3100) +#define SMBHSTADD (3 + 0x3100) +#define SMBHSTDAT0 (4 + 0x3100) +#define SMBHSTDAT1 (5 + 0x3100) +#define SMBBLKDAT (6 + 0x3100) + +/* Other settings */ +#define MAX_TIMEOUT 500 /* times 1/100 sec */ + +/* ALI15X3 command constants */ +#define ALI15X3_ABORT 0x04 +#define ALI15X3_T_OUT 0x08 +#define ALI15X3_QUICK 0x00 +#define ALI15X3_BYTE 0x10 +#define ALI15X3_BYTE_DATA 0x20 +#define ALI15X3_WORD_DATA 0x30 +#define ALI15X3_BLOCK_DATA 0x40 +#define ALI15X3_BLOCK_CLR 0x80 + +/* ALI15X3 status register bits */ +#define ALI15X3_STS_IDLE 0x04 +#define ALI15X3_STS_BUSY 0x08 +#define ALI15X3_STS_DONE 0x10 +#define ALI15X3_STS_DEV 0x20 /* device error */ +#define ALI15X3_STS_COLL 0x40 /* collision or no response */ +#define ALI15X3_STS_TERM 0x80 /* terminated by abort */ +#define ALI15X3_STS_ERR 0xE0 /* all the bad error bits */ + +static void __init pci_write_config(unsigned long busNo, + unsigned long devNo, + unsigned long fncNo, + unsigned long cnfAdd, + unsigned long cnfData) +{ + ctrl_outl((0x80000000 + + ((busNo & 0xff) << 16) + + ((devNo & 0x1f) << 11) + + ((fncNo & 0x07) << 8) + + (cnfAdd & 0xfc)), PCIPAR); + + ctrl_outl(cnfData, PCIPDR); +} + +/* + Initialize IRQ setting +*/ + +static unsigned char m_irq_mask = 0xfb; +static unsigned char s_irq_mask = 0xff; + +static void disable_mpc1211_irq(unsigned int irq) +{ + if( irq < 8) { + m_irq_mask |= (1 << irq); + outb(m_irq_mask,I8259_M_MR); + } else { + s_irq_mask |= (1 << (irq - 8)); + outb(s_irq_mask,I8259_S_MR); + } + +} + +static void enable_mpc1211_irq(unsigned int irq) +{ + if( irq < 8) { + m_irq_mask &= ~(1 << irq); + outb(m_irq_mask,I8259_M_MR); + } else { + s_irq_mask &= ~(1 << (irq - 8)); + outb(s_irq_mask,I8259_S_MR); + } +} + +static inline int mpc1211_irq_real(unsigned int irq) +{ + int value; + int irqmask; + + if ( irq < 8) { + irqmask = 1<= MAX_TIMEOUT){ + return -1; + } + + outb(((address & 0x7f) << 1), SMBHSTADD); + outb(0xc0, SMBHSTCNT); + outb(command & 0xff, SMBHSTCMD); + outb(no & 0x1f, SMBHSTDAT0); + + for(i = 1; i <= no; i++) { + outb(*p++, SMBBLKDAT); + } + outb(0xff, SMBHSTSTART); + + temp = inb(SMBHSTSTS); + for (timeout = 0; (timeout < MAX_TIMEOUT) && !(temp & (ALI15X3_STS_ERR | ALI15X3_STS_DONE)); timeout++) { + delay1000(); + temp = inb(SMBHSTSTS); + } + if (timeout >= MAX_TIMEOUT) { + return -2; + } + if ( temp & ALI15X3_STS_ERR ){ + return -3; + } + return 0; +} + +static struct resource heartbeat_resources[] = { + [0] = { + .start = 0xa2000000, + .end = 0xa2000000, + .flags = IORESOURCE_MEM, + }, 
+}; + +static struct platform_device heartbeat_device = { + .name = "heartbeat", + .id = -1, + .num_resources = ARRAY_SIZE(heartbeat_resources), + .resource = heartbeat_resources, +}; + +static struct platform_device *mpc1211_devices[] __initdata = { + &heartbeat_device, +}; + +static int __init mpc1211_devices_setup(void) +{ + return platform_add_devices(mpc1211_devices, + ARRAY_SIZE(mpc1211_devices)); +} +__initcall(mpc1211_devices_setup); + +/* arch/sh/boards/mpc1211/rtc.c */ +void mpc1211_time_init(void); + +static void __init mpc1211_setup(char **cmdline_p) +{ + unsigned char spd_buf[128]; + + __set_io_port_base(PA_PCI_IO); + + pci_write_config(0,0,0,0x54, 0xb0b00000); + + do { + outb(ALI15X3_ABORT, SMBHSTCNT); + spd_buf[0] = 0x0c; + spd_buf[1] = 0x43; + spd_buf[2] = 0x7f; + spd_buf[3] = 0x03; + spd_buf[4] = 0x00; + spd_buf[5] = 0x03; + spd_buf[6] = 0x00; + } while (put_smb_blk(spd_buf, 0x69, 0, 7) < 0); + + board_time_init = mpc1211_time_init; + + return 0; +} + +/* + * The Machine Vector + */ +static struct sh_machine_vector mv_mpc1211 __initmv = { + .mv_name = "Interface MPC-1211(CTP/PCI/MPC-SH02)", + .mv_setup = mpc1211_setup, + .mv_nr_irqs = 48, + .mv_irq_demux = mpc1211_irq_demux, + .mv_init_irq = init_mpc1211_IRQ, +}; diff --git a/trunk/arch/sh/boards/renesas/migor/setup.c b/trunk/arch/sh/boards/renesas/migor/setup.c index 01af44245b57..e7c150d49702 100644 --- a/trunk/arch/sh/boards/renesas/migor/setup.c +++ b/trunk/arch/sh/boards/renesas/migor/setup.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -28,11 +27,6 @@ * 0x18000000 8GB 8 NAND Flash (K9K8G08U0A) */ -static struct smc91x_platdata smc91x_info = { - .flags = SMC91X_USE_16BIT, - .irq_flags = IRQF_TRIGGER_HIGH, -}; - static struct resource smc91x_eth_resources[] = { [0] = { .name = "SMC91C111" , @@ -42,7 +36,7 @@ static struct resource smc91x_eth_resources[] = { }, [1] = { .start = 32, /* IRQ0 */ - .flags = IORESOURCE_IRQ, + .flags = IORESOURCE_IRQ | IRQF_TRIGGER_HIGH, }, }; @@ -50,9 +44,6 @@ static struct platform_device smc91x_eth_device = { .name = "smc91x", .num_resources = ARRAY_SIZE(smc91x_eth_resources), .resource = smc91x_eth_resources, - .dev = { - .platform_data = &smc91x_info, - }, }; static struct sh_keysc_info sh_keysc_info = { diff --git a/trunk/arch/sh/boards/renesas/r7780rp/irq-r7780mp.c b/trunk/arch/sh/boards/renesas/r7780rp/irq-r7780mp.c index ae1cfcb29700..68f0ad1b637d 100644 --- a/trunk/arch/sh/boards/renesas/r7780rp/irq-r7780mp.c +++ b/trunk/arch/sh/boards/renesas/r7780rp/irq-r7780mp.c @@ -62,7 +62,7 @@ static unsigned char irl2irq[HL_NR_IRL] __initdata = { static DECLARE_INTC_DESC(intc_desc, "r7780mp", vectors, NULL, mask_registers, NULL, NULL); -unsigned char * __init highlander_plat_irq_setup(void) +unsigned char * __init highlander_init_irq_r7780mp(void) { if ((ctrl_inw(0xa4000700) & 0xf000) == 0x2000) { printk(KERN_INFO "Using r7780mp interrupt controller.\n"); diff --git a/trunk/arch/sh/boards/renesas/r7780rp/irq-r7780rp.c b/trunk/arch/sh/boards/renesas/r7780rp/irq-r7780rp.c index 9d3921fe27c0..bd34048ed0e1 100644 --- a/trunk/arch/sh/boards/renesas/r7780rp/irq-r7780rp.c +++ b/trunk/arch/sh/boards/renesas/r7780rp/irq-r7780rp.c @@ -55,7 +55,7 @@ static unsigned char irl2irq[HL_NR_IRL] __initdata = { static DECLARE_INTC_DESC(intc_desc, "r7780rp", vectors, NULL, mask_registers, NULL, NULL); -unsigned char * __init highlander_plat_irq_setup(void) +unsigned char * __init highlander_init_irq_r7780rp(void) { if (ctrl_inw(0xa5000600)) { printk(KERN_INFO "Using 
r7780rp interrupt controller.\n"); diff --git a/trunk/arch/sh/boards/renesas/r7780rp/irq-r7785rp.c b/trunk/arch/sh/boards/renesas/r7780rp/irq-r7785rp.c index 896c045aa39d..bf7ec107fbc6 100644 --- a/trunk/arch/sh/boards/renesas/r7780rp/irq-r7785rp.c +++ b/trunk/arch/sh/boards/renesas/r7780rp/irq-r7785rp.c @@ -64,7 +64,7 @@ static unsigned char irl2irq[HL_NR_IRL] __initdata = { static DECLARE_INTC_DESC(intc_desc, "r7785rp", vectors, NULL, mask_registers, NULL, NULL); -unsigned char * __init highlander_plat_irq_setup(void) +unsigned char * __init highlander_init_irq_r7785rp(void) { if ((ctrl_inw(0xa4000158) & 0xf000) != 0x1000) return NULL; diff --git a/trunk/arch/sh/boards/renesas/r7780rp/setup.c b/trunk/arch/sh/boards/renesas/r7780rp/setup.c index bc79afb6fc4c..ac0a96522e45 100644 --- a/trunk/arch/sh/boards/renesas/r7780rp/setup.c +++ b/trunk/arch/sh/boards/renesas/r7780rp/setup.c @@ -316,7 +316,7 @@ static void __init highlander_setup(char **cmdline_p) static unsigned char irl2irq[HL_NR_IRL]; -static int highlander_irq_demux(int irq) +int highlander_irq_demux(int irq) { if (irq >= HL_NR_IRL || !irl2irq[irq]) return irq; @@ -324,9 +324,27 @@ static int highlander_irq_demux(int irq) return irl2irq[irq]; } -static void __init highlander_init_irq(void) +void __init highlander_init_irq(void) { - unsigned char *ucp = highlander_plat_irq_setup(); + unsigned char *ucp = NULL; + + do { +#ifdef CONFIG_SH_R7780MP + ucp = highlander_init_irq_r7780mp(); + if (ucp) + break; +#endif +#ifdef CONFIG_SH_R7785RP + ucp = highlander_init_irq_r7785rp(); + if (ucp) + break; +#endif +#ifdef CONFIG_SH_R7780RP + ucp = highlander_init_irq_r7780rp(); + if (ucp) + break; +#endif + } while (0); if (ucp) { plat_irq_setup_pins(IRQ_MODE_IRL3210); diff --git a/trunk/arch/sh/boards/renesas/rts7751r2d/setup.c b/trunk/arch/sh/boards/renesas/rts7751r2d/setup.c index 452d0d6459a4..f21ee49ef3a5 100644 --- a/trunk/arch/sh/boards/renesas/rts7751r2d/setup.c +++ b/trunk/arch/sh/boards/renesas/rts7751r2d/setup.c @@ -109,6 +109,7 @@ static struct platform_device heartbeat_device = { .resource = heartbeat_resources, }; +#ifdef CONFIG_MFD_SM501 static struct plat_serial8250_port uart_platform_data[] = { { .membase = (void __iomem *)0xb3e30000, @@ -207,9 +208,13 @@ static struct platform_device sm501_device = { .resource = sm501_resources, }; +#endif /* CONFIG_MFD_SM501 */ + static struct platform_device *rts7751r2d_devices[] __initdata = { +#ifdef CONFIG_MFD_SM501 &uart_device, &sm501_device, +#endif &heartbeat_device, &spi_sh_sci_device, }; @@ -229,9 +234,7 @@ static int __init rts7751r2d_devices_setup(void) { if (register_trapped_io(&cf_trapped_io) == 0) platform_device_register(&cf_ide_device); - spi_register_board_info(spi_bus, ARRAY_SIZE(spi_bus)); - return platform_add_devices(rts7751r2d_devices, ARRAY_SIZE(rts7751r2d_devices)); } diff --git a/trunk/arch/sh/boards/se/7206/setup.c b/trunk/arch/sh/boards/se/7206/setup.c index 4fe84cc08406..5b3ee089d91d 100644 --- a/trunk/arch/sh/boards/se/7206/setup.c +++ b/trunk/arch/sh/boards/se/7206/setup.c @@ -3,13 +3,12 @@ * linux/arch/sh/boards/se/7206/setup.c * * Copyright (C) 2006 Yoshinori Sato - * Copyright (C) 2007 - 2008 Paul Mundt + * Copyright (C) 2007 Paul Mundt * * Hitachi 7206 SolutionEngine Support. 
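Several board hunks in this area drop a struct smc91x_platdata that had been attached through .dev.platform_data; for reference, the general shape of that hookup (the register base, IRQ number and demo_ names below are invented, not any particular board's values):

    #include <linux/kernel.h>
    #include <linux/ioport.h>
    #include <linux/platform_device.h>
    #include <linux/smc91x.h>

    static struct smc91x_platdata demo_smc91x_info = {
            .flags = SMC91X_USE_16BIT,
    };

    static struct resource demo_smc91x_resources[] = {
            [0] = {
                    .start = 0xb0000300,            /* invented register base */
                    .end   = 0xb0000300 + 0x20 - 1,
                    .flags = IORESOURCE_MEM,
            },
            [1] = {
                    .start = 32,                    /* invented IRQ */
                    .flags = IORESOURCE_IRQ,
            },
    };

    static struct platform_device demo_smc91x_device = {
            .name          = "smc91x",
            .id            = -1,
            .num_resources = ARRAY_SIZE(demo_smc91x_resources),
            .resource      = demo_smc91x_resources,
            .dev           = {
                    .platform_data = &demo_smc91x_info,
            },
    };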
*/ #include #include -#include #include #include #include @@ -17,9 +16,8 @@ static struct resource smc91x_resources[] = { [0] = { - .name = "smc91x-regs", - .start = PA_SMSC + 0x300, - .end = PA_SMSC + 0x300 + 0x020 - 1, + .start = 0x300, + .end = 0x300 + 0x020 - 1, .flags = IORESOURCE_MEM, }, [1] = { @@ -29,18 +27,9 @@ static struct resource smc91x_resources[] = { }, }; -static struct smc91x_platdata smc91x_info = { - .flags = SMC91X_USE_16BIT, -}; - static struct platform_device smc91x_device = { .name = "smc91x", .id = -1, - .dev = { - .dma_mask = NULL, - .coherent_dma_mask = 0xffffffff, - .platform_data = &smc91x_info, - }, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, }; diff --git a/trunk/arch/sh/boards/se/7722/setup.c b/trunk/arch/sh/boards/se/7722/setup.c index ede3957fc14a..33f6ee71f848 100644 --- a/trunk/arch/sh/boards/se/7722/setup.c +++ b/trunk/arch/sh/boards/se/7722/setup.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -45,10 +44,6 @@ static struct platform_device heartbeat_device = { }; /* SMC91x */ -static struct smc91x_platdata smc91x_info = { - .flags = SMC91X_USE_16BIT, -}; - static struct resource smc91x_eth_resources[] = { [0] = { .name = "smc91x-regs" , @@ -69,7 +64,6 @@ static struct platform_device smc91x_eth_device = { .dev = { .dma_mask = NULL, /* don't use dma */ .coherent_dma_mask = 0xffffffff, - .platform_data = &smc91x_info, }, .num_resources = ARRAY_SIZE(smc91x_eth_resources), .resource = smc91x_eth_resources, diff --git a/trunk/arch/sh/boot/compressed/Makefile_32 b/trunk/arch/sh/boot/compressed/Makefile_32 index c0d25fb1aa60..6ac8d4a4ed1d 100644 --- a/trunk/arch/sh/boot/compressed/Makefile_32 +++ b/trunk/arch/sh/boot/compressed/Makefile_32 @@ -6,6 +6,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz \ head_32.o misc_32.o piggy.o +EXTRA_AFLAGS := -traditional OBJECTS = $(obj)/head_32.o $(obj)/misc_32.o diff --git a/trunk/arch/sh/boot/compressed/Makefile_64 b/trunk/arch/sh/boot/compressed/Makefile_64 index 912f3e205a0d..4334f2b86d8f 100644 --- a/trunk/arch/sh/boot/compressed/Makefile_64 +++ b/trunk/arch/sh/boot/compressed/Makefile_64 @@ -13,6 +13,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz \ head_64.o misc_64.o cache.o piggy.o +EXTRA_AFLAGS := -traditional OBJECTS := $(obj)/vmlinux_64.lds $(obj)/head_64.o $(obj)/misc_64.o \ $(obj)/cache.o diff --git a/trunk/arch/sh/kernel/cpu/irq/intc-sh5.c b/trunk/arch/sh/kernel/cpu/irq/intc-sh5.c index de45c6a3e33b..d6e0e2bdaad5 100644 --- a/trunk/arch/sh/kernel/cpu/irq/intc-sh5.c +++ b/trunk/arch/sh/kernel/cpu/irq/intc-sh5.c @@ -184,8 +184,9 @@ int intc_irq_describe(char* p, int irq) void __init plat_irq_setup(void) { - unsigned long long __dummy0, __dummy1=~0x00000000100000f0; + unsigned long long __dummy0, __dummy1=~0x00000000100000f0; unsigned long reg; + unsigned long data; int i; intc_virt = onchip_remap(INTC_BASE, 1024, "INTC"); @@ -195,8 +196,11 @@ void __init plat_irq_setup(void) /* Set default: per-line enable/disable, priority driven ack/eoi */ - for (i = 0; i < NR_INTC_IRQS; i++) - irq_desc[i].chip = &intc_irq_type; + for (i = 0; i < NR_INTC_IRQS; i++) { + if (platform_int_priority[i] != NO_PRIORITY) { + irq_desc[i].chip = &intc_irq_type; + } + } /* Disable all interrupts and set all priorities to 0 to avoid trouble */ @@ -207,42 +211,35 @@ void __init plat_irq_setup(void) ctrl_outl( NO_PRIORITY, reg); -#ifdef CONFIG_SH_CAYMAN - { - unsigned long data; - - /* Set IRLM */ - /* If all the priorities are set to 'no priority', then - * 
assume we are using encoded mode. - */ - irlm = platform_int_priority[IRQ_IRL0] + - platform_int_priority[IRQ_IRL1] + - platform_int_priority[IRQ_IRL2] + - platform_int_priority[IRQ_IRL3]; - if (irlm == NO_PRIORITY) { - /* IRLM = 0 */ - reg = INTC_ICR_CLEAR; - i = IRQ_INTA; - printk("Trying to use encoded IRL0-3. IRLs unsupported.\n"); - } else { - /* IRLM = 1 */ - reg = INTC_ICR_SET; - i = IRQ_IRL0; - } - ctrl_outl(INTC_ICR_IRLM, reg); - - /* Set interrupt priorities according to platform description */ - for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) { - data |= platform_int_priority[i] << - ((i % INTC_INTPRI_PPREG) * 4); - if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) { - /* Upon the 7th, set Priority Register */ - ctrl_outl(data, reg); - data = 0; - reg += 8; - } + /* Set IRLM */ + /* If all the priorities are set to 'no priority', then + * assume we are using encoded mode. + */ + irlm = platform_int_priority[IRQ_IRL0] + platform_int_priority[IRQ_IRL1] + \ + platform_int_priority[IRQ_IRL2] + platform_int_priority[IRQ_IRL3]; + + if (irlm == NO_PRIORITY) { + /* IRLM = 0 */ + reg = INTC_ICR_CLEAR; + i = IRQ_INTA; + printk("Trying to use encoded IRL0-3. IRLs unsupported.\n"); + } else { + /* IRLM = 1 */ + reg = INTC_ICR_SET; + i = IRQ_IRL0; + } + ctrl_outl(INTC_ICR_IRLM, reg); + + /* Set interrupt priorities according to platform description */ + for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) { + data |= platform_int_priority[i] << ((i % INTC_INTPRI_PPREG) * 4); + if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) { + /* Upon the 7th, set Priority Register */ + ctrl_outl(data, reg); + data = 0; + reg += 8; } -#endif + } /* * And now let interrupts come in. diff --git a/trunk/arch/sh/kernel/cpu/irq/intc.c b/trunk/arch/sh/kernel/cpu/irq/intc.c index da5dae787888..84806b2027f8 100644 --- a/trunk/arch/sh/kernel/cpu/irq/intc.c +++ b/trunk/arch/sh/kernel/cpu/irq/intc.c @@ -1,7 +1,7 @@ /* * Shared interrupt handling code for IPR and INTC2 types of IRQs. 
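The priority loop in the sh5 plat_irq_setup() hunk above packs one 4-bit priority per interrupt into successive INTPRI words; a stand-alone illustration of the packing, assuming eight fields per register as the *4 shift and the "upon the 7th" comment suggest (the priority values are made up):

    #include <stdio.h>
    #include <stdint.h>

    #define FIELDS_PER_REG 8        /* assumed value of INTC_INTPRI_PPREG */

    int main(void)
    {
            unsigned int prio[16];
            uint32_t data = 0;
            int i;

            for (i = 0; i < 16; i++)
                    prio[i] = i % 4;        /* made-up priorities 0..3 */

            for (i = 0; i < 16; i++) {
                    data |= (uint32_t)prio[i] << ((i % FIELDS_PER_REG) * 4);
                    if ((i % FIELDS_PER_REG) == FIELDS_PER_REG - 1) {
                            printf("INTPRI word %d = 0x%08x\n", i / FIELDS_PER_REG, data);
                            data = 0;
                    }
            }
            return 0;
    }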
* - * Copyright (C) 2007, 2008 Magnus Damm + * Copyright (C) 2007 Magnus Damm * * Based on intc2.c and ipr.c * @@ -62,9 +62,6 @@ struct intc_desc_int { #endif static unsigned int intc_prio_level[NR_IRQS]; /* for now */ -#ifdef CONFIG_CPU_SH3 -static unsigned long ack_handle[NR_IRQS]; -#endif static inline struct intc_desc_int *get_intc_desc(unsigned int irq) { @@ -101,26 +98,17 @@ static void write_32(unsigned long addr, unsigned long h, unsigned long data) static void modify_8(unsigned long addr, unsigned long h, unsigned long data) { - unsigned long flags; - local_irq_save(flags); ctrl_outb(set_field(ctrl_inb(addr), data, h), addr); - local_irq_restore(flags); } static void modify_16(unsigned long addr, unsigned long h, unsigned long data) { - unsigned long flags; - local_irq_save(flags); ctrl_outw(set_field(ctrl_inw(addr), data, h), addr); - local_irq_restore(flags); } static void modify_32(unsigned long addr, unsigned long h, unsigned long data) { - unsigned long flags; - local_irq_save(flags); ctrl_outl(set_field(ctrl_inl(addr), data, h), addr); - local_irq_restore(flags); } enum { REG_FN_ERR = 0, REG_FN_WRITE_BASE = 1, REG_FN_MODIFY_BASE = 5 }; @@ -231,25 +219,6 @@ static void intc_disable(unsigned int irq) } } -#ifdef CONFIG_CPU_SH3 -static void intc_mask_ack(unsigned int irq) -{ - struct intc_desc_int *d = get_intc_desc(irq); - unsigned long handle = ack_handle[irq]; - unsigned long addr; - - intc_disable(irq); - - /* read register and write zero only to the assocaited bit */ - - if (handle) { - addr = INTC_REG(d, _INTC_ADDR_D(handle), 0); - ctrl_inb(addr); - ctrl_outb(0x3f ^ set_field(0, 1, handle), addr); - } -} -#endif - static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp, unsigned int nr_hp, unsigned int irq) @@ -311,12 +280,7 @@ static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = { [IRQ_TYPE_EDGE_FALLING] = VALID(0), [IRQ_TYPE_EDGE_RISING] = VALID(1), [IRQ_TYPE_LEVEL_LOW] = VALID(2), - /* SH7706, SH7707 and SH7709 do not support high level triggered */ -#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \ - !defined(CONFIG_CPU_SUBTYPE_SH7707) && \ - !defined(CONFIG_CPU_SUBTYPE_SH7709) [IRQ_TYPE_LEVEL_HIGH] = VALID(3), -#endif }; static int intc_set_sense(unsigned int irq, unsigned int type) @@ -466,40 +430,6 @@ static unsigned int __init intc_prio_data(struct intc_desc *desc, return 0; } -#ifdef CONFIG_CPU_SH3 -static unsigned int __init intc_ack_data(struct intc_desc *desc, - struct intc_desc_int *d, - intc_enum enum_id) -{ - struct intc_mask_reg *mr = desc->ack_regs; - unsigned int i, j, fn, mode; - unsigned long reg_e, reg_d; - - for (i = 0; mr && enum_id && i < desc->nr_ack_regs; i++) { - mr = desc->ack_regs + i; - - for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) { - if (mr->enum_ids[j] != enum_id) - continue; - - fn = REG_FN_MODIFY_BASE; - mode = MODE_ENABLE_REG; - reg_e = mr->set_reg; - reg_d = mr->set_reg; - - fn += (mr->reg_width >> 3) - 1; - return _INTC_MK(fn, mode, - intc_get_reg(d, reg_e), - intc_get_reg(d, reg_d), - 1, - (mr->reg_width - 1) - j); - } - } - - return 0; -} -#endif - static unsigned int __init intc_sense_data(struct intc_desc *desc, struct intc_desc_int *d, intc_enum enum_id) @@ -600,11 +530,6 @@ static void __init intc_register_irq(struct intc_desc *desc, /* irq should be disabled by default */ d->chip.mask(irq); - -#ifdef CONFIG_CPU_SH3 - if (desc->ack_regs) - ack_handle[irq] = intc_ack_data(desc, d, enum_id); -#endif } static unsigned int __init save_reg(struct intc_desc_int *d, @@ -635,9 +560,6 @@ void __init 
register_intc_controller(struct intc_desc *desc) d->nr_reg += desc->prio_regs ? desc->nr_prio_regs * 2 : 0; d->nr_reg += desc->sense_regs ? desc->nr_sense_regs : 0; -#ifdef CONFIG_CPU_SH3 - d->nr_reg += desc->ack_regs ? desc->nr_ack_regs : 0; -#endif d->reg = alloc_bootmem(d->nr_reg * sizeof(*d->reg)); #ifdef CONFIG_SMP d->smp = alloc_bootmem(d->nr_reg * sizeof(*d->smp)); @@ -670,23 +592,14 @@ void __init register_intc_controller(struct intc_desc *desc) } } + BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */ + d->chip.name = desc->name; d->chip.mask = intc_disable; d->chip.unmask = intc_enable; d->chip.mask_ack = intc_disable; d->chip.set_type = intc_set_sense; -#ifdef CONFIG_CPU_SH3 - if (desc->ack_regs) { - for (i = 0; i < desc->nr_ack_regs; i++) - k += save_reg(d, k, desc->ack_regs[i].set_reg, 0); - - d->chip.mask_ack = intc_mask_ack; - } -#endif - - BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */ - for (i = 0; i < desc->nr_vectors; i++) { struct intc_vect *vect = desc->vectors + i; diff --git a/trunk/arch/sh/kernel/cpu/sh2a/fpu.c b/trunk/arch/sh/kernel/cpu/sh2a/fpu.c index 6df2fb98eb30..5627c0b3ffa8 100644 --- a/trunk/arch/sh/kernel/cpu/sh2a/fpu.c +++ b/trunk/arch/sh/kernel/cpu/sh2a/fpu.c @@ -300,7 +300,7 @@ static int denormal_addf(int hx, int hy) iy = hy & 0x7fffffff; if (iy < 0x00800000) { ix = denormal_subf1(ix, iy); - if ((int) ix < 0) { + if (ix < 0) { ix = -ix; sign ^= 0x80000000; } @@ -385,7 +385,7 @@ static long long denormal_addd(long long hx, long long hy) iy = hy & 0x7fffffffffffffffLL; if (iy < 0x0010000000000000LL) { ix = denormal_subd1(ix, iy); - if ((int) ix < 0) { + if (ix < 0) { ix = -ix; sign ^= 0x8000000000000000LL; } diff --git a/trunk/arch/sh/kernel/cpu/sh3/Makefile b/trunk/arch/sh/kernel/cpu/sh3/Makefile index 511de55af832..3ae4d9111f19 100644 --- a/trunk/arch/sh/kernel/cpu/sh3/Makefile +++ b/trunk/arch/sh/kernel/cpu/sh3/Makefile @@ -2,7 +2,7 @@ # Makefile for the Linux/SuperH SH-3 backends. # -obj-y := ex.o probe.o entry.o setup-sh3.o +obj-y := ex.o probe.o entry.o # CPU subtype setup obj-$(CONFIG_CPU_SUBTYPE_SH7705) += setup-sh7705.o diff --git a/trunk/arch/sh/kernel/cpu/sh3/setup-sh3.c b/trunk/arch/sh/kernel/cpu/sh3/setup-sh3.c deleted file mode 100644 index c98846857855..000000000000 --- a/trunk/arch/sh/kernel/cpu/sh3/setup-sh3.c +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Shared SH3 Setup code - * - * Copyright (C) 2008 Magnus Damm - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- */ - -#include -#include -#include - -/* All SH3 devices are equipped with IRQ0->5 (except sh7708) */ - -enum { - UNUSED = 0, - - /* interrupt sources */ - IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, -}; - -static struct intc_vect vectors_irq0123[] __initdata = { - INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620), - INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660), -}; - -static struct intc_vect vectors_irq45[] __initdata = { - INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0), -}; - -static struct intc_prio_reg prio_registers[] __initdata = { - { 0xa4000016, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } }, - { 0xa4000018, 0, 16, 4, /* IPRD */ { 0, 0, IRQ5, IRQ4 } }, -}; - -static struct intc_mask_reg ack_registers[] __initdata = { - { 0xa4000004, 0, 8, /* IRR0 */ - { 0, 0, IRQ5, IRQ4, IRQ3, IRQ2, IRQ1, IRQ0 } }, -}; - -static struct intc_sense_reg sense_registers[] __initdata = { - { 0xa4000010, 16, 2, { 0, 0, IRQ5, IRQ4, IRQ3, IRQ2, IRQ1, IRQ0 } }, -}; - -static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh3-irq0123", - vectors_irq0123, NULL, NULL, - prio_registers, sense_registers, ack_registers); - -static DECLARE_INTC_DESC_ACK(intc_desc_irq45, "sh3-irq45", - vectors_irq45, NULL, NULL, - prio_registers, sense_registers, ack_registers); - -#define INTC_ICR1 0xa4000010UL -#define INTC_ICR1_IRQLVL (1<<14) - -void __init plat_irq_setup_pins(int mode) -{ - if (mode == IRQ_MODE_IRQ) { - ctrl_outw(ctrl_inw(INTC_ICR1) & ~INTC_ICR1_IRQLVL, INTC_ICR1); - register_intc_controller(&intc_desc_irq0123); - return; - } - BUG(); -} - -void __init plat_irq_setup_sh3(void) -{ - register_intc_controller(&intc_desc_irq45); -} diff --git a/trunk/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/trunk/arch/sh/kernel/cpu/sh3/setup-sh7705.c index 6468ae86b944..f581534cb732 100644 --- a/trunk/arch/sh/kernel/cpu/sh3/setup-sh7705.c +++ b/trunk/arch/sh/kernel/cpu/sh3/setup-sh7705.c @@ -37,7 +37,7 @@ enum { }; static struct intc_vect vectors[] __initdata = { - /* IRQ0->5 are handled in setup-sh3.c */ + INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0), INTC_VECT(PINT07, 0x700), INTC_VECT(PINT815, 0x720), INTC_VECT(DMAC_DEI0, 0x800), INTC_VECT(DMAC_DEI1, 0x820), INTC_VECT(DMAC_DEI2, 0x840), INTC_VECT(DMAC_DEI3, 0x860), @@ -48,7 +48,7 @@ static struct intc_vect vectors[] __initdata = { INTC_VECT(ADC_ADI, 0x980), INTC_VECT(USB_USI0, 0xa20), INTC_VECT(USB_USI1, 0xa40), INTC_VECT(TPU0, 0xc00), INTC_VECT(TPU1, 0xc20), - INTC_VECT(TPU2, 0xc80), INTC_VECT(TPU3, 0xca0), + INTC_VECT(TPU3, 0xc80), INTC_VECT(TPU1, 0xca0), INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), INTC_VECT(TMU2_TUNI, 0x440), INTC_VECT(TMU2_TICPI, 0x460), INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0), @@ -81,6 +81,14 @@ static struct intc_prio_reg prio_registers[] __initdata = { static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, groups, NULL, prio_registers, NULL); +static struct intc_vect vectors_irq[] __initdata = { + INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620), + INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660), +}; + +static DECLARE_INTC_DESC(intc_desc_irq, "sh7705-irq", vectors_irq, NULL, + NULL, prio_registers, NULL); + static struct plat_sci_port sci_platform_data[] = { { .mapbase = 0xa4410000, @@ -151,8 +159,16 @@ static int __init sh7705_devices_setup(void) } __initcall(sh7705_devices_setup); +void __init plat_irq_setup_pins(int mode) +{ + if (mode == IRQ_MODE_IRQ) { + register_intc_controller(&intc_desc_irq); + return; + } + BUG(); +} + void __init plat_irq_setup(void) { register_intc_controller(&intc_desc); - plat_irq_setup_sh3(); } diff --git 
a/trunk/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/trunk/arch/sh/kernel/cpu/sh3/setup-sh770x.c index 93c55e2ed952..d3733b13ea52 100644 --- a/trunk/arch/sh/kernel/cpu/sh3/setup-sh770x.c +++ b/trunk/arch/sh/kernel/cpu/sh3/setup-sh770x.c @@ -52,7 +52,7 @@ static struct intc_vect vectors[] __initdata = { #if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ defined(CONFIG_CPU_SUBTYPE_SH7707) || \ defined(CONFIG_CPU_SUBTYPE_SH7709) - /* IRQ0->5 are handled in setup-sh3.c */ + INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0), INTC_VECT(DMAC_DEI0, 0x800), INTC_VECT(DMAC_DEI1, 0x820), INTC_VECT(DMAC_DEI2, 0x840), INTC_VECT(DMAC_DEI3, 0x860), INTC_VECT(ADC_ADI, 0x980), @@ -104,6 +104,18 @@ static struct intc_prio_reg prio_registers[] __initdata = { static DECLARE_INTC_DESC(intc_desc, "sh770x", vectors, groups, NULL, prio_registers, NULL); +#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ + defined(CONFIG_CPU_SUBTYPE_SH7707) || \ + defined(CONFIG_CPU_SUBTYPE_SH7709) +static struct intc_vect vectors_irq[] __initdata = { + INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620), + INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660), +}; + +static DECLARE_INTC_DESC(intc_desc_irq, "sh770x-irq", vectors_irq, NULL, + NULL, prio_registers, NULL); +#endif + static struct resource rtc_resources[] = { [0] = { .start = 0xfffffec0, @@ -182,12 +194,24 @@ static int __init sh770x_devices_setup(void) } __initcall(sh770x_devices_setup); -void __init plat_irq_setup(void) +#define INTC_ICR1 0xa4000010UL +#define INTC_ICR1_IRQLVL (1<<14) + +void __init plat_irq_setup_pins(int mode) { - register_intc_controller(&intc_desc); + if (mode == IRQ_MODE_IRQ) { #if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ defined(CONFIG_CPU_SUBTYPE_SH7707) || \ defined(CONFIG_CPU_SUBTYPE_SH7709) - plat_irq_setup_sh3(); + ctrl_outw(ctrl_inw(INTC_ICR1) & ~INTC_ICR1_IRQLVL, INTC_ICR1); + register_intc_controller(&intc_desc_irq); + return; #endif + } + BUG(); +} + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); } diff --git a/trunk/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/trunk/arch/sh/kernel/cpu/sh3/setup-sh7710.c index 77eee481de47..7406c9ad9259 100644 --- a/trunk/arch/sh/kernel/cpu/sh3/setup-sh7710.c +++ b/trunk/arch/sh/kernel/cpu/sh3/setup-sh7710.c @@ -38,7 +38,7 @@ enum { }; static struct intc_vect vectors[] __initdata = { - /* IRQ0->5 are handled in setup-sh3.c */ + INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0), INTC_VECT(DMAC_DEI0, 0x800), INTC_VECT(DMAC_DEI1, 0x820), INTC_VECT(DMAC_DEI2, 0x840), INTC_VECT(DMAC_DEI3, 0x860), INTC_VECT(SCIF0_ERI, 0x880), INTC_VECT(SCIF0_RXI, 0x8a0), @@ -79,7 +79,10 @@ static struct intc_prio_reg prio_registers[] __initdata = { { 0xa4000016, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } }, { 0xa4000018, 0, 16, 4, /* IPRD */ { 0, 0, IRQ5, IRQ4 } }, { 0xa400001a, 0, 16, 4, /* IPRE */ { DMAC1, SCIF0, SCIF1 } }, - { 0xa4080000, 0, 16, 4, /* IPRF */ { IPSEC, DMAC2 } }, + { 0xa4080000, 0, 16, 4, /* IPRF */ { 0, DMAC2 } }, +#ifdef CONFIG_CPU_SUBTYPE_SH7710 + { 0xa4080000, 0, 16, 4, /* IPRF */ { IPSEC } }, +#endif { 0xa4080002, 0, 16, 4, /* IPRG */ { EDMAC0, EDMAC1, EDMAC2 } }, { 0xa4080004, 0, 16, 4, /* IPRH */ { 0, 0, 0, SIOF0 } }, { 0xa4080006, 0, 16, 4, /* IPRI */ { 0, 0, SIOF1 } }, @@ -88,6 +91,14 @@ static struct intc_prio_reg prio_registers[] __initdata = { static DECLARE_INTC_DESC(intc_desc, "sh7710", vectors, groups, NULL, prio_registers, NULL); +static struct intc_vect vectors_irq[] __initdata = { + INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620), + INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660), +}; + +static 
DECLARE_INTC_DESC(intc_desc_irq, "sh7710-irq", vectors_irq, NULL, + NULL, prio_registers, NULL); + static struct resource rtc_resources[] = { [0] = { .start = 0xa413fec0, @@ -159,8 +170,16 @@ static int __init sh7710_devices_setup(void) } __initcall(sh7710_devices_setup); +void __init plat_irq_setup_pins(int mode) +{ + if (mode == IRQ_MODE_IRQ) { + register_intc_controller(&intc_desc_irq); + return; + } + BUG(); +} + void __init plat_irq_setup(void) { register_intc_controller(&intc_desc); - plat_irq_setup_sh3(); } diff --git a/trunk/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/trunk/arch/sh/kernel/cpu/sh3/setup-sh7720.c index f807a21b066c..8028082527c5 100644 --- a/trunk/arch/sh/kernel/cpu/sh3/setup-sh7720.c +++ b/trunk/arch/sh/kernel/cpu/sh3/setup-sh7720.c @@ -19,6 +19,10 @@ #include #include +#define INTC_ICR1 0xA4140010UL +#define INTC_ICR_IRLM 0x4000 +#define INTC_ICR_IRQ (~INTC_ICR_IRLM) + static struct resource rtc_resources[] = { [0] = { .start = 0xa413fec0, @@ -166,7 +170,6 @@ enum { }; static struct intc_vect vectors[] __initdata = { - /* IRQ0->5 are handled in setup-sh3.c */ INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), INTC_VECT(TMU2, 0x440), INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0), INTC_VECT(RTC_CUI, 0x4c0), @@ -211,7 +214,11 @@ static struct intc_prio_reg prio_registers[] __initdata = { { 0xA414FEE4UL, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, SIM, 0 } }, { 0xA4140016UL, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } }, { 0xA4140018UL, 0, 16, 4, /* IPRD */ { USBF_SPD, TMU_SUNI, IRQ5, IRQ4 } }, +#if defined(CONFIG_CPU_SUBTYPE_SH7720) { 0xA414001AUL, 0, 16, 4, /* IPRE */ { DMAC1, 0, LCDC, SSL } }, +#else + { 0xA414001AUL, 0, 16, 4, /* IPRE */ { DMAC1, 0, LCDC, 0 } }, +#endif { 0xA4080000UL, 0, 16, 4, /* IPRF */ { ADC, DMAC2, USBFI, CMT } }, { 0xA4080002UL, 0, 16, 4, /* IPRG */ { SCIF0, SCIF1, 0, 0 } }, { 0xA4080004UL, 0, 16, 4, /* IPRH */ { PINT07, PINT815, TPU, IIC } }, @@ -222,8 +229,32 @@ static struct intc_prio_reg prio_registers[] __initdata = { static DECLARE_INTC_DESC(intc_desc, "sh7720", vectors, groups, NULL, prio_registers, NULL); +static struct intc_sense_reg sense_registers[] __initdata = { + { INTC_ICR1, 16, 2, { 0, 0, IRQ5, IRQ4, IRQ3, IRQ2, IRQ1, IRQ0 } }, +}; + +static struct intc_vect vectors_irq[] __initdata = { + INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620), + INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660), + INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0), +}; + +static DECLARE_INTC_DESC(intc_irq_desc, "sh7720-irq", vectors_irq, + NULL, NULL, prio_registers, sense_registers); + +void __init plat_irq_setup_pins(int mode) +{ + switch (mode) { + case IRQ_MODE_IRQ: + ctrl_outw(ctrl_inw(INTC_ICR1) & INTC_ICR_IRQ, INTC_ICR1); + register_intc_controller(&intc_irq_desc); + break; + default: + BUG(); + } +} + void __init plat_irq_setup(void) { register_intc_controller(&intc_desc); - plat_irq_setup_sh3(); } diff --git a/trunk/arch/sh/kernel/cpu/sh5/entry.S b/trunk/arch/sh/kernel/cpu/sh5/entry.S index 05372ed6c568..ba8750176d91 100644 --- a/trunk/arch/sh/kernel/cpu/sh5/entry.S +++ b/trunk/arch/sh/kernel/cpu/sh5/entry.S @@ -143,22 +143,12 @@ resvec_save_area: trap_jtable: .long do_exception_error /* 0x000 */ .long do_exception_error /* 0x020 */ -#ifdef CONFIG_MMU .long tlb_miss_load /* 0x040 */ .long tlb_miss_store /* 0x060 */ -#else - .long do_exception_error - .long do_exception_error -#endif ! 
ARTIFICIAL pseudo-EXPEVT setting .long do_debug_interrupt /* 0x080 */ -#ifdef CONFIG_MMU .long tlb_miss_load /* 0x0A0 */ .long tlb_miss_store /* 0x0C0 */ -#else - .long do_exception_error - .long do_exception_error -#endif .long do_address_error_load /* 0x0E0 */ .long do_address_error_store /* 0x100 */ #ifdef CONFIG_SH_FPU @@ -195,18 +185,10 @@ trap_jtable: .endr .long do_IRQ /* 0xA00 */ .long do_IRQ /* 0xA20 */ -#ifdef CONFIG_MMU .long itlb_miss_or_IRQ /* 0xA40 */ -#else - .long do_IRQ -#endif .long do_IRQ /* 0xA60 */ .long do_IRQ /* 0xA80 */ -#ifdef CONFIG_MMU .long itlb_miss_or_IRQ /* 0xAA0 */ -#else - .long do_IRQ -#endif .long do_exception_error /* 0xAC0 */ .long do_address_error_exec /* 0xAE0 */ .rept 8 @@ -292,7 +274,6 @@ not_a_tlb_miss: * Instead of '.space 1024-TEXT_SIZE' place the RESVEC * block making sure the final alignment is correct. */ -#ifdef CONFIG_MMU tlb_miss: synco /* TAKum03020 (but probably a good idea anyway.) */ putcon SP, KCR1 @@ -396,9 +377,6 @@ fixup_to_invoke_general_handler: getcon KCR1, SP pta handle_exception, tr0 blink tr0, ZERO -#else /* CONFIG_MMU */ - .balign 256 -#endif /* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE DOES END UP AT VBR+0x600 */ @@ -1125,7 +1103,6 @@ restore_all: * fpu_error_or_IRQ? is a helper to deflect to the right cause. * */ -#ifdef CONFIG_MMU tlb_miss_load: or SP, ZERO, r2 or ZERO, ZERO, r3 /* Read */ @@ -1155,7 +1132,6 @@ call_do_page_fault: movi do_page_fault, r6 ptabs r6, tr0 blink tr0, ZERO -#endif /* CONFIG_MMU */ fpu_error_or_IRQA: pta its_IRQ, tr0 @@ -1505,7 +1481,6 @@ poke_real_address_q: ptabs LINK, tr0 blink tr0, r63 -#ifdef CONFIG_MMU /* * --- User Access Handling Section */ @@ -1629,7 +1604,6 @@ ___clear_user_exit: ptabs LINK, tr0 blink tr0, ZERO -#endif /* CONFIG_MMU */ /* * int __strncpy_from_user(unsigned long __dest, unsigned long __src, @@ -2040,11 +2014,9 @@ sa_default_restorer: .global asm_uaccess_start /* Just a marker */ asm_uaccess_start: -#ifdef CONFIG_MMU .long ___copy_user1, ___copy_user_exit .long ___copy_user2, ___copy_user_exit .long ___clear_user1, ___clear_user_exit -#endif .long ___strncpy_from_user1, ___strncpy_from_user_exit .long ___strnlen_user1, ___strnlen_user_exit .long ___get_user_asm_b1, ___get_user_asm_b_exit diff --git a/trunk/arch/sh/kernel/cpu/sh5/probe.c b/trunk/arch/sh/kernel/cpu/sh5/probe.c index 92ad844b5c12..31f8cb0f6374 100644 --- a/trunk/arch/sh/kernel/cpu/sh5/probe.c +++ b/trunk/arch/sh/kernel/cpu/sh5/probe.c @@ -15,7 +15,6 @@ #include #include #include -#include int __init detect_cpu_and_cache_system(void) { @@ -68,8 +67,5 @@ int __init detect_cpu_and_cache_system(void) set_bit(SH_CACHE_MODE_WB, &(boot_cpu_data.dcache.flags)); #endif - /* Setup some I/D TLB defaults */ - sh64_tlb_init(); - return 0; } diff --git a/trunk/arch/sh/kernel/early_printk.c b/trunk/arch/sh/kernel/early_printk.c index 6b7d166694e2..957f25611543 100644 --- a/trunk/arch/sh/kernel/early_printk.c +++ b/trunk/arch/sh/kernel/early_printk.c @@ -141,9 +141,7 @@ static void scif_sercon_init(char *s) */ static void scif_sercon_init(char *s) { - struct uart_port *port = &scif_port; unsigned baud = DEFAULT_BAUD; - unsigned int status; char *e; if (*s == ',') @@ -162,25 +160,19 @@ static void scif_sercon_init(char *s) baud = DEFAULT_BAUD; } - do { - status = sci_in(port, SCxSR); - } while (!(status & SCxSR_TEND(port))); - - sci_out(port, SCSCR, 0); /* TE=0, RE=0 */ - sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST); - sci_out(port, SCSMR, 0); + ctrl_outw(0, scif_port.mapbase + 8); + ctrl_outw(0, 
scif_port.mapbase); /* Set baud rate */ - sci_out(port, SCBRR, (CONFIG_SH_PCLK_FREQ + 16 * baud) / - (32 * baud) - 1); - udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */ - - sci_out(port, SCSPTR, 0); - sci_out(port, SCxSR, 0x60); - sci_out(port, SCLSR, 0); - - sci_out(port, SCFCR, 0); - sci_out(port, SCSCR, 0x30); /* TE=1, RE=1 */ + ctrl_outb((CONFIG_SH_PCLK_FREQ + 16 * baud) / + (32 * baud) - 1, scif_port.mapbase + 4); + + ctrl_outw(12, scif_port.mapbase + 24); + ctrl_outw(8, scif_port.mapbase + 24); + ctrl_outw(0, scif_port.mapbase + 32); + ctrl_outw(0x60, scif_port.mapbase + 16); + ctrl_outw(0, scif_port.mapbase + 36); + ctrl_outw(0x30, scif_port.mapbase + 8); } #endif /* defined(CONFIG_CPU_SUBTYPE_SH7720) */ #endif /* !defined(CONFIG_SH_STANDARD_BIOS) */ diff --git a/trunk/arch/sh/kernel/setup.c b/trunk/arch/sh/kernel/setup.c index 516bde9c50fa..284f66f1ebbe 100644 --- a/trunk/arch/sh/kernel/setup.c +++ b/trunk/arch/sh/kernel/setup.c @@ -53,7 +53,6 @@ EXPORT_SYMBOL(cpu_data); * sh_mv= on the command line, prior to .machvec.init teardown. */ struct sh_machine_vector sh_mv = { .mv_name = "generic", }; -EXPORT_SYMBOL(sh_mv); #ifdef CONFIG_VT struct screen_info screen_info; @@ -77,18 +76,11 @@ static struct resource data_resource = { .flags = IORESOURCE_BUSY | IORESOURCE_MEM, }; -static struct resource bss_resource = { - .name = "Kernel bss", - .flags = IORESOURCE_BUSY | IORESOURCE_MEM, -}; - unsigned long memory_start; EXPORT_SYMBOL(memory_start); unsigned long memory_end = 0; EXPORT_SYMBOL(memory_end); -static struct resource mem_resources[MAX_NUMNODES]; - int l1i_cache_shape, l1d_cache_shape, l2_cache_shape; static int __init early_parse_mem(char *p) @@ -177,40 +169,6 @@ static inline void __init reserve_crashkernel(void) {} #endif -void __init __add_active_range(unsigned int nid, unsigned long start_pfn, - unsigned long end_pfn) -{ - struct resource *res = &mem_resources[nid]; - - WARN_ON(res->name); /* max one active range per node for now */ - - res->name = "System RAM"; - res->start = start_pfn << PAGE_SHIFT; - res->end = (end_pfn << PAGE_SHIFT) - 1; - res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; - if (request_resource(&iomem_resource, res)) { - pr_err("unable to request memory_resource 0x%lx 0x%lx\n", - start_pfn, end_pfn); - return; - } - - /* - * We don't know which RAM region contains kernel data, - * so we try it repeatedly and let the resource manager - * test it. 
- */ - request_resource(res, &code_resource); - request_resource(res, &data_resource); - request_resource(res, &bss_resource); - -#ifdef CONFIG_KEXEC - if (crashk_res.start != crashk_res.end) - request_resource(res, &crashk_res); -#endif - - add_active_range(nid, start_pfn, end_pfn); -} - void __init setup_bootmem_allocator(unsigned long free_pfn) { unsigned long bootmap_size; @@ -223,7 +181,7 @@ void __init setup_bootmem_allocator(unsigned long free_pfn) bootmap_size = init_bootmem_node(NODE_DATA(0), free_pfn, min_low_pfn, max_low_pfn); - __add_active_range(0, min_low_pfn, max_low_pfn); + add_active_range(0, min_low_pfn, max_low_pfn); register_bootmem_low_pages(); node_set_online(0); @@ -309,8 +267,6 @@ void __init setup_arch(char **cmdline_p) code_resource.end = virt_to_phys(_etext)-1; data_resource.start = virt_to_phys(_etext); data_resource.end = virt_to_phys(_edata)-1; - bss_resource.start = virt_to_phys(__bss_start); - bss_resource.end = virt_to_phys(_ebss)-1; memory_start = (unsigned long)__va(__MEMORY_START); if (!memory_end) diff --git a/trunk/arch/sh/kernel/sh_ksyms_32.c b/trunk/arch/sh/kernel/sh_ksyms_32.c index 8f916536719c..6d405462cee8 100644 --- a/trunk/arch/sh/kernel/sh_ksyms_32.c +++ b/trunk/arch/sh/kernel/sh_ksyms_32.c @@ -20,6 +20,8 @@ extern int dump_fpu(struct pt_regs *, elf_fpregset_t *); extern struct hw_interrupt_type no_irq_type; +EXPORT_SYMBOL(sh_mv); + /* platform dependent support */ EXPORT_SYMBOL(dump_fpu); EXPORT_SYMBOL(kernel_thread); diff --git a/trunk/arch/sh/kernel/sh_ksyms_64.c b/trunk/arch/sh/kernel/sh_ksyms_64.c index 9324d32adacc..a310c9707f03 100644 --- a/trunk/arch/sh/kernel/sh_ksyms_64.c +++ b/trunk/arch/sh/kernel/sh_ksyms_64.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -30,50 +29,25 @@ extern int dump_fpu(struct pt_regs *, elf_fpregset_t *); EXPORT_SYMBOL(dump_fpu); EXPORT_SYMBOL(kernel_thread); -#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) -EXPORT_SYMBOL(clear_user_page); -#endif - -#ifndef CONFIG_CACHE_OFF -EXPORT_SYMBOL(flush_dcache_page); -#endif - /* Networking helper routines. */ -EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_nocheck); -#ifdef CONFIG_IPV6 -EXPORT_SYMBOL(csum_ipv6_magic); -#endif #ifdef CONFIG_VT EXPORT_SYMBOL(screen_info); #endif -EXPORT_SYMBOL(__put_user_asm_b); -EXPORT_SYMBOL(__put_user_asm_w); EXPORT_SYMBOL(__put_user_asm_l); -EXPORT_SYMBOL(__put_user_asm_q); -EXPORT_SYMBOL(__get_user_asm_b); -EXPORT_SYMBOL(__get_user_asm_w); EXPORT_SYMBOL(__get_user_asm_l); -EXPORT_SYMBOL(__get_user_asm_q); -EXPORT_SYMBOL(__strnlen_user); -EXPORT_SYMBOL(__strncpy_from_user); -EXPORT_SYMBOL(clear_page); -EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(__copy_user); EXPORT_SYMBOL(empty_zero_page); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(__udelay); EXPORT_SYMBOL(__ndelay); -EXPORT_SYMBOL(__const_udelay); /* Ugh. These come in from libgcc.a at link time. 
*/ #define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name) DECLARE_EXPORT(__sdivsi3); -DECLARE_EXPORT(__sdivsi3_2); DECLARE_EXPORT(__muldi3); DECLARE_EXPORT(__udivsi3); -DECLARE_EXPORT(__div_table); diff --git a/trunk/arch/sh/kernel/time_64.c b/trunk/arch/sh/kernel/time_64.c index 022a55f1c1d4..898977ee2030 100644 --- a/trunk/arch/sh/kernel/time_64.c +++ b/trunk/arch/sh/kernel/time_64.c @@ -172,7 +172,6 @@ void do_gettimeofday(struct timeval *tv) tv->tv_sec = sec; tv->tv_usec = usec; } -EXPORT_SYMBOL(do_gettimeofday); int do_settimeofday(struct timespec *tv) { @@ -241,7 +240,7 @@ static inline void do_timer_interrupt(void) * the irq version of write_lock because as just said we have irq * locally disabled. -arca */ - write_seqlock(&xtime_lock); + write_lock(&xtime_lock); asm ("getcon cr62, %0" : "=r" (current_ctc)); ctc_last_interrupt = (unsigned long) current_ctc; @@ -267,7 +266,7 @@ static inline void do_timer_interrupt(void) /* do it again in 60 s */ last_rtc_update = xtime.tv_sec - 600; } - write_sequnlock(&xtime_lock); + write_unlock(&xtime_lock); #ifndef CONFIG_SMP update_process_times(user_mode(get_irq_regs())); diff --git a/trunk/arch/sh/lib64/dbg.c b/trunk/arch/sh/lib64/dbg.c index 2fb8eaf6de60..75825ef6e084 100644 --- a/trunk/arch/sh/lib64/dbg.c +++ b/trunk/arch/sh/lib64/dbg.c @@ -186,8 +186,8 @@ void evt_debug(int evt, int ret_addr, int event, int tra, struct pt_regs *regs) rr->pc = regs->pc; if (sp < stack_bottom + 3092) { - int i, j; printk("evt_debug : stack underflow report\n"); + int i, j; for (j=0, i = event_ptr; j<16; j++) { rr = event_ring + i; printk("evt=%08x event=%08x tra=%08x pid=%5d sp=%08lx pc=%08lx\n", diff --git a/trunk/arch/sh/mm/Makefile_64 b/trunk/arch/sh/mm/Makefile_64 index 0d92a8a3ac9a..cbd6aa33c5ac 100644 --- a/trunk/arch/sh/mm/Makefile_64 +++ b/trunk/arch/sh/mm/Makefile_64 @@ -2,11 +2,10 @@ # Makefile for the Linux SuperH-specific parts of the memory manager. # -obj-y := init.o consistent.o +obj-y := init.o extable_64.o consistent.o -mmu-y := tlb-nommu.o pg-nommu.o extable_32.o -mmu-$(CONFIG_MMU) := fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o \ - extable_64.o +mmu-y := tlb-nommu.o pg-nommu.o +mmu-$(CONFIG_MMU) := fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o ifndef CONFIG_CACHE_OFF obj-y += cache-sh5.o diff --git a/trunk/arch/sh/mm/cache-sh5.c b/trunk/arch/sh/mm/cache-sh5.c index 9e277ec7d536..3877321fcede 100644 --- a/trunk/arch/sh/mm/cache-sh5.c +++ b/trunk/arch/sh/mm/cache-sh5.c @@ -714,7 +714,6 @@ void flush_cache_sigtramp(unsigned long vaddr) sh64_icache_inv_current_user_range(vaddr, end); } -#ifdef CONFIG_MMU /* * These *MUST* lie in an area of virtual address space that's otherwise * unused. 
@@ -831,4 +830,3 @@ void clear_user_page(void *to, unsigned long address, struct page *page) else sh64_clear_user_page_coloured(to, address); } -#endif diff --git a/trunk/arch/sh/mm/ioremap_64.c b/trunk/arch/sh/mm/ioremap_64.c index 6e0be24d26e2..cea224c3e49b 100644 --- a/trunk/arch/sh/mm/ioremap_64.c +++ b/trunk/arch/sh/mm/ioremap_64.c @@ -343,7 +343,6 @@ unsigned long onchip_remap(unsigned long phys, unsigned long size, const char *n return shmedia_alloc_io(phys, size, name); } -EXPORT_SYMBOL(onchip_remap); void onchip_unmap(unsigned long vaddr) { @@ -371,7 +370,6 @@ void onchip_unmap(unsigned long vaddr) kfree(res); } } -EXPORT_SYMBOL(onchip_unmap); #ifdef CONFIG_PROC_FS static int diff --git a/trunk/arch/sh/mm/numa.c b/trunk/arch/sh/mm/numa.c index 1663199ce888..2de7302724fc 100644 --- a/trunk/arch/sh/mm/numa.c +++ b/trunk/arch/sh/mm/numa.c @@ -59,7 +59,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) free_pfn = start_pfn = start >> PAGE_SHIFT; end_pfn = end >> PAGE_SHIFT; - __add_active_range(nid, start_pfn, end_pfn); + add_active_range(nid, start_pfn, end_pfn); /* Node-local pgdat */ NODE_DATA(nid) = pfn_to_kaddr(free_pfn); diff --git a/trunk/arch/sh/tools/mach-types b/trunk/arch/sh/tools/mach-types index 1bba7d36be90..987c6682bf99 100644 --- a/trunk/arch/sh/tools/mach-types +++ b/trunk/arch/sh/tools/mach-types @@ -28,6 +28,7 @@ HD64465 HD64465 7751SYSTEMH SH_7751_SYSTEMH HP6XX SH_HP6XX DREAMCAST SH_DREAMCAST +MPC1211 SH_MPC1211 SNAPGEAR SH_SECUREEDGE5410 EDOSK7705 SH_EDOSK7705 SH4202_MICRODEV SH_SH4202_MICRODEV diff --git a/trunk/arch/um/drivers/line.c b/trunk/arch/um/drivers/line.c index 5047490fc299..10b86e1cc659 100644 --- a/trunk/arch/um/drivers/line.c +++ b/trunk/arch/um/drivers/line.c @@ -191,9 +191,9 @@ void line_flush_chars(struct tty_struct *tty) line_flush_buffer(tty); } -int line_put_char(struct tty_struct *tty, unsigned char ch) +void line_put_char(struct tty_struct *tty, unsigned char ch) { - return line_write(tty, &ch, sizeof(ch)); + line_write(tty, &ch, sizeof(ch)); } int line_write(struct tty_struct *tty, const unsigned char *buf, int len) diff --git a/trunk/arch/um/include/line.h b/trunk/arch/um/include/line.h index 979b73e6352d..1223f2c844b4 100644 --- a/trunk/arch/um/include/line.h +++ b/trunk/arch/um/include/line.h @@ -71,7 +71,7 @@ extern int line_setup(struct line *lines, unsigned int sizeof_lines, char *init, char **error_out); extern int line_write(struct tty_struct *tty, const unsigned char *buf, int len); -extern int line_put_char(struct tty_struct *tty, unsigned char ch); +extern void line_put_char(struct tty_struct *tty, unsigned char ch); extern void line_set_termios(struct tty_struct *tty, struct ktermios * old); extern int line_chars_in_buffer(struct tty_struct *tty); extern void line_flush_buffer(struct tty_struct *tty); diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig index fe361ae7ef2f..c3f880902d66 100644 --- a/trunk/arch/x86/Kconfig +++ b/trunk/arch/x86/Kconfig @@ -18,7 +18,6 @@ config X86_64 ### Arch settings config X86 def_bool y - select HAVE_UNSTABLE_SCHED_CLOCK select HAVE_IDE select HAVE_OPROFILE select HAVE_KPROBES @@ -335,7 +334,6 @@ config X86_RDC321X select GENERIC_GPIO select LEDS_CLASS select LEDS_GPIO - select NEW_LEDS help This option is needed for RDC R-321x system-on-chip, also known as R-8610-(G). 
diff --git a/trunk/arch/x86/boot/compressed/relocs.c b/trunk/arch/x86/boot/compressed/relocs.c index edaadea90aaf..d01ea42187e6 100644 --- a/trunk/arch/x86/boot/compressed/relocs.c +++ b/trunk/arch/x86/boot/compressed/relocs.c @@ -191,7 +191,7 @@ static void read_ehdr(FILE *fp) die("Cannot read ELF header: %s\n", strerror(errno)); } - if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) { + if (memcmp(ehdr.e_ident, ELFMAG, 4) != 0) { die("No ELF magic\n"); } if (ehdr.e_ident[EI_CLASS] != ELFCLASS32) { diff --git a/trunk/arch/x86/kernel/Makefile b/trunk/arch/x86/kernel/Makefile index 5e618c3b4720..bbdacb398d48 100644 --- a/trunk/arch/x86/kernel/Makefile +++ b/trunk/arch/x86/kernel/Makefile @@ -83,7 +83,9 @@ obj-$(CONFIG_KVM_GUEST) += kvm.o obj-$(CONFIG_KVM_CLOCK) += kvmclock.o obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o -obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o +ifdef CONFIG_INPUT_PCSPKR +obj-y += pcspeaker.o +endif obj-$(CONFIG_SCx200) += scx200.o scx200-y += scx200_32.o diff --git a/trunk/arch/x86/kernel/acpi/Makefile b/trunk/arch/x86/kernel/acpi/Makefile index fd5ca97a2ad5..7335959b6aff 100644 --- a/trunk/arch/x86/kernel/acpi/Makefile +++ b/trunk/arch/x86/kernel/acpi/Makefile @@ -10,5 +10,5 @@ endif $(obj)/wakeup_rm.o: $(obj)/realmode/wakeup.bin $(obj)/realmode/wakeup.bin: FORCE - $(Q)$(MAKE) $(build)=$(obj)/realmode + $(Q)$(MAKE) $(build)=$(obj)/realmode $@ diff --git a/trunk/arch/x86/kernel/acpi/realmode/Makefile b/trunk/arch/x86/kernel/acpi/realmode/Makefile index 1c31cc0e9def..092900854acc 100644 --- a/trunk/arch/x86/kernel/acpi/realmode/Makefile +++ b/trunk/arch/x86/kernel/acpi/realmode/Makefile @@ -6,8 +6,7 @@ # for more details. # -always := wakeup.bin -targets := wakeup.elf wakeup.lds +targets := wakeup.bin wakeup.elf wakeup-y += wakeup.o wakemain.o video-mode.o copy.o @@ -49,7 +48,7 @@ LDFLAGS_wakeup.elf := -T CPPFLAGS_wakeup.lds += -P -C -$(obj)/wakeup.elf: $(obj)/wakeup.lds $(WAKEUP_OBJS) FORCE +$(obj)/wakeup.elf: $(src)/wakeup.lds $(WAKEUP_OBJS) FORCE $(call if_changed,ld) OBJCOPYFLAGS_wakeup.bin := -O binary diff --git a/trunk/arch/x86/kernel/cpu/addon_cpuid_features.c b/trunk/arch/x86/kernel/cpu/addon_cpuid_features.c index c2e1ce33c7cb..238468ae1993 100644 --- a/trunk/arch/x86/kernel/cpu/addon_cpuid_features.c +++ b/trunk/arch/x86/kernel/cpu/addon_cpuid_features.c @@ -6,7 +6,6 @@ #include -#include #include struct cpuid_bit { @@ -49,23 +48,3 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) set_cpu_cap(c, cb->feature); } } - -#ifdef CONFIG_X86_PAT -void __cpuinit validate_pat_support(struct cpuinfo_x86 *c) -{ - switch (c->x86_vendor) { - case X86_VENDOR_AMD: - if (c->x86 >= 0xf && c->x86 <= 0x11) - return; - break; - case X86_VENDOR_INTEL: - if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15)) - return; - break; - } - - pat_disable(cpu_has_pat ? - "PAT disabled. Not yet verified on this CPU type." 
: - "PAT not supported by CPU."); -} -#endif diff --git a/trunk/arch/x86/kernel/cpu/common.c b/trunk/arch/x86/kernel/cpu/common.c index d0463a946247..35b4f6a9c8ef 100644 --- a/trunk/arch/x86/kernel/cpu/common.c +++ b/trunk/arch/x86/kernel/cpu/common.c @@ -12,7 +12,6 @@ #include #include #include -#include #ifdef CONFIG_X86_LOCAL_APIC #include #include @@ -309,6 +308,19 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c) } + clear_cpu_cap(c, X86_FEATURE_PAT); + + switch (c->x86_vendor) { + case X86_VENDOR_AMD: + if (c->x86 >= 0xf && c->x86 <= 0x11) + set_cpu_cap(c, X86_FEATURE_PAT); + break; + case X86_VENDOR_INTEL: + if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15)) + set_cpu_cap(c, X86_FEATURE_PAT); + break; + } + } /* @@ -397,6 +409,18 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c) init_scattered_cpuid_features(c); } + clear_cpu_cap(c, X86_FEATURE_PAT); + + switch (c->x86_vendor) { + case X86_VENDOR_AMD: + if (c->x86 >= 0xf && c->x86 <= 0x11) + set_cpu_cap(c, X86_FEATURE_PAT); + break; + case X86_VENDOR_INTEL: + if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15)) + set_cpu_cap(c, X86_FEATURE_PAT); + break; + } } static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) @@ -627,7 +651,6 @@ void __init early_cpu_init(void) cpu_devs[cvdev->vendor] = cvdev->cpu_dev; early_cpu_detect(); - validate_pat_support(&boot_cpu_data); } /* Make sure %fs is initialized properly in idle threads */ diff --git a/trunk/arch/x86/kernel/geode_32.c b/trunk/arch/x86/kernel/geode_32.c index e8edd63ab000..9dad6ca6cd70 100644 --- a/trunk/arch/x86/kernel/geode_32.c +++ b/trunk/arch/x86/kernel/geode_32.c @@ -161,25 +161,6 @@ void geode_gpio_setup_event(unsigned int gpio, int pair, int pme) } EXPORT_SYMBOL_GPL(geode_gpio_setup_event); -int geode_has_vsa2(void) -{ - static int has_vsa2 = -1; - - if (has_vsa2 == -1) { - /* - * The VSA has virtual registers that we can query for a - * signature. 
- */ - outw(VSA_VR_UNLOCK, VSA_VRC_INDEX); - outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX); - - has_vsa2 = (inw(VSA_VRC_DATA) == VSA_SIG); - } - - return has_vsa2; -} -EXPORT_SYMBOL_GPL(geode_has_vsa2); - static int __init geode_southbridge_init(void) { if (!is_geode()) diff --git a/trunk/arch/x86/kernel/i387.c b/trunk/arch/x86/kernel/i387.c index e03cc952f233..db6839b53195 100644 --- a/trunk/arch/x86/kernel/i387.c +++ b/trunk/arch/x86/kernel/i387.c @@ -450,6 +450,7 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf) { struct task_struct *tsk = current; + clear_fpu(tsk); return __copy_from_user(&tsk->thread.xstate->fsave, buf, sizeof(struct i387_fsave_struct)); } @@ -460,6 +461,7 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf) struct user_i387_ia32_struct env; int err; + clear_fpu(tsk); err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0], sizeof(struct i387_fxsave_struct)); /* mxcsr reserved bits must be masked to zero for security reasons */ @@ -476,16 +478,6 @@ int restore_i387_ia32(struct _fpstate_ia32 __user *buf) int err; if (HAVE_HWFP) { - struct task_struct *tsk = current; - - clear_fpu(tsk); - - if (!used_math()) { - err = init_fpu(tsk); - if (err) - return err; - } - if (cpu_has_fxsr) err = restore_i387_fxsave(buf); else diff --git a/trunk/arch/x86/kernel/kvmclock.c b/trunk/arch/x86/kernel/kvmclock.c index 4bc1be5d5472..ddee04043aeb 100644 --- a/trunk/arch/x86/kernel/kvmclock.c +++ b/trunk/arch/x86/kernel/kvmclock.c @@ -133,7 +133,6 @@ static int kvm_register_clock(void) return native_write_msr_safe(MSR_KVM_SYSTEM_TIME, low, high); } -#ifdef CONFIG_X86_LOCAL_APIC static void kvm_setup_secondary_clock(void) { /* @@ -144,7 +143,6 @@ static void kvm_setup_secondary_clock(void) /* ok, done with our trickery, call native */ setup_secondary_APIC_clock(); } -#endif /* * After the clock is registered, the host will keep writing to the @@ -179,9 +177,7 @@ void __init kvmclock_init(void) pv_time_ops.get_wallclock = kvm_get_wallclock; pv_time_ops.set_wallclock = kvm_set_wallclock; pv_time_ops.sched_clock = kvm_clock_read; -#ifdef CONFIG_X86_LOCAL_APIC pv_apic_ops.setup_secondary_clock = kvm_setup_secondary_clock; -#endif machine_ops.shutdown = kvm_shutdown; #ifdef CONFIG_KEXEC machine_ops.crash_shutdown = kvm_crash_shutdown; diff --git a/trunk/arch/x86/kernel/mpparse.c b/trunk/arch/x86/kernel/mpparse.c index 404683b94e79..3e2c54dc8b29 100644 --- a/trunk/arch/x86/kernel/mpparse.c +++ b/trunk/arch/x86/kernel/mpparse.c @@ -794,11 +794,6 @@ void __init find_smp_config(void) ACPI-based MP Configuration -------------------------------------------------------------------------- */ -/* - * Keep this outside and initialized to 0, for !CONFIG_ACPI builds: - */ -int es7000_plat; - #ifdef CONFIG_ACPI #ifdef CONFIG_X86_IO_APIC @@ -914,6 +909,8 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) MP_intsrc_info(&intsrc); } +int es7000_plat; + void __init mp_config_acpi_legacy_irqs(void) { struct mpc_config_intsrc intsrc; diff --git a/trunk/arch/x86/kernel/reboot.c b/trunk/arch/x86/kernel/reboot.c index f6be7d5f82f8..07c6d42ab5ff 100644 --- a/trunk/arch/x86/kernel/reboot.c +++ b/trunk/arch/x86/kernel/reboot.c @@ -149,6 +149,7 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"), + DMI_MATCH(DMI_BOARD_NAME, "0WF810"), }, }, { /* Handle problems with rebooting on Dell Optiplex 745's DFF*/ diff --git 
a/trunk/arch/x86/kernel/setup_32.c b/trunk/arch/x86/kernel/setup_32.c index 2c5f8b213e86..2283422af794 100644 --- a/trunk/arch/x86/kernel/setup_32.c +++ b/trunk/arch/x86/kernel/setup_32.c @@ -127,12 +127,7 @@ static struct resource standard_io_resources[] = { { }, { .name = "keyboard", .start = 0x0060, - .end = 0x0060, - .flags = IORESOURCE_BUSY | IORESOURCE_IO -}, { - .name = "keyboard", - .start = 0x0064, - .end = 0x0064, + .end = 0x006f, .flags = IORESOURCE_BUSY | IORESOURCE_IO }, { .name = "dma page reg", diff --git a/trunk/arch/x86/kernel/setup_64.c b/trunk/arch/x86/kernel/setup_64.c index f2fc8feb727d..22c14e21c97c 100644 --- a/trunk/arch/x86/kernel/setup_64.c +++ b/trunk/arch/x86/kernel/setup_64.c @@ -70,7 +70,6 @@ #include #include #include -#include #include #ifdef CONFIG_PARAVIRT @@ -129,9 +128,7 @@ static struct resource standard_io_resources[] = { .flags = IORESOURCE_BUSY | IORESOURCE_IO }, { .name = "timer1", .start = 0x50, .end = 0x53, .flags = IORESOURCE_BUSY | IORESOURCE_IO }, - { .name = "keyboard", .start = 0x60, .end = 0x60, - .flags = IORESOURCE_BUSY | IORESOURCE_IO }, - { .name = "keyboard", .start = 0x64, .end = 0x64, + { .name = "keyboard", .start = 0x60, .end = 0x6f, .flags = IORESOURCE_BUSY | IORESOURCE_IO }, { .name = "dma page reg", .start = 0x80, .end = 0x8f, .flags = IORESOURCE_BUSY | IORESOURCE_IO }, @@ -1066,19 +1063,25 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) if (c->extended_cpuid_level >= 0x80000007) c->x86_power = cpuid_edx(0x80000007); + + clear_cpu_cap(c, X86_FEATURE_PAT); + switch (c->x86_vendor) { case X86_VENDOR_AMD: early_init_amd(c); + if (c->x86 >= 0xf && c->x86 <= 0x11) + set_cpu_cap(c, X86_FEATURE_PAT); break; case X86_VENDOR_INTEL: early_init_intel(c); + if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15)) + set_cpu_cap(c, X86_FEATURE_PAT); break; case X86_VENDOR_CENTAUR: early_init_centaur(c); break; } - validate_pat_support(c); } /* diff --git a/trunk/arch/x86/kernel/smpboot.c b/trunk/arch/x86/kernel/smpboot.c index 6b087ab6cd8f..84241a256dc8 100644 --- a/trunk/arch/x86/kernel/smpboot.c +++ b/trunk/arch/x86/kernel/smpboot.c @@ -299,7 +299,7 @@ static void __cpuinit smp_callin(void) /* * Activate a secondary processor. 
*/ -static void __cpuinit start_secondary(void *unused) +void __cpuinit start_secondary(void *unused) { /* * Don't put *anything* before cpu_init(), SMP booting is too @@ -1306,7 +1306,7 @@ static void remove_siblinginfo(int cpu) cpu_clear(cpu, cpu_sibling_setup_map); } -static int additional_cpus __initdata = -1; +int additional_cpus __initdata = -1; static __init int setup_additional_cpus(char *s) { diff --git a/trunk/arch/x86/kvm/i8254.c b/trunk/arch/x86/kvm/i8254.c index 3324d90038e4..4c943eabacc3 100644 --- a/trunk/arch/x86/kvm/i8254.c +++ b/trunk/arch/x86/kvm/i8254.c @@ -288,8 +288,6 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val) * mode 1 is one shot, mode 2 is period, otherwise del timer */ switch (ps->channels[0].mode) { case 1: - /* FIXME: enhance mode 4 precision */ - case 4: create_pit_timer(&ps->pit_timer, val, 0); break; case 2: diff --git a/trunk/arch/x86/kvm/mmu.c b/trunk/arch/x86/kvm/mmu.c index 36c5406b1813..2ad6f5481671 100644 --- a/trunk/arch/x86/kvm/mmu.c +++ b/trunk/arch/x86/kvm/mmu.c @@ -79,6 +79,36 @@ static int dbg = 1; } #endif +#define PT64_PT_BITS 9 +#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS) +#define PT32_PT_BITS 10 +#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS) + +#define PT_WRITABLE_SHIFT 1 + +#define PT_PRESENT_MASK (1ULL << 0) +#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT) +#define PT_USER_MASK (1ULL << 2) +#define PT_PWT_MASK (1ULL << 3) +#define PT_PCD_MASK (1ULL << 4) +#define PT_ACCESSED_MASK (1ULL << 5) +#define PT_DIRTY_MASK (1ULL << 6) +#define PT_PAGE_SIZE_MASK (1ULL << 7) +#define PT_PAT_MASK (1ULL << 7) +#define PT_GLOBAL_MASK (1ULL << 8) +#define PT64_NX_SHIFT 63 +#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT) + +#define PT_PAT_SHIFT 7 +#define PT_DIR_PAT_SHIFT 12 +#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT) + +#define PT32_DIR_PSE36_SIZE 4 +#define PT32_DIR_PSE36_SHIFT 13 +#define PT32_DIR_PSE36_MASK \ + (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT) + + #define PT_FIRST_AVAIL_BITS_SHIFT 9 #define PT64_SECOND_AVAIL_BITS_SHIFT 52 @@ -124,6 +154,10 @@ static int dbg = 1; #define PFERR_USER_MASK (1U << 2) #define PFERR_FETCH_MASK (1U << 4) +#define PT64_ROOT_LEVEL 4 +#define PT32_ROOT_LEVEL 2 +#define PT32E_ROOT_LEVEL 3 + #define PT_DIRECTORY_LEVEL 2 #define PT_PAGE_TABLE_LEVEL 1 @@ -152,12 +186,6 @@ static struct kmem_cache *mmu_page_header_cache; static u64 __read_mostly shadow_trap_nonpresent_pte; static u64 __read_mostly shadow_notrap_nonpresent_pte; -static u64 __read_mostly shadow_base_present_pte; -static u64 __read_mostly shadow_nx_mask; -static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */ -static u64 __read_mostly shadow_user_mask; -static u64 __read_mostly shadow_accessed_mask; -static u64 __read_mostly shadow_dirty_mask; void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte) { @@ -166,23 +194,6 @@ void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte) } EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes); -void kvm_mmu_set_base_ptes(u64 base_pte) -{ - shadow_base_present_pte = base_pte; -} -EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes); - -void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, - u64 dirty_mask, u64 nx_mask, u64 x_mask) -{ - shadow_user_mask = user_mask; - shadow_accessed_mask = accessed_mask; - shadow_dirty_mask = dirty_mask; - shadow_nx_mask = nx_mask; - shadow_x_mask = x_mask; -} -EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes); - static int is_write_protection(struct kvm_vcpu *vcpu) { return vcpu->arch.cr0 & X86_CR0_WP; @@ -221,7 +232,7 @@ 
static int is_writeble_pte(unsigned long pte) static int is_dirty_pte(unsigned long pte) { - return pte & shadow_dirty_mask; + return pte & PT_DIRTY_MASK; } static int is_rmap_pte(u64 pte) @@ -376,6 +387,7 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn) write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn)); *write_count += 1; + WARN_ON(*write_count > KVM_PAGES_PER_HPAGE); } static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn) @@ -535,7 +547,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte) return; sp = page_header(__pa(spte)); pfn = spte_to_pfn(*spte); - if (*spte & shadow_accessed_mask) + if (*spte & PT_ACCESSED_MASK) kvm_set_pfn_accessed(pfn); if (is_writeble_pte(*spte)) kvm_release_pfn_dirty(pfn); @@ -1061,17 +1073,17 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, * whether the guest actually used the pte (in order to detect * demand paging). */ - spte = shadow_base_present_pte | shadow_dirty_mask; + spte = PT_PRESENT_MASK | PT_DIRTY_MASK; if (!speculative) pte_access |= PT_ACCESSED_MASK; if (!dirty) pte_access &= ~ACC_WRITE_MASK; - if (pte_access & ACC_EXEC_MASK) - spte |= shadow_x_mask; - else - spte |= shadow_nx_mask; + if (!(pte_access & ACC_EXEC_MASK)) + spte |= PT64_NX_MASK; + + spte |= PT_PRESENT_MASK; if (pte_access & ACC_USER_MASK) - spte |= shadow_user_mask; + spte |= PT_USER_MASK; if (largepage) spte |= PT_PAGE_SIZE_MASK; @@ -1176,9 +1188,8 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, return -ENOMEM; } - table[index] = __pa(new_table->spt) - | PT_PRESENT_MASK | PT_WRITABLE_MASK - | shadow_user_mask | shadow_x_mask; + table[index] = __pa(new_table->spt) | PT_PRESENT_MASK + | PT_WRITABLE_MASK | PT_USER_MASK; } table_addr = table[index] & PT64_BASE_ADDR_MASK; } @@ -1233,6 +1244,7 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu) if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) return; spin_lock(&vcpu->kvm->mmu_lock); +#ifdef CONFIG_X86_64 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { hpa_t root = vcpu->arch.mmu.root_hpa; @@ -1244,6 +1256,7 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu) spin_unlock(&vcpu->kvm->mmu_lock); return; } +#endif for (i = 0; i < 4; ++i) { hpa_t root = vcpu->arch.mmu.pae_root[i]; @@ -1269,6 +1282,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu) root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT; +#ifdef CONFIG_X86_64 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { hpa_t root = vcpu->arch.mmu.root_hpa; @@ -1283,6 +1297,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu) vcpu->arch.mmu.root_hpa = root; return; } +#endif metaphysical = !is_paging(vcpu); if (tdp_enabled) metaphysical = 1; @@ -1362,7 +1377,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, spin_lock(&vcpu->kvm->mmu_lock); kvm_mmu_free_some_pages(vcpu); r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK, - largepage, gfn, pfn, kvm_x86_ops->get_tdp_level()); + largepage, gfn, pfn, TDP_ROOT_LEVEL); spin_unlock(&vcpu->kvm->mmu_lock); return r; @@ -1469,7 +1484,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) context->page_fault = tdp_page_fault; context->free = nonpaging_free; context->prefetch_page = nonpaging_prefetch_page; - context->shadow_root_level = kvm_x86_ops->get_tdp_level(); + context->shadow_root_level = TDP_ROOT_LEVEL; context->root_hpa = INVALID_PAGE; if (!is_paging(vcpu)) { @@ -1618,7 +1633,7 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu) { u64 *spte = vcpu->arch.last_pte_updated; - return !!(spte && (*spte & shadow_accessed_mask)); 
+ return !!(spte && (*spte & PT_ACCESSED_MASK)); } static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, diff --git a/trunk/arch/x86/kvm/mmu.h b/trunk/arch/x86/kvm/mmu.h index 1730757bbc7a..e64e9f56a65e 100644 --- a/trunk/arch/x86/kvm/mmu.h +++ b/trunk/arch/x86/kvm/mmu.h @@ -3,38 +3,11 @@ #include -#define PT64_PT_BITS 9 -#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS) -#define PT32_PT_BITS 10 -#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS) - -#define PT_WRITABLE_SHIFT 1 - -#define PT_PRESENT_MASK (1ULL << 0) -#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT) -#define PT_USER_MASK (1ULL << 2) -#define PT_PWT_MASK (1ULL << 3) -#define PT_PCD_MASK (1ULL << 4) -#define PT_ACCESSED_MASK (1ULL << 5) -#define PT_DIRTY_MASK (1ULL << 6) -#define PT_PAGE_SIZE_MASK (1ULL << 7) -#define PT_PAT_MASK (1ULL << 7) -#define PT_GLOBAL_MASK (1ULL << 8) -#define PT64_NX_SHIFT 63 -#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT) - -#define PT_PAT_SHIFT 7 -#define PT_DIR_PAT_SHIFT 12 -#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT) - -#define PT32_DIR_PSE36_SIZE 4 -#define PT32_DIR_PSE36_SHIFT 13 -#define PT32_DIR_PSE36_MASK \ - (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT) - -#define PT64_ROOT_LEVEL 4 -#define PT32_ROOT_LEVEL 2 -#define PT32E_ROOT_LEVEL 3 +#ifdef CONFIG_X86_64 +#define TDP_ROOT_LEVEL PT64_ROOT_LEVEL +#else +#define TDP_ROOT_LEVEL PT32E_ROOT_LEVEL +#endif static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) { diff --git a/trunk/arch/x86/kvm/svm.c b/trunk/arch/x86/kvm/svm.c index ab22615eee89..89e0be2c10d0 100644 --- a/trunk/arch/x86/kvm/svm.c +++ b/trunk/arch/x86/kvm/svm.c @@ -1863,15 +1863,6 @@ static bool svm_cpu_has_accelerated_tpr(void) return false; } -static int get_npt_level(void) -{ -#ifdef CONFIG_X86_64 - return PT64_ROOT_LEVEL; -#else - return PT32E_ROOT_LEVEL; -#endif -} - static struct kvm_x86_ops svm_x86_ops = { .cpu_has_kvm_support = has_svm, .disabled_by_bios = is_disabled, @@ -1929,7 +1920,6 @@ static struct kvm_x86_ops svm_x86_ops = { .inject_pending_vectors = do_interrupt_requests, .set_tss_addr = svm_set_tss_addr, - .get_tdp_level = get_npt_level, }; static int __init svm_init(void) diff --git a/trunk/arch/x86/kvm/vmx.c b/trunk/arch/x86/kvm/vmx.c index bfe4db11989c..8e5d6645b90d 100644 --- a/trunk/arch/x86/kvm/vmx.c +++ b/trunk/arch/x86/kvm/vmx.c @@ -42,9 +42,6 @@ module_param(enable_vpid, bool, 0); static int flexpriority_enabled = 1; module_param(flexpriority_enabled, bool, 0); -static int enable_ept = 1; -module_param(enable_ept, bool, 0); - struct vmcs { u32 revision_id; u32 abort; @@ -87,7 +84,7 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) return container_of(vcpu, struct vcpu_vmx, vcpu); } -static int init_rmode(struct kvm *kvm); +static int init_rmode_tss(struct kvm *kvm); static DEFINE_PER_CPU(struct vmcs *, vmxarea); static DEFINE_PER_CPU(struct vmcs *, current_vmcs); @@ -110,11 +107,6 @@ static struct vmcs_config { u32 vmentry_ctrl; } vmcs_config; -struct vmx_capability { - u32 ept; - u32 vpid; -} vmx_capability; - #define VMX_SEGMENT_FIELD(seg) \ [VCPU_SREG_##seg] = { \ .selector = GUEST_##seg##_SELECTOR, \ @@ -222,32 +214,6 @@ static inline bool cpu_has_vmx_virtualize_apic_accesses(void) SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES); } -static inline int cpu_has_vmx_invept_individual_addr(void) -{ - return (!!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT)); -} - -static inline int cpu_has_vmx_invept_context(void) -{ - return (!!(vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT)); -} - -static 
inline int cpu_has_vmx_invept_global(void) -{ - return (!!(vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT)); -} - -static inline int cpu_has_vmx_ept(void) -{ - return (vmcs_config.cpu_based_2nd_exec_ctrl & - SECONDARY_EXEC_ENABLE_EPT); -} - -static inline int vm_need_ept(void) -{ - return (cpu_has_vmx_ept() && enable_ept); -} - static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm) { return ((cpu_has_vmx_virtualize_apic_accesses()) && @@ -284,18 +250,6 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva) : : "a"(&operand), "c"(ext) : "cc", "memory"); } -static inline void __invept(int ext, u64 eptp, gpa_t gpa) -{ - struct { - u64 eptp, gpa; - } operand = {eptp, gpa}; - - asm volatile (ASM_VMX_INVEPT - /* CF==1 or ZF==1 --> rc = -1 */ - "; ja 1f ; ud2 ; 1:\n" - : : "a" (&operand), "c" (ext) : "cc", "memory"); -} - static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) { int i; @@ -347,33 +301,6 @@ static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx) __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0); } -static inline void ept_sync_global(void) -{ - if (cpu_has_vmx_invept_global()) - __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0); -} - -static inline void ept_sync_context(u64 eptp) -{ - if (vm_need_ept()) { - if (cpu_has_vmx_invept_context()) - __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0); - else - ept_sync_global(); - } -} - -static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa) -{ - if (vm_need_ept()) { - if (cpu_has_vmx_invept_individual_addr()) - __invept(VMX_EPT_EXTENT_INDIVIDUAL_ADDR, - eptp, gpa); - else - ept_sync_context(eptp); - } -} - static unsigned long vmcs_readl(unsigned long field) { unsigned long value; @@ -461,8 +388,6 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) eb |= 1u << 1; if (vcpu->arch.rmode.active) eb = ~0; - if (vm_need_ept()) - eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */ vmcs_write32(EXCEPTION_BITMAP, eb); } @@ -1060,7 +985,7 @@ static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) { u32 vmx_msr_low, vmx_msr_high; - u32 min, opt, min2, opt2; + u32 min, opt; u32 _pin_based_exec_control = 0; u32 _cpu_based_exec_control = 0; u32 _cpu_based_2nd_exec_control = 0; @@ -1078,8 +1003,6 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | #endif - CPU_BASED_CR3_LOAD_EXITING | - CPU_BASED_CR3_STORE_EXITING | CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MOV_DR_EXITING | CPU_BASED_USE_TSC_OFFSETING; @@ -1095,13 +1018,11 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) ~CPU_BASED_CR8_STORE_EXITING; #endif if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) { - min2 = 0; - opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + min = 0; + opt = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | SECONDARY_EXEC_WBINVD_EXITING | - SECONDARY_EXEC_ENABLE_VPID | - SECONDARY_EXEC_ENABLE_EPT; - if (adjust_vmx_controls(min2, opt2, - MSR_IA32_VMX_PROCBASED_CTLS2, + SECONDARY_EXEC_ENABLE_VPID; + if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS2, &_cpu_based_2nd_exec_control) < 0) return -EIO; } @@ -1110,16 +1031,6 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; #endif - if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { - /* CR3 accesses don't need to cause VM Exits when EPT enabled */ - min &= 
~(CPU_BASED_CR3_LOAD_EXITING | - CPU_BASED_CR3_STORE_EXITING); - if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, - &_cpu_based_exec_control) < 0) - return -EIO; - rdmsr(MSR_IA32_VMX_EPT_VPID_CAP, - vmx_capability.ept, vmx_capability.vpid); - } min = 0; #ifdef CONFIG_X86_64 @@ -1345,7 +1256,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu) fix_rmode_seg(VCPU_SREG_FS, &vcpu->arch.rmode.fs); kvm_mmu_reset_context(vcpu); - init_rmode(vcpu->kvm); + init_rmode_tss(vcpu->kvm); } #ifdef CONFIG_X86_64 @@ -1393,64 +1304,8 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK; } -static void ept_load_pdptrs(struct kvm_vcpu *vcpu) -{ - if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { - if (!load_pdptrs(vcpu, vcpu->arch.cr3)) { - printk(KERN_ERR "EPT: Fail to load pdptrs!\n"); - return; - } - vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]); - vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]); - vmcs_write64(GUEST_PDPTR2, vcpu->arch.pdptrs[2]); - vmcs_write64(GUEST_PDPTR3, vcpu->arch.pdptrs[3]); - } -} - -static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); - -static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, - unsigned long cr0, - struct kvm_vcpu *vcpu) -{ - if (!(cr0 & X86_CR0_PG)) { - /* From paging/starting to nonpaging */ - vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, - vmcs_config.cpu_based_exec_ctrl | - (CPU_BASED_CR3_LOAD_EXITING | - CPU_BASED_CR3_STORE_EXITING)); - vcpu->arch.cr0 = cr0; - vmx_set_cr4(vcpu, vcpu->arch.cr4); - *hw_cr0 |= X86_CR0_PE | X86_CR0_PG; - *hw_cr0 &= ~X86_CR0_WP; - } else if (!is_paging(vcpu)) { - /* From nonpaging to paging */ - vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, - vmcs_config.cpu_based_exec_ctrl & - ~(CPU_BASED_CR3_LOAD_EXITING | - CPU_BASED_CR3_STORE_EXITING)); - vcpu->arch.cr0 = cr0; - vmx_set_cr4(vcpu, vcpu->arch.cr4); - if (!(vcpu->arch.cr0 & X86_CR0_WP)) - *hw_cr0 &= ~X86_CR0_WP; - } -} - -static void ept_update_paging_mode_cr4(unsigned long *hw_cr4, - struct kvm_vcpu *vcpu) -{ - if (!is_paging(vcpu)) { - *hw_cr4 &= ~X86_CR4_PAE; - *hw_cr4 |= X86_CR4_PSE; - } else if (!(vcpu->arch.cr4 & X86_CR4_PAE)) - *hw_cr4 &= ~X86_CR4_PAE; -} - static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { - unsigned long hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | - KVM_VM_CR0_ALWAYS_ON; - vmx_fpu_deactivate(vcpu); if (vcpu->arch.rmode.active && (cr0 & X86_CR0_PE)) @@ -1468,61 +1323,29 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) } #endif - if (vm_need_ept()) - ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu); - vmcs_writel(CR0_READ_SHADOW, cr0); - vmcs_writel(GUEST_CR0, hw_cr0); + vmcs_writel(GUEST_CR0, + (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON); vcpu->arch.cr0 = cr0; if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE)) vmx_fpu_activate(vcpu); } -static u64 construct_eptp(unsigned long root_hpa) -{ - u64 eptp; - - /* TODO write the value reading from MSR */ - eptp = VMX_EPT_DEFAULT_MT | - VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT; - eptp |= (root_hpa & PAGE_MASK); - - return eptp; -} - static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { - unsigned long guest_cr3; - u64 eptp; - - guest_cr3 = cr3; - if (vm_need_ept()) { - eptp = construct_eptp(cr3); - vmcs_write64(EPT_POINTER, eptp); - ept_sync_context(eptp); - ept_load_pdptrs(vcpu); - guest_cr3 = is_paging(vcpu) ? 
vcpu->arch.cr3 : - VMX_EPT_IDENTITY_PAGETABLE_ADDR; - } - vmx_flush_tlb(vcpu); - vmcs_writel(GUEST_CR3, guest_cr3); + vmcs_writel(GUEST_CR3, cr3); if (vcpu->arch.cr0 & X86_CR0_PE) vmx_fpu_deactivate(vcpu); } static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { - unsigned long hw_cr4 = cr4 | (vcpu->arch.rmode.active ? - KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON); - - vcpu->arch.cr4 = cr4; - if (vm_need_ept()) - ept_update_paging_mode_cr4(&hw_cr4, vcpu); - vmcs_writel(CR4_READ_SHADOW, cr4); - vmcs_writel(GUEST_CR4, hw_cr4); + vmcs_writel(GUEST_CR4, cr4 | (vcpu->arch.rmode.active ? + KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON)); + vcpu->arch.cr4 = cr4; } static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) @@ -1707,41 +1530,6 @@ static int init_rmode_tss(struct kvm *kvm) return ret; } -static int init_rmode_identity_map(struct kvm *kvm) -{ - int i, r, ret; - pfn_t identity_map_pfn; - u32 tmp; - - if (!vm_need_ept()) - return 1; - if (unlikely(!kvm->arch.ept_identity_pagetable)) { - printk(KERN_ERR "EPT: identity-mapping pagetable " - "haven't been allocated!\n"); - return 0; - } - if (likely(kvm->arch.ept_identity_pagetable_done)) - return 1; - ret = 0; - identity_map_pfn = VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT; - r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE); - if (r < 0) - goto out; - /* Set up identity-mapping pagetable for EPT in real mode */ - for (i = 0; i < PT32_ENT_PER_PAGE; i++) { - tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | - _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE); - r = kvm_write_guest_page(kvm, identity_map_pfn, - &tmp, i * sizeof(tmp), sizeof(tmp)); - if (r < 0) - goto out; - } - kvm->arch.ept_identity_pagetable_done = true; - ret = 1; -out: - return ret; -} - static void seg_setup(int seg) { struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; @@ -1776,31 +1564,6 @@ static int alloc_apic_access_page(struct kvm *kvm) return r; } -static int alloc_identity_pagetable(struct kvm *kvm) -{ - struct kvm_userspace_memory_region kvm_userspace_mem; - int r = 0; - - down_write(&kvm->slots_lock); - if (kvm->arch.ept_identity_pagetable) - goto out; - kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; - kvm_userspace_mem.flags = 0; - kvm_userspace_mem.guest_phys_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR; - kvm_userspace_mem.memory_size = PAGE_SIZE; - r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0); - if (r) - goto out; - - down_read(¤t->mm->mmap_sem); - kvm->arch.ept_identity_pagetable = gfn_to_page(kvm, - VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT); - up_read(¤t->mm->mmap_sem); -out: - up_write(&kvm->slots_lock); - return r; -} - static void allocate_vpid(struct vcpu_vmx *vmx) { int vpid; @@ -1875,9 +1638,6 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) CPU_BASED_CR8_LOAD_EXITING; #endif } - if (!vm_need_ept()) - exec_control |= CPU_BASED_CR3_STORE_EXITING | - CPU_BASED_CR3_LOAD_EXITING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); if (cpu_has_secondary_exec_ctrls()) { @@ -1887,8 +1647,6 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; if (vmx->vpid == 0) exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; - if (!vm_need_ept()) - exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); } @@ -1964,15 +1722,6 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) return 0; } -static int init_rmode(struct kvm *kvm) -{ - if (!init_rmode_tss(kvm)) - return 0; - if 
(!init_rmode_identity_map(kvm)) - return 0; - return 1; -} - static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); @@ -1980,7 +1729,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) int ret; down_read(&vcpu->kvm->slots_lock); - if (!init_rmode(vmx->vcpu.kvm)) { + if (!init_rmode_tss(vmx->vcpu.kvm)) { ret = -ENOMEM; goto out; } @@ -2245,9 +1994,6 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) if (intr_info & INTR_INFO_DELIVER_CODE_MASK) error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); if (is_page_fault(intr_info)) { - /* EPT won't cause page fault directly */ - if (vm_need_ept()) - BUG(); cr2 = vmcs_readl(EXIT_QUALIFICATION); KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2, (u32)((u64)cr2 >> 32), handler); @@ -2577,64 +2323,6 @@ static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) return kvm_task_switch(vcpu, tss_selector, reason); } -static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -{ - u64 exit_qualification; - enum emulation_result er; - gpa_t gpa; - unsigned long hva; - int gla_validity; - int r; - - exit_qualification = vmcs_read64(EXIT_QUALIFICATION); - - if (exit_qualification & (1 << 6)) { - printk(KERN_ERR "EPT: GPA exceeds GAW!\n"); - return -ENOTSUPP; - } - - gla_validity = (exit_qualification >> 7) & 0x3; - if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) { - printk(KERN_ERR "EPT: Handling EPT violation failed!\n"); - printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n", - (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS), - (long unsigned int)vmcs_read64(GUEST_LINEAR_ADDRESS)); - printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n", - (long unsigned int)exit_qualification); - kvm_run->exit_reason = KVM_EXIT_UNKNOWN; - kvm_run->hw.hardware_exit_reason = 0; - return -ENOTSUPP; - } - - gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); - hva = gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT); - if (!kvm_is_error_hva(hva)) { - r = kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0); - if (r < 0) { - printk(KERN_ERR "EPT: Not enough memory!\n"); - return -ENOMEM; - } - return 1; - } else { - /* must be MMIO */ - er = emulate_instruction(vcpu, kvm_run, 0, 0, 0); - - if (er == EMULATE_FAIL) { - printk(KERN_ERR - "EPT: Fail to handle EPT violation vmexit!er is %d\n", - er); - printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n", - (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS), - (long unsigned int)vmcs_read64(GUEST_LINEAR_ADDRESS)); - printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n", - (long unsigned int)exit_qualification); - return -ENOTSUPP; - } else if (er == EMULATE_DO_MMIO) - return 0; - } - return 1; -} - /* * The exit handlers return 1 if the exit was handled fully and guest execution * may resume. Otherwise they set the kvm_run parameter to indicate what needs @@ -2658,7 +2346,6 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu, [EXIT_REASON_APIC_ACCESS] = handle_apic_access, [EXIT_REASON_WBINVD] = handle_wbinvd, [EXIT_REASON_TASK_SWITCH] = handle_task_switch, - [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, }; static const int kvm_vmx_max_exit_handlers = @@ -2677,13 +2364,6 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)vmcs_readl(GUEST_RIP), (u32)((u64)vmcs_readl(GUEST_RIP) >> 32), entryexit); - /* Access CR3 don't cause VMExit in paging mode, so we need - * to sync with guest real CR3. 
*/ - if (vm_need_ept() && is_paging(vcpu)) { - vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); - ept_load_pdptrs(vcpu); - } - if (unlikely(vmx->fail)) { kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; kvm_run->fail_entry.hardware_entry_failure_reason @@ -2692,8 +2372,7 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) } if ((vectoring_info & VECTORING_INFO_VALID_MASK) && - (exit_reason != EXIT_REASON_EXCEPTION_NMI && - exit_reason != EXIT_REASON_EPT_VIOLATION)) + exit_reason != EXIT_REASON_EXCEPTION_NMI) printk(KERN_WARNING "%s: unexpected, valid vectoring info and " "exit reason is 0x%x\n", __func__, exit_reason); if (exit_reason < kvm_vmx_max_exit_handlers @@ -2995,15 +2674,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) return ERR_PTR(-ENOMEM); allocate_vpid(vmx); - if (id == 0 && vm_need_ept()) { - kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | - VMX_EPT_WRITABLE_MASK | - VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT); - kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK, - VMX_EPT_FAKE_DIRTY_MASK, 0ull, - VMX_EPT_EXECUTABLE_MASK); - kvm_enable_tdp(); - } err = kvm_vcpu_init(&vmx->vcpu, kvm, id); if (err) @@ -3036,10 +2706,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) if (alloc_apic_access_page(kvm) != 0) goto free_vmcs; - if (vm_need_ept()) - if (alloc_identity_pagetable(kvm) != 0) - goto free_vmcs; - return &vmx->vcpu; free_vmcs: @@ -3069,11 +2735,6 @@ static void __init vmx_check_processor_compat(void *rtn) } } -static int get_ept_level(void) -{ - return VMX_EPT_DEFAULT_GAW + 1; -} - static struct kvm_x86_ops vmx_x86_ops = { .cpu_has_kvm_support = cpu_has_kvm_support, .disabled_by_bios = vmx_disabled_by_bios, @@ -3130,7 +2791,6 @@ static struct kvm_x86_ops vmx_x86_ops = { .inject_pending_vectors = do_interrupt_requests, .set_tss_addr = vmx_set_tss_addr, - .get_tdp_level = get_ept_level, }; static int __init vmx_init(void) @@ -3183,14 +2843,9 @@ static int __init vmx_init(void) vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP); vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP); - if (cpu_has_vmx_ept()) - bypass_guest_pf = 0; - if (bypass_guest_pf) kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull); - ept_sync_global(); - return 0; out2: diff --git a/trunk/arch/x86/kvm/vmx.h b/trunk/arch/x86/kvm/vmx.h index 79d94c610dfe..5dff4606b988 100644 --- a/trunk/arch/x86/kvm/vmx.h +++ b/trunk/arch/x86/kvm/vmx.h @@ -35,8 +35,6 @@ #define CPU_BASED_MWAIT_EXITING 0x00000400 #define CPU_BASED_RDPMC_EXITING 0x00000800 #define CPU_BASED_RDTSC_EXITING 0x00001000 -#define CPU_BASED_CR3_LOAD_EXITING 0x00008000 -#define CPU_BASED_CR3_STORE_EXITING 0x00010000 #define CPU_BASED_CR8_LOAD_EXITING 0x00080000 #define CPU_BASED_CR8_STORE_EXITING 0x00100000 #define CPU_BASED_TPR_SHADOW 0x00200000 @@ -51,7 +49,6 @@ * Definitions of Secondary Processor-Based VM-Execution Controls. 
*/ #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 -#define SECONDARY_EXEC_ENABLE_EPT 0x00000002 #define SECONDARY_EXEC_ENABLE_VPID 0x00000020 #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 @@ -103,22 +100,10 @@ enum vmcs_field { VIRTUAL_APIC_PAGE_ADDR_HIGH = 0x00002013, APIC_ACCESS_ADDR = 0x00002014, APIC_ACCESS_ADDR_HIGH = 0x00002015, - EPT_POINTER = 0x0000201a, - EPT_POINTER_HIGH = 0x0000201b, - GUEST_PHYSICAL_ADDRESS = 0x00002400, - GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401, VMCS_LINK_POINTER = 0x00002800, VMCS_LINK_POINTER_HIGH = 0x00002801, GUEST_IA32_DEBUGCTL = 0x00002802, GUEST_IA32_DEBUGCTL_HIGH = 0x00002803, - GUEST_PDPTR0 = 0x0000280a, - GUEST_PDPTR0_HIGH = 0x0000280b, - GUEST_PDPTR1 = 0x0000280c, - GUEST_PDPTR1_HIGH = 0x0000280d, - GUEST_PDPTR2 = 0x0000280e, - GUEST_PDPTR2_HIGH = 0x0000280f, - GUEST_PDPTR3 = 0x00002810, - GUEST_PDPTR3_HIGH = 0x00002811, PIN_BASED_VM_EXEC_CONTROL = 0x00004000, CPU_BASED_VM_EXEC_CONTROL = 0x00004002, EXCEPTION_BITMAP = 0x00004004, @@ -241,8 +226,6 @@ enum vmcs_field { #define EXIT_REASON_MWAIT_INSTRUCTION 36 #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 #define EXIT_REASON_APIC_ACCESS 44 -#define EXIT_REASON_EPT_VIOLATION 48 -#define EXIT_REASON_EPT_MISCONFIG 49 #define EXIT_REASON_WBINVD 54 /* @@ -333,36 +316,15 @@ enum vmcs_field { #define MSR_IA32_VMX_CR4_FIXED1 0x489 #define MSR_IA32_VMX_VMCS_ENUM 0x48a #define MSR_IA32_VMX_PROCBASED_CTLS2 0x48b -#define MSR_IA32_VMX_EPT_VPID_CAP 0x48c #define MSR_IA32_FEATURE_CONTROL 0x3a #define MSR_IA32_FEATURE_CONTROL_LOCKED 0x1 #define MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED 0x4 #define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT 9 -#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT 10 #define VMX_NR_VPIDS (1 << 16) #define VMX_VPID_EXTENT_SINGLE_CONTEXT 1 #define VMX_VPID_EXTENT_ALL_CONTEXT 2 -#define VMX_EPT_EXTENT_INDIVIDUAL_ADDR 0 -#define VMX_EPT_EXTENT_CONTEXT 1 -#define VMX_EPT_EXTENT_GLOBAL 2 -#define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24) -#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25) -#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26) -#define VMX_EPT_DEFAULT_GAW 3 -#define VMX_EPT_MAX_GAW 0x4 -#define VMX_EPT_MT_EPTE_SHIFT 3 -#define VMX_EPT_GAW_EPTP_SHIFT 3 -#define VMX_EPT_DEFAULT_MT 0x6ull -#define VMX_EPT_READABLE_MASK 0x1ull -#define VMX_EPT_WRITABLE_MASK 0x2ull -#define VMX_EPT_EXECUTABLE_MASK 0x4ull -#define VMX_EPT_FAKE_ACCESSED_MASK (1ull << 62) -#define VMX_EPT_FAKE_DIRTY_MASK (1ull << 63) - -#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul - #endif diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c index 21338bdb28ff..0ce556372a4d 100644 --- a/trunk/arch/x86/kvm/x86.c +++ b/trunk/arch/x86/kvm/x86.c @@ -2417,9 +2417,6 @@ int kvm_arch_init(void *opaque) kvm_x86_ops = ops; kvm_mmu_set_nonpresent_ptes(0ull, 0ull); - kvm_mmu_set_base_ptes(PT_PRESENT_MASK); - kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, - PT_DIRTY_MASK, PT64_NX_MASK, 0); return 0; out: @@ -3022,8 +3019,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) kvm_x86_ops->decache_regs(vcpu); - vcpu->arch.exception.pending = false; - vcpu_put(vcpu); return 0; @@ -3486,7 +3481,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) } if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) { - cseg_desc.type &= ~(1 << 1); //clear the B flag + cseg_desc.type &= ~(1 << 8); //clear the B flag save_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc); } @@ -3512,7 +3507,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) } if 
(reason != TASK_SWITCH_IRET) { - nseg_desc.type |= (1 << 1); + nseg_desc.type |= (1 << 8); save_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc); } @@ -3703,19 +3698,10 @@ void fx_init(struct kvm_vcpu *vcpu) { unsigned after_mxcsr_mask; - /* - * Touch the fpu the first time in non atomic context as if - * this is the first fpu instruction the exception handler - * will fire before the instruction returns and it'll have to - * allocate ram with GFP_KERNEL. - */ - if (!used_math()) - fx_save(&vcpu->arch.host_fx_image); - /* Initialize guest FPU by resetting ours and saving into guest's */ preempt_disable(); fx_save(&vcpu->arch.host_fx_image); - fx_finit(); + fpu_init(); fx_save(&vcpu->arch.guest_fx_image); fx_restore(&vcpu->arch.host_fx_image); preempt_enable(); @@ -3920,8 +3906,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kvm_free_physmem(kvm); if (kvm->arch.apic_access_page) put_page(kvm->arch.apic_access_page); - if (kvm->arch.ept_identity_pagetable) - put_page(kvm->arch.ept_identity_pagetable); kfree(kvm); } diff --git a/trunk/arch/x86/kvm/x86_emulate.c b/trunk/arch/x86/kvm/x86_emulate.c index f2a696d6a243..2ca08386f993 100644 --- a/trunk/arch/x86/kvm/x86_emulate.c +++ b/trunk/arch/x86/kvm/x86_emulate.c @@ -1761,7 +1761,6 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) case 6: /* lmsw */ realmode_lmsw(ctxt->vcpu, (u16)c->src.val, &ctxt->eflags); - c->dst.type = OP_NONE; break; case 7: /* invlpg*/ emulate_invlpg(ctxt->vcpu, memop); diff --git a/trunk/arch/x86/mm/discontig_32.c b/trunk/arch/x86/mm/discontig_32.c index 914ccf983687..18378850e25a 100644 --- a/trunk/arch/x86/mm/discontig_32.c +++ b/trunk/arch/x86/mm/discontig_32.c @@ -476,3 +476,29 @@ int memory_add_physaddr_to_nid(u64 addr) EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); #endif + +#ifndef CONFIG_HAVE_ARCH_PARSE_SRAT +/* + * XXX FIXME: Make SLIT table parsing available to 32-bit NUMA + * + * These stub functions are needed to compile 32-bit NUMA when SRAT is + * not set. There are functions in srat_64.c for parsing this table + * and it may be possible to make them common functions. 
+ */ +void acpi_numa_slit_init (struct acpi_table_slit *slit) +{ + printk(KERN_INFO "ACPI: No support for parsing SLIT table\n"); +} + +void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa) +{ +} + +void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma) +{ +} + +void acpi_numa_arch_fixup(void) +{ +} +#endif /* CONFIG_HAVE_ARCH_PARSE_SRAT */ diff --git a/trunk/arch/x86/mm/pat.c b/trunk/arch/x86/mm/pat.c index 60adbe22efa0..277446cd30b6 100644 --- a/trunk/arch/x86/mm/pat.c +++ b/trunk/arch/x86/mm/pat.c @@ -25,24 +25,31 @@ #include #include -#ifdef CONFIG_X86_PAT -int __read_mostly pat_wc_enabled = 1; +int pat_wc_enabled = 1; -void __init pat_disable(char *reason) -{ - pat_wc_enabled = 0; - printk(KERN_INFO "%s\n", reason); -} +static u64 __read_mostly boot_pat_state; static int nopat(char *str) { - pat_disable("PAT support disabled."); + pat_wc_enabled = 0; + printk(KERN_INFO "x86: PAT support disabled.\n"); + return 0; } early_param("nopat", nopat); -#endif -static u64 __read_mostly boot_pat_state; +static int pat_known_cpu(void) +{ + if (!pat_wc_enabled) + return 0; + + if (cpu_has_pat) + return 1; + + pat_wc_enabled = 0; + printk(KERN_INFO "CPU and/or kernel does not support PAT.\n"); + return 0; +} enum { PAT_UC = 0, /* uncached */ @@ -59,19 +66,17 @@ void pat_init(void) { u64 pat; - if (!pat_wc_enabled) +#ifndef CONFIG_X86_PAT + nopat(NULL); +#endif + + /* Boot CPU enables PAT based on CPU feature */ + if (!smp_processor_id() && !pat_known_cpu()) return; - /* Paranoia check. */ - if (!cpu_has_pat) { - printk(KERN_ERR "PAT enabled, but CPU feature cleared\n"); - /* - * Panic if this happens on the secondary CPU, and we - * switched to PAT on the boot CPU. We have no way to - * undo PAT. - */ - BUG_ON(boot_pat_state); - } + /* APs enable PAT iff boot CPU has enabled it before */ + if (smp_processor_id() && !pat_wc_enabled) + return; /* Set PWT to Write-Combining. All other bits stay the same */ /* @@ -90,8 +95,9 @@ void pat_init(void) PAT(4,WB) | PAT(5,WC) | PAT(6,UC_MINUS) | PAT(7,UC); /* Boot CPU check */ - if (!boot_pat_state) + if (!smp_processor_id()) { rdmsrl(MSR_IA32_CR_PAT, boot_pat_state); + } wrmsrl(MSR_IA32_CR_PAT, pat); printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n", diff --git a/trunk/arch/x86/mm/pgtable_32.c b/trunk/arch/x86/mm/pgtable_32.c index 369cf065b6a4..9ee007be9142 100644 --- a/trunk/arch/x86/mm/pgtable_32.c +++ b/trunk/arch/x86/mm/pgtable_32.c @@ -172,3 +172,10 @@ void reserve_top_address(unsigned long reserve) __FIXADDR_TOP = -reserve - PAGE_SIZE; __VMALLOC_RESERVE += reserve; } + +int pmd_bad(pmd_t pmd) +{ + WARN_ON_ONCE(pmd_bad_v1(pmd) != pmd_bad_v2(pmd)); + + return pmd_bad_v1(pmd); +} diff --git a/trunk/arch/x86/pci/Makefile_32 b/trunk/arch/x86/pci/Makefile_32 index 89ec35d00efd..7fa519868d70 100644 --- a/trunk/arch/x86/pci/Makefile_32 +++ b/trunk/arch/x86/pci/Makefile_32 @@ -6,19 +6,11 @@ obj-$(CONFIG_PCI_DIRECT) += direct.o obj-$(CONFIG_PCI_OLPC) += olpc.o pci-y := fixup.o - -# Do not change the ordering here. There is a nasty init function -# ordering dependency which breaks when you move acpi.o below -# legacy/irq.o pci-$(CONFIG_ACPI) += acpi.o pci-y += legacy.o irq.o -# Careful: VISWS and NUMAQ overrule the pci-y above. The colons are -# therefor correct. This needs a proper fix by distangling the code. 
-pci-$(CONFIG_X86_VISWS) := visws.o fixup.o -pci-$(CONFIG_X86_NUMAQ) := numa.o irq.o - -# Necessary for NUMAQ as well +pci-$(CONFIG_X86_VISWS) += visws.o fixup.o +pci-$(CONFIG_X86_NUMAQ) += numa.o irq.o pci-$(CONFIG_NUMA) += mp_bus_to_node.o obj-y += $(pci-y) common.o early.o diff --git a/trunk/arch/x86/pci/acpi.c b/trunk/arch/x86/pci/acpi.c index d95de2f199cd..1a9c0c6a1a18 100644 --- a/trunk/arch/x86/pci/acpi.c +++ b/trunk/arch/x86/pci/acpi.c @@ -6,6 +6,45 @@ #include #include "pci.h" +static int __devinit can_skip_ioresource_align(const struct dmi_system_id *d) +{ + pci_probe |= PCI_CAN_SKIP_ISA_ALIGN; + printk(KERN_INFO "PCI: %s detected, can skip ISA alignment\n", d->ident); + return 0; +} + +static struct dmi_system_id acpi_pciprobe_dmi_table[] __devinitdata = { +/* + * Systems where PCI IO resource ISA alignment can be skipped + * when the ISA enable bit in the bridge control is not set + */ + { + .callback = can_skip_ioresource_align, + .ident = "IBM System x3800", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "IBM"), + DMI_MATCH(DMI_PRODUCT_NAME, "x3800"), + }, + }, + { + .callback = can_skip_ioresource_align, + .ident = "IBM System x3850", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "IBM"), + DMI_MATCH(DMI_PRODUCT_NAME, "x3850"), + }, + }, + { + .callback = can_skip_ioresource_align, + .ident = "IBM System x3950", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "IBM"), + DMI_MATCH(DMI_PRODUCT_NAME, "x3950"), + }, + }, + {} +}; + struct pci_root_info { char *name; unsigned int res_num; @@ -157,6 +196,8 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do int pxm; #endif + dmi_check_system(acpi_pciprobe_dmi_table); + if (domain && !pci_domains_supported) { printk(KERN_WARNING "PCI: Multiple domains not supported " "(dom %d, bus %d)\n", domain, busnum); diff --git a/trunk/arch/x86/pci/common.c b/trunk/arch/x86/pci/common.c index 8545c8a9d107..2a4d751818b7 100644 --- a/trunk/arch/x86/pci/common.c +++ b/trunk/arch/x86/pci/common.c @@ -77,48 +77,17 @@ int pcibios_scanned; */ DEFINE_SPINLOCK(pci_config_lock); -static int __devinit can_skip_ioresource_align(const struct dmi_system_id *d) +static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev) { - pci_probe |= PCI_CAN_SKIP_ISA_ALIGN; - printk(KERN_INFO "PCI: %s detected, can skip ISA alignment\n", d->ident); - return 0; -} - -static struct dmi_system_id can_skip_pciprobe_dmi_table[] __devinitdata = { -/* - * Systems where PCI IO resource ISA alignment can be skipped - * when the ISA enable bit in the bridge control is not set - */ - { - .callback = can_skip_ioresource_align, - .ident = "IBM System x3800", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "IBM"), - DMI_MATCH(DMI_PRODUCT_NAME, "x3800"), - }, - }, - { - .callback = can_skip_ioresource_align, - .ident = "IBM System x3850", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "IBM"), - DMI_MATCH(DMI_PRODUCT_NAME, "x3850"), - }, - }, - { - .callback = can_skip_ioresource_align, - .ident = "IBM System x3950", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "IBM"), - DMI_MATCH(DMI_PRODUCT_NAME, "x3950"), - }, - }, - {} -}; - -void __init dmi_check_skip_isa_align(void) -{ - dmi_check_system(can_skip_pciprobe_dmi_table); + struct resource *rom_r = &dev->resource[PCI_ROM_RESOURCE]; + + if (rom_r->parent) + return; + if (rom_r->start) + /* we deal with BIOS assigned ROM later */ + return; + if (!(pci_probe & PCI_ASSIGN_ROMS)) + rom_r->start = rom_r->end = rom_r->flags = 0; } /* @@ -128,7 +97,11 @@ void __init dmi_check_skip_isa_align(void) void __devinit 
pcibios_fixup_bus(struct pci_bus *b) { + struct pci_dev *dev; + pci_read_bridge_bases(b); + list_for_each_entry(dev, &b->devices, bus_list) + pcibios_fixup_device_resources(dev); } /* @@ -345,16 +318,13 @@ static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = { {} }; -void __init dmi_check_pciprobe(void) -{ - dmi_check_system(pciprobe_dmi_table); -} - struct pci_bus * __devinit pcibios_scan_root(int busnum) { struct pci_bus *bus = NULL; struct pci_sysdata *sd; + dmi_check_system(pciprobe_dmi_table); + while ((bus = pci_find_next_bus(bus)) != NULL) { if (bus->number == busnum) { /* Already scanned */ @@ -492,9 +462,6 @@ char * __devinit pcibios_setup(char *str) } else if (!strcmp(str, "routeirq")) { pci_routeirq = 1; return NULL; - } else if (!strcmp(str, "skip_isa_align")) { - pci_probe |= PCI_CAN_SKIP_ISA_ALIGN; - return NULL; } return str; } @@ -522,7 +489,7 @@ void pcibios_disable_device (struct pci_dev *dev) pcibios_disable_irq(dev); } -struct pci_bus * __devinit pci_scan_bus_on_node(int busno, struct pci_ops *ops, int node) +struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops, int node) { struct pci_bus *bus = NULL; struct pci_sysdata *sd; @@ -545,7 +512,7 @@ struct pci_bus * __devinit pci_scan_bus_on_node(int busno, struct pci_ops *ops, return bus; } -struct pci_bus * __devinit pci_scan_bus_with_sysdata(int busno) +struct pci_bus *pci_scan_bus_with_sysdata(int busno) { return pci_scan_bus_on_node(busno, &pci_root_ops, -1); } diff --git a/trunk/arch/x86/pci/fixup.c b/trunk/arch/x86/pci/fixup.c index ff3a6a336342..b60b2abd480c 100644 --- a/trunk/arch/x86/pci/fixup.c +++ b/trunk/arch/x86/pci/fixup.c @@ -502,7 +502,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015, */ static void fam10h_pci_cfg_space_size(struct pci_dev *dev) { - dev->cfg_size = pci_cfg_space_size_ext(dev); + dev->cfg_size = pci_cfg_space_size_ext(dev, 0); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1200, fam10h_pci_cfg_space_size); diff --git a/trunk/arch/x86/pci/init.c b/trunk/arch/x86/pci/init.c index e70b9c57b88e..dd30c6076b5d 100644 --- a/trunk/arch/x86/pci/init.c +++ b/trunk/arch/x86/pci/init.c @@ -33,10 +33,6 @@ static __init int pci_access_init(void) printk(KERN_ERR "PCI: Fatal: No config space access function found\n"); - dmi_check_pciprobe(); - - dmi_check_skip_isa_align(); - return 0; } arch_initcall(pci_access_init); diff --git a/trunk/arch/x86/pci/k8-bus_64.c b/trunk/arch/x86/pci/k8-bus_64.c index 5c2799c20e47..ab6d4b18a88f 100644 --- a/trunk/arch/x86/pci/k8-bus_64.c +++ b/trunk/arch/x86/pci/k8-bus_64.c @@ -504,6 +504,14 @@ static int __init early_fill_mp_bus_info(void) } } +#ifdef CONFIG_NUMA + for (i = 0; i < BUS_NR; i++) { + node = mp_bus_to_node[i]; + if (node >= 0) + printk(KERN_DEBUG "bus: %02x to node: %02x\n", i, node); + } +#endif + for (i = 0; i < pci_root_num; i++) { int res_num; int busnum; diff --git a/trunk/arch/x86/pci/pci.h b/trunk/arch/x86/pci/pci.h index f3972b12c60a..c58805a92db5 100644 --- a/trunk/arch/x86/pci/pci.h +++ b/trunk/arch/x86/pci/pci.h @@ -38,9 +38,6 @@ enum pci_bf_sort_state { pci_dmi_bf, }; -extern void __init dmi_check_pciprobe(void); -extern void __init dmi_check_skip_isa_align(void); - /* pci-i386.c */ extern unsigned int pcibios_max_latency; diff --git a/trunk/arch/x86/vdso/vdso32-setup.c b/trunk/arch/x86/vdso/vdso32-setup.c index cf058fecfcee..4dceeb1fc5e0 100644 --- a/trunk/arch/x86/vdso/vdso32-setup.c +++ b/trunk/arch/x86/vdso/vdso32-setup.c @@ -162,7 +162,7 @@ static __init void relocate_vdso(Elf32_Ehdr *ehdr) Elf32_Shdr *shdr; 
int i; - BUG_ON(memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0 || + BUG_ON(memcmp(ehdr->e_ident, ELFMAG, 4) != 0 || !elf_check_arch_ia32(ehdr) || ehdr->e_type != ET_DYN); diff --git a/trunk/arch/x86/video/fbdev.c b/trunk/arch/x86/video/fbdev.c index 69527688f794..4db42bff8c60 100644 --- a/trunk/arch/x86/video/fbdev.c +++ b/trunk/arch/x86/video/fbdev.c @@ -1,4 +1,5 @@ /* + * * Copyright (C) 2007 Antonino Daplas * * This file is subject to the terms and conditions of the GNU General Public @@ -28,4 +29,3 @@ int fb_is_primary_device(struct fb_info *info) return retval; } EXPORT_SYMBOL(fb_is_primary_device); -MODULE_LICENSE("GPL"); diff --git a/trunk/block/blk-core.c b/trunk/block/blk-core.c index 2987fe47b5ee..b754a4a2f9bd 100644 --- a/trunk/block/blk-core.c +++ b/trunk/block/blk-core.c @@ -54,16 +54,15 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done); static void drive_stat_acct(struct request *rq, int new_io) { - struct hd_struct *part; int rw = rq_data_dir(rq); if (!blk_fs_request(rq) || !rq->rq_disk) return; - part = get_part(rq->rq_disk, rq->sector); - if (!new_io) - __all_stat_inc(rq->rq_disk, part, merges[rw], rq->sector); - else { + if (!new_io) { + __all_stat_inc(rq->rq_disk, merges[rw], rq->sector); + } else { + struct hd_struct *part = get_part(rq->rq_disk, rq->sector); disk_round_stats(rq->rq_disk); rq->rq_disk->in_flight++; if (part) { @@ -254,11 +253,9 @@ EXPORT_SYMBOL(__generic_unplug_device); **/ void generic_unplug_device(struct request_queue *q) { - if (blk_queue_plugged(q)) { - spin_lock_irq(q->queue_lock); - __generic_unplug_device(q); - spin_unlock_irq(q->queue_lock); - } + spin_lock_irq(q->queue_lock); + __generic_unplug_device(q); + spin_unlock_irq(q->queue_lock); } EXPORT_SYMBOL(generic_unplug_device); @@ -1539,11 +1536,10 @@ static int __end_that_request_first(struct request *req, int error, } if (blk_fs_request(req) && req->rq_disk) { - struct hd_struct *part = get_part(req->rq_disk, req->sector); const int rw = rq_data_dir(req); - all_stat_add(req->rq_disk, part, sectors[rw], - nr_bytes >> 9, req->sector); + all_stat_add(req->rq_disk, sectors[rw], + nr_bytes >> 9, req->sector); } total_bytes = bio_nbytes = 0; @@ -1729,8 +1725,8 @@ static void end_that_request_last(struct request *req, int error) const int rw = rq_data_dir(req); struct hd_struct *part = get_part(disk, req->sector); - __all_stat_inc(disk, part, ios[rw], req->sector); - __all_stat_add(disk, part, ticks[rw], duration, req->sector); + __all_stat_inc(disk, ios[rw], req->sector); + __all_stat_add(disk, ticks[rw], duration, req->sector); disk_round_stats(disk); disk->in_flight--; if (part) { diff --git a/trunk/block/blk-ioc.c b/trunk/block/blk-ioc.c index 012f065ac8e2..e34df7c9fc36 100644 --- a/trunk/block/blk-ioc.c +++ b/trunk/block/blk-ioc.c @@ -41,8 +41,8 @@ int put_io_context(struct io_context *ioc) rcu_read_lock(); if (ioc->aic && ioc->aic->dtor) ioc->aic->dtor(ioc->aic); - cfq_dtor(ioc); rcu_read_unlock(); + cfq_dtor(ioc); kmem_cache_free(iocontext_cachep, ioc); return 1; diff --git a/trunk/block/blk-merge.c b/trunk/block/blk-merge.c index 651136aae76e..73b23562af20 100644 --- a/trunk/block/blk-merge.c +++ b/trunk/block/blk-merge.c @@ -149,9 +149,9 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio, struct bio *nxt) { - if (!bio_flagged(bio, BIO_SEG_VALID)) + if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) blk_recount_segments(q, bio); - if (!bio_flagged(nxt, BIO_SEG_VALID)) + if 
(unlikely(!bio_flagged(nxt, BIO_SEG_VALID))) blk_recount_segments(q, nxt); if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) || BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size)) @@ -312,9 +312,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req, q->last_merge = NULL; return 0; } - if (!bio_flagged(req->biotail, BIO_SEG_VALID)) + if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID))) blk_recount_segments(q, req->biotail); - if (!bio_flagged(bio, BIO_SEG_VALID)) + if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) blk_recount_segments(q, bio); len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size; if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) @@ -352,9 +352,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req, return 0; } len = bio->bi_hw_back_size + req->bio->bi_hw_front_size; - if (!bio_flagged(bio, BIO_SEG_VALID)) + if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) blk_recount_segments(q, bio); - if (!bio_flagged(req->bio, BIO_SEG_VALID)) + if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID))) blk_recount_segments(q, req->bio); if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) && !BIOVEC_VIRT_OVERSIZE(len)) { diff --git a/trunk/block/blk-sysfs.c b/trunk/block/blk-sysfs.c index 304ec73ab821..e85c4013e8a2 100644 --- a/trunk/block/blk-sysfs.c +++ b/trunk/block/blk-sysfs.c @@ -146,13 +146,11 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page, unsigned long nm; ssize_t ret = queue_var_store(&nm, page, count); - spin_lock_irq(q->queue_lock); if (nm) - queue_flag_set(QUEUE_FLAG_NOMERGES, q); + set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags); else - queue_flag_clear(QUEUE_FLAG_NOMERGES, q); + clear_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags); - spin_unlock_irq(q->queue_lock); return ret; } diff --git a/trunk/block/blk-tag.c b/trunk/block/blk-tag.c index 32667beb03ee..de64e0429977 100644 --- a/trunk/block/blk-tag.c +++ b/trunk/block/blk-tag.c @@ -70,7 +70,7 @@ void __blk_queue_free_tags(struct request_queue *q) __blk_free_tags(bqt); q->queue_tags = NULL; - queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q); + queue_flag_clear(QUEUE_FLAG_QUEUED, q); } /** @@ -98,7 +98,7 @@ EXPORT_SYMBOL(blk_free_tags); **/ void blk_queue_free_tags(struct request_queue *q) { - queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q); + queue_flag_clear(QUEUE_FLAG_QUEUED, q); } EXPORT_SYMBOL(blk_queue_free_tags); @@ -171,9 +171,6 @@ EXPORT_SYMBOL(blk_init_tags); * @q: the request queue for the device * @depth: the maximum queue depth supported * @tags: the tag to use - * - * Queue lock must be held here if the function is called to resize an - * existing map. 
**/ int blk_queue_init_tags(struct request_queue *q, int depth, struct blk_queue_tag *tags) @@ -200,7 +197,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth, * assign it, all done */ q->queue_tags = tags; - queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q); + queue_flag_set(QUEUE_FLAG_QUEUED, q); INIT_LIST_HEAD(&q->tag_busy_list); return 0; fail: diff --git a/trunk/block/cfq-iosched.c b/trunk/block/cfq-iosched.c index b399c62936e0..f4e1006c253d 100644 --- a/trunk/block/cfq-iosched.c +++ b/trunk/block/cfq-iosched.c @@ -1142,17 +1142,6 @@ static void cfq_put_queue(struct cfq_queue *cfqq) kmem_cache_free(cfq_pool, cfqq); } -static void -__call_for_each_cic(struct io_context *ioc, - void (*func)(struct io_context *, struct cfq_io_context *)) -{ - struct cfq_io_context *cic; - struct hlist_node *n; - - hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list) - func(ioc, cic); -} - /* * Call func for each cic attached to this ioc. */ @@ -1160,8 +1149,12 @@ static void call_for_each_cic(struct io_context *ioc, void (*func)(struct io_context *, struct cfq_io_context *)) { + struct cfq_io_context *cic; + struct hlist_node *n; + rcu_read_lock(); - __call_for_each_cic(ioc, func); + hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list) + func(ioc, cic); rcu_read_unlock(); } @@ -1205,7 +1198,7 @@ static void cfq_free_io_context(struct io_context *ioc) * should be ok to iterate over the known list, we will see all cic's * since no new ones are added. */ - __call_for_each_cic(ioc, cic_free_func); + call_for_each_cic(ioc, cic_free_func); } static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) @@ -1303,10 +1296,10 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc) printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); case IOPRIO_CLASS_NONE: /* - * no prio set, inherit CPU scheduling settings + * no prio set, place us in the middle of the BE classes */ cfqq->ioprio = task_nice_ioprio(tsk); - cfqq->ioprio_class = task_nice_ioclass(tsk); + cfqq->ioprio_class = IOPRIO_CLASS_BE; break; case IOPRIO_CLASS_RT: cfqq->ioprio = task_ioprio(ioc); diff --git a/trunk/crypto/hmac.c b/trunk/crypto/hmac.c index 14c6351e639d..b60c3c7aa320 100644 --- a/trunk/crypto/hmac.c +++ b/trunk/crypto/hmac.c @@ -57,35 +57,14 @@ static int hmac_setkey(struct crypto_hash *parent, if (keylen > bs) { struct hash_desc desc; struct scatterlist tmp; - int tmplen; int err; desc.tfm = tfm; desc.flags = crypto_hash_get_flags(parent); desc.flags &= CRYPTO_TFM_REQ_MAY_SLEEP; + sg_init_one(&tmp, inkey, keylen); - err = crypto_hash_init(&desc); - if (err) - return err; - - tmplen = bs * 2 + ds; - sg_init_one(&tmp, ipad, tmplen); - - for (; keylen > tmplen; inkey += tmplen, keylen -= tmplen) { - memcpy(ipad, inkey, tmplen); - err = crypto_hash_update(&desc, &tmp, tmplen); - if (err) - return err; - } - - if (keylen) { - memcpy(ipad, inkey, keylen); - err = crypto_hash_update(&desc, &tmp, keylen); - if (err) - return err; - } - - err = crypto_hash_final(&desc, digest); + err = crypto_hash_digest(&desc, &tmp, keylen, digest); if (err) return err; diff --git a/trunk/drivers/accessibility/Kconfig b/trunk/drivers/accessibility/Kconfig index ef3b65bfdd0a..1264c4b98094 100644 --- a/trunk/drivers/accessibility/Kconfig +++ b/trunk/drivers/accessibility/Kconfig @@ -1,17 +1,7 @@ menuconfig ACCESSIBILITY bool "Accessibility support" ---help--- - Accessibility handles all special kinds of hardware devices or - software adapters which help people with disabilities (e.g. 
- blindness) to use computers. - - That includes braille devices, speech synthesis, keyboard - remapping, etc. - - Say Y here to get to see options for accessibility. - This option alone does not add any kernel code. - - If you say N, all options in this submenu will be skipped and disabled. + Enable a submenu where accessibility items may be enabled. If unsure, say N. diff --git a/trunk/drivers/ata/Kconfig b/trunk/drivers/ata/Kconfig index 9bf2986a2788..1c11df9a5f32 100644 --- a/trunk/drivers/ata/Kconfig +++ b/trunk/drivers/ata/Kconfig @@ -205,8 +205,8 @@ config SATA_VITESSE If unsure, say N. config SATA_INIC162X - tristate "Initio 162x SATA support" - depends on PCI + tristate "Initio 162x SATA support (HIGHLY EXPERIMENTAL)" + depends on PCI && EXPERIMENTAL help This option enables support for Initio 162x Serial ATA. @@ -697,15 +697,6 @@ config PATA_SCC If unsure, say N. -config PATA_SCH - tristate "Intel SCH PATA support" - depends on PCI - help - This option enables support for Intel SCH PATA on the Intel - SCH (US15W, US15L, UL11L) series host controllers. - - If unsure, say N. - config PATA_BF54X tristate "Blackfin 54x ATAPI support" depends on BF542 || BF548 || BF549 diff --git a/trunk/drivers/ata/Makefile b/trunk/drivers/ata/Makefile index 674965fa326d..b693d829383a 100644 --- a/trunk/drivers/ata/Makefile +++ b/trunk/drivers/ata/Makefile @@ -67,7 +67,6 @@ obj-$(CONFIG_PATA_SIS) += pata_sis.o obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o obj-$(CONFIG_PATA_SCC) += pata_scc.o -obj-$(CONFIG_PATA_SCH) += pata_sch.o obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o diff --git a/trunk/drivers/ata/ahci.c b/trunk/drivers/ata/ahci.c index 97f83fb2ee2e..8cace9aa9c03 100644 --- a/trunk/drivers/ata/ahci.c +++ b/trunk/drivers/ata/ahci.c @@ -1267,7 +1267,9 @@ static int ahci_check_ready(struct ata_link *link) void __iomem *port_mmio = ahci_port_base(link->ap); u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF; - return ata_check_ready(status); + if (!(status & ATA_BUSY)) + return 1; + return 0; } static int ahci_softreset(struct ata_link *link, unsigned int *class, diff --git a/trunk/drivers/ata/ata_generic.c b/trunk/drivers/ata/ata_generic.c index 75a406f5e694..47aeccd52fa9 100644 --- a/trunk/drivers/ata/ata_generic.c +++ b/trunk/drivers/ata/ata_generic.c @@ -152,12 +152,6 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id if (dev->vendor == PCI_VENDOR_ID_AL) ata_pci_bmdma_clear_simplex(dev); - if (dev->vendor == PCI_VENDOR_ID_ATI) { - int rc = pcim_enable_device(dev); - if (rc < 0) - return rc; - pcim_pin_device(dev); - } return ata_pci_sff_init_one(dev, ppi, &generic_sht, NULL); } diff --git a/trunk/drivers/ata/ata_piix.c b/trunk/drivers/ata/ata_piix.c index a9027b8fbdd5..ea2c7649d399 100644 --- a/trunk/drivers/ata/ata_piix.c +++ b/trunk/drivers/ata/ata_piix.c @@ -1348,8 +1348,6 @@ static void __devinit piix_init_sidpr(struct ata_host *host) { struct pci_dev *pdev = to_pci_dev(host->dev); struct piix_host_priv *hpriv = host->private_data; - struct ata_device *dev0 = &host->ports[0]->link.device[0]; - u32 scontrol; int i; /* check for availability */ @@ -1368,29 +1366,6 @@ static void __devinit piix_init_sidpr(struct ata_host *host) return; hpriv->sidpr = pcim_iomap_table(pdev)[PIIX_SIDPR_BAR]; - - /* SCR access via SIDPR doesn't work on some configurations. 
- * Give it a test drive by inhibiting power save modes which - * we'll do anyway. - */ - scontrol = piix_sidpr_read(dev0, SCR_CONTROL); - - /* if IPM is already 3, SCR access is probably working. Don't - * un-inhibit power save modes as BIOS might have inhibited - * them for a reason. - */ - if ((scontrol & 0xf00) != 0x300) { - scontrol |= 0x300; - piix_sidpr_write(dev0, SCR_CONTROL, scontrol); - scontrol = piix_sidpr_read(dev0, SCR_CONTROL); - - if ((scontrol & 0xf00) != 0x300) { - dev_printk(KERN_INFO, host->dev, "SCR access via " - "SIDPR is available but doesn't work\n"); - return; - } - } - host->ports[0]->ops = &piix_sidpr_sata_ops; host->ports[1]->ops = &piix_sidpr_sata_ops; } diff --git a/trunk/drivers/ata/libata-core.c b/trunk/drivers/ata/libata-core.c index 927b692d723c..3bc488538204 100644 --- a/trunk/drivers/ata/libata-core.c +++ b/trunk/drivers/ata/libata-core.c @@ -6292,7 +6292,6 @@ EXPORT_SYMBOL_GPL(ata_eh_freeze_port); EXPORT_SYMBOL_GPL(ata_eh_thaw_port); EXPORT_SYMBOL_GPL(ata_eh_qc_complete); EXPORT_SYMBOL_GPL(ata_eh_qc_retry); -EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error); EXPORT_SYMBOL_GPL(ata_do_eh); EXPORT_SYMBOL_GPL(ata_std_error_handler); diff --git a/trunk/drivers/ata/libata-eh.c b/trunk/drivers/ata/libata-eh.c index 62e033146bed..61dcd0026c64 100644 --- a/trunk/drivers/ata/libata-eh.c +++ b/trunk/drivers/ata/libata-eh.c @@ -1357,7 +1357,7 @@ static void ata_eh_analyze_serror(struct ata_link *link) * LOCKING: * Kernel thread context (may sleep). */ -void ata_eh_analyze_ncq_error(struct ata_link *link) +static void ata_eh_analyze_ncq_error(struct ata_link *link) { struct ata_port *ap = link->ap; struct ata_eh_context *ehc = &link->eh_context; diff --git a/trunk/drivers/ata/libata-sff.c b/trunk/drivers/ata/libata-sff.c index 3c2d2289f85e..2ec65a8fda79 100644 --- a/trunk/drivers/ata/libata-sff.c +++ b/trunk/drivers/ata/libata-sff.c @@ -314,7 +314,11 @@ static int ata_sff_check_ready(struct ata_link *link) { u8 status = link->ap->ops->sff_check_status(link->ap); - return ata_check_ready(status); + if (!(status & ATA_BUSY)) + return 1; + if (status == 0xff) + return -ENODEV; + return 0; } /** diff --git a/trunk/drivers/ata/pata_acpi.c b/trunk/drivers/ata/pata_acpi.c index fbe605711554..c5f91e629945 100644 --- a/trunk/drivers/ata/pata_acpi.c +++ b/trunk/drivers/ata/pata_acpi.c @@ -259,12 +259,6 @@ static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id) .port_ops = &pacpi_ops, }; const struct ata_port_info *ppi[] = { &info, NULL }; - if (pdev->vendor == PCI_VENDOR_ID_ATI) { - int rc = pcim_enable_device(pdev); - if (rc < 0) - return rc; - pcim_pin_device(pdev); - } return ata_pci_sff_init_one(pdev, ppi, &pacpi_sht, NULL); } diff --git a/trunk/drivers/ata/pata_sch.c b/trunk/drivers/ata/pata_sch.c deleted file mode 100644 index c8cc027789fe..000000000000 --- a/trunk/drivers/ata/pata_sch.c +++ /dev/null @@ -1,206 +0,0 @@ -/* - * pata_sch.c - Intel SCH PATA controllers - * - * Copyright (c) 2008 Alek Du - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; see the file COPYING. If not, write to - * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. - * - */ - -/* - * Supports: - * Intel SCH (AF82US15W, AF82US15L, AF82UL11L) chipsets -- see spec at: - * http://download.intel.com/design/chipsets/embedded/datashts/319537.pdf - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define DRV_NAME "pata_sch" -#define DRV_VERSION "0.2" - -/* see SCH datasheet page 351 */ -enum { - D0TIM = 0x80, /* Device 0 Timing Register */ - D1TIM = 0x84, /* Device 1 Timing Register */ - PM = 0x07, /* PIO Mode Bit Mask */ - MDM = (0x03 << 8), /* Multi-word DMA Mode Bit Mask */ - UDM = (0x07 << 16), /* Ultra DMA Mode Bit Mask */ - PPE = (1 << 30), /* Prefetch/Post Enable */ - USD = (1 << 31), /* Use Synchronous DMA */ -}; - -static int sch_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent); -static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev); -static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev); - -static const struct pci_device_id sch_pci_tbl[] = { - /* Intel SCH PATA Controller */ - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SCH_IDE), 0 }, - { } /* terminate list */ -}; - -static struct pci_driver sch_pci_driver = { - .name = DRV_NAME, - .id_table = sch_pci_tbl, - .probe = sch_init_one, - .remove = ata_pci_remove_one, -#ifdef CONFIG_PM - .suspend = ata_pci_device_suspend, - .resume = ata_pci_device_resume, -#endif -}; - -static struct scsi_host_template sch_sht = { - ATA_BMDMA_SHT(DRV_NAME), -}; - -static struct ata_port_operations sch_pata_ops = { - .inherits = &ata_bmdma_port_ops, - .cable_detect = ata_cable_unknown, - .set_piomode = sch_set_piomode, - .set_dmamode = sch_set_dmamode, -}; - -static struct ata_port_info sch_port_info = { - .flags = 0, - .pio_mask = ATA_PIO4, /* pio0-4 */ - .mwdma_mask = ATA_MWDMA2, /* mwdma0-2 */ - .udma_mask = ATA_UDMA5, /* udma0-5 */ - .port_ops = &sch_pata_ops, -}; - -MODULE_AUTHOR("Alek Du "); -MODULE_DESCRIPTION("SCSI low-level driver for Intel SCH PATA controllers"); -MODULE_LICENSE("GPL"); -MODULE_DEVICE_TABLE(pci, sch_pci_tbl); -MODULE_VERSION(DRV_VERSION); - -/** - * sch_set_piomode - Initialize host controller PATA PIO timings - * @ap: Port whose timings we are configuring - * @adev: ATA device - * - * Set PIO mode for device, in host controller PCI config space. - * - * LOCKING: - * None (inherited from caller). - */ - -static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev) -{ - unsigned int pio = adev->pio_mode - XFER_PIO_0; - struct pci_dev *dev = to_pci_dev(ap->host->dev); - unsigned int port = adev->devno ? D1TIM : D0TIM; - unsigned int data; - - pci_read_config_dword(dev, port, &data); - /* see SCH datasheet page 351 */ - /* set PIO mode */ - data &= ~(PM | PPE); - data |= pio; - /* enable PPE for block device */ - if (adev->class == ATA_DEV_ATA) - data |= PPE; - pci_write_config_dword(dev, port, data); -} - -/** - * sch_set_dmamode - Initialize host controller PATA DMA timings - * @ap: Port whose timings we are configuring - * @adev: ATA device - * - * Set MW/UDMA mode for device, in host controller PCI config space. - * - * LOCKING: - * None (inherited from caller). 
- */ - -static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev) -{ - unsigned int dma_mode = adev->dma_mode; - struct pci_dev *dev = to_pci_dev(ap->host->dev); - unsigned int port = adev->devno ? D1TIM : D0TIM; - unsigned int data; - - pci_read_config_dword(dev, port, &data); - /* see SCH datasheet page 351 */ - if (dma_mode >= XFER_UDMA_0) { - /* enable Synchronous DMA mode */ - data |= USD; - data &= ~UDM; - data |= (dma_mode - XFER_UDMA_0) << 16; - } else { /* must be MWDMA mode, since we masked SWDMA already */ - data &= ~(USD | MDM); - data |= (dma_mode - XFER_MW_DMA_0) << 8; - } - pci_write_config_dword(dev, port, data); -} - -/** - * sch_init_one - Register SCH ATA PCI device with kernel services - * @pdev: PCI device to register - * @ent: Entry in sch_pci_tbl matching with @pdev - * - * LOCKING: - * Inherited from PCI layer (may sleep). - * - * RETURNS: - * Zero on success, or -ERRNO value. - */ - -static int __devinit sch_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - static int printed_version; - const struct ata_port_info *ppi[] = { &sch_port_info, NULL }; - struct ata_host *host; - int rc; - - if (!printed_version++) - dev_printk(KERN_DEBUG, &pdev->dev, - "version " DRV_VERSION "\n"); - - /* enable device and prepare host */ - rc = pcim_enable_device(pdev); - if (rc) - return rc; - rc = ata_pci_sff_prepare_host(pdev, ppi, &host); - if (rc) - return rc; - pci_set_master(pdev); - return ata_pci_sff_activate_host(host, ata_sff_interrupt, &sch_sht); -} - -static int __init sch_init(void) -{ - return pci_register_driver(&sch_pci_driver); -} - -static void __exit sch_exit(void) -{ - pci_unregister_driver(&sch_pci_driver); -} - -module_init(sch_init); -module_exit(sch_exit); diff --git a/trunk/drivers/ata/sata_inic162x.c b/trunk/drivers/ata/sata_inic162x.c index 3ead02fe379e..d27bb9a2568f 100644 --- a/trunk/drivers/ata/sata_inic162x.c +++ b/trunk/drivers/ata/sata_inic162x.c @@ -10,33 +10,13 @@ * right. Documentation is available at initio's website but it only * documents registers (not programming model). * - * This driver has interesting history. The first version was written - * from the documentation and a 2.4 IDE driver posted on a Taiwan - * company, which didn't use any IDMA features and couldn't handle - * LBA48. The resulting driver couldn't handle LBA48 devices either - * making it pretty useless. - * - * After a while, initio picked the driver up, renamed it to - * sata_initio162x, updated it to use IDMA for ATA DMA commands and - * posted it on their website. It only used ATA_PROT_DMA for IDMA and - * attaching both devices and issuing IDMA and !IDMA commands - * simultaneously broke it due to PIRQ masking interaction but it did - * show how to use the IDMA (ADMA + some initio specific twists) - * engine. - * - * Then, I picked up their changes again and here's the usable driver - * which uses IDMA for everything. Everything works now including - * LBA48, CD/DVD burning, suspend/resume and hotplug. There are some - * issues tho. Result Tf is not resported properly, NCQ isn't - * supported yet and CD/DVD writing works with DMA assisted PIO - * protocol (which, for native SATA devices, shouldn't cause any - * noticeable difference). - * - * Anyways, so, here's finally a working driver for inic162x. Enjoy! - * - * initio: If you guys wanna improve the driver regarding result TF - * access and other stuff, please feel free to contact me. I'll be - * happy to assist. + * - ATA disks work. + * - Hotplug works. 
+ * - ATAPI read works but burning doesn't. This thing is really + * peculiar about ATAPI and I couldn't figure out how ATAPI PIO and + * ATAPI DMA WRITE should be programmed. If you've got a clue, be + * my guest. + * - Both STR and STD work. */ #include @@ -48,19 +28,13 @@ #include #define DRV_NAME "sata_inic162x" -#define DRV_VERSION "0.4" +#define DRV_VERSION "0.3" enum { - MMIO_BAR_PCI = 5, - MMIO_BAR_CARDBUS = 1, + MMIO_BAR = 5, NR_PORTS = 2, - IDMA_CPB_TBL_SIZE = 4 * 32, - - INIC_DMA_BOUNDARY = 0xffffff, - - HOST_ACTRL = 0x08, HOST_CTL = 0x7c, HOST_STAT = 0x7e, HOST_IRQ_STAT = 0xbc, @@ -69,37 +43,22 @@ enum { PORT_SIZE = 0x40, /* registers for ATA TF operation */ - PORT_TF_DATA = 0x00, - PORT_TF_FEATURE = 0x01, - PORT_TF_NSECT = 0x02, - PORT_TF_LBAL = 0x03, - PORT_TF_LBAM = 0x04, - PORT_TF_LBAH = 0x05, - PORT_TF_DEVICE = 0x06, - PORT_TF_COMMAND = 0x07, - PORT_TF_ALT_STAT = 0x08, + PORT_TF = 0x00, + PORT_ALT_STAT = 0x08, PORT_IRQ_STAT = 0x09, PORT_IRQ_MASK = 0x0a, PORT_PRD_CTL = 0x0b, PORT_PRD_ADDR = 0x0c, PORT_PRD_XFERLEN = 0x10, - PORT_CPB_CPBLAR = 0x18, - PORT_CPB_PTQFIFO = 0x1c, /* IDMA register */ PORT_IDMA_CTL = 0x14, - PORT_IDMA_STAT = 0x16, - - PORT_RPQ_FIFO = 0x1e, - PORT_RPQ_CNT = 0x1f, PORT_SCR = 0x20, /* HOST_CTL bits */ HCTL_IRQOFF = (1 << 8), /* global IRQ off */ - HCTL_FTHD0 = (1 << 10), /* fifo threshold 0 */ - HCTL_FTHD1 = (1 << 11), /* fifo threshold 1*/ - HCTL_PWRDWN = (1 << 12), /* power down PHYs */ + HCTL_PWRDWN = (1 << 13), /* power down PHYs */ HCTL_SOFTRST = (1 << 13), /* global reset (no phy reset) */ HCTL_RPGSEL = (1 << 15), /* register page select */ @@ -122,7 +81,9 @@ enum { PIRQ_PENDING = (1 << 7), /* port IRQ pending (STAT only) */ PIRQ_ERR = PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL, - PIRQ_MASK_DEFAULT = PIRQ_REPLY | PIRQ_ATA, + + PIRQ_MASK_DMA_READ = PIRQ_REPLY | PIRQ_ATA, + PIRQ_MASK_OTHER = PIRQ_REPLY | PIRQ_COMPLETE, PIRQ_MASK_FREEZE = 0xff, /* PORT_PRD_CTL bits */ @@ -135,104 +96,20 @@ enum { IDMA_CTL_RST_IDMA = (1 << 5), /* reset IDMA machinary */ IDMA_CTL_GO = (1 << 7), /* IDMA mode go */ IDMA_CTL_ATA_NIEN = (1 << 8), /* ATA IRQ disable */ - - /* PORT_IDMA_STAT bits */ - IDMA_STAT_PERR = (1 << 0), /* PCI ERROR MODE */ - IDMA_STAT_CPBERR = (1 << 1), /* ADMA CPB error */ - IDMA_STAT_LGCY = (1 << 3), /* ADMA legacy */ - IDMA_STAT_UIRQ = (1 << 4), /* ADMA unsolicited irq */ - IDMA_STAT_STPD = (1 << 5), /* ADMA stopped */ - IDMA_STAT_PSD = (1 << 6), /* ADMA pause */ - IDMA_STAT_DONE = (1 << 7), /* ADMA done */ - - IDMA_STAT_ERR = IDMA_STAT_PERR | IDMA_STAT_CPBERR, - - /* CPB Control Flags*/ - CPB_CTL_VALID = (1 << 0), /* CPB valid */ - CPB_CTL_QUEUED = (1 << 1), /* queued command */ - CPB_CTL_DATA = (1 << 2), /* data, rsvd in datasheet */ - CPB_CTL_IEN = (1 << 3), /* PCI interrupt enable */ - CPB_CTL_DEVDIR = (1 << 4), /* device direction control */ - - /* CPB Response Flags */ - CPB_RESP_DONE = (1 << 0), /* ATA command complete */ - CPB_RESP_REL = (1 << 1), /* ATA release */ - CPB_RESP_IGNORED = (1 << 2), /* CPB ignored */ - CPB_RESP_ATA_ERR = (1 << 3), /* ATA command error */ - CPB_RESP_SPURIOUS = (1 << 4), /* ATA spurious interrupt error */ - CPB_RESP_UNDERFLOW = (1 << 5), /* APRD deficiency length error */ - CPB_RESP_OVERFLOW = (1 << 6), /* APRD exccess length error */ - CPB_RESP_CPB_ERR = (1 << 7), /* CPB error flag */ - - /* PRD Control Flags */ - PRD_DRAIN = (1 << 1), /* ignore data excess */ - PRD_CDB = (1 << 2), /* atapi packet command pointer */ - PRD_DIRECT_INTR = (1 << 3), /* direct interrupt */ - PRD_DMA = (1 << 4), /* data transfer 
method */ - PRD_WRITE = (1 << 5), /* data dir, rsvd in datasheet */ - PRD_IOM = (1 << 6), /* io/memory transfer */ - PRD_END = (1 << 7), /* APRD chain end */ }; -/* Comman Parameter Block */ -struct inic_cpb { - u8 resp_flags; /* Response Flags */ - u8 error; /* ATA Error */ - u8 status; /* ATA Status */ - u8 ctl_flags; /* Control Flags */ - __le32 len; /* Total Transfer Length */ - __le32 prd; /* First PRD pointer */ - u8 rsvd[4]; - /* 16 bytes */ - u8 feature; /* ATA Feature */ - u8 hob_feature; /* ATA Ex. Feature */ - u8 device; /* ATA Device/Head */ - u8 mirctl; /* Mirror Control */ - u8 nsect; /* ATA Sector Count */ - u8 hob_nsect; /* ATA Ex. Sector Count */ - u8 lbal; /* ATA Sector Number */ - u8 hob_lbal; /* ATA Ex. Sector Number */ - u8 lbam; /* ATA Cylinder Low */ - u8 hob_lbam; /* ATA Ex. Cylinder Low */ - u8 lbah; /* ATA Cylinder High */ - u8 hob_lbah; /* ATA Ex. Cylinder High */ - u8 command; /* ATA Command */ - u8 ctl; /* ATA Control */ - u8 slave_error; /* Slave ATA Error */ - u8 slave_status; /* Slave ATA Status */ - /* 32 bytes */ -} __packed; - -/* Physical Region Descriptor */ -struct inic_prd { - __le32 mad; /* Physical Memory Address */ - __le16 len; /* Transfer Length */ - u8 rsvd; - u8 flags; /* Control Flags */ -} __packed; - -struct inic_pkt { - struct inic_cpb cpb; - struct inic_prd prd[LIBATA_MAX_PRD + 1]; /* + 1 for cdb */ - u8 cdb[ATAPI_CDB_LEN]; -} __packed; - struct inic_host_priv { - void __iomem *mmio_base; - u16 cached_hctl; + u16 cached_hctl; }; struct inic_port_priv { - struct inic_pkt *pkt; - dma_addr_t pkt_dma; - u32 *cpb_tbl; - dma_addr_t cpb_tbl_dma; + u8 dfl_prdctl; + u8 cached_prdctl; + u8 cached_pirq_mask; }; static struct scsi_host_template inic_sht = { - ATA_BASE_SHT(DRV_NAME), - .sg_tablesize = LIBATA_MAX_PRD, /* maybe it can be larger? 
*/ - .dma_boundary = INIC_DMA_BOUNDARY, + ATA_BMDMA_SHT(DRV_NAME), }; static const int scr_map[] = { @@ -243,34 +120,54 @@ static const int scr_map[] = { static void __iomem *inic_port_base(struct ata_port *ap) { - struct inic_host_priv *hpriv = ap->host->private_data; + return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE; +} + +static void __inic_set_pirq_mask(struct ata_port *ap, u8 mask) +{ + void __iomem *port_base = inic_port_base(ap); + struct inic_port_priv *pp = ap->private_data; - return hpriv->mmio_base + ap->port_no * PORT_SIZE; + writeb(mask, port_base + PORT_IRQ_MASK); + pp->cached_pirq_mask = mask; +} + +static void inic_set_pirq_mask(struct ata_port *ap, u8 mask) +{ + struct inic_port_priv *pp = ap->private_data; + + if (pp->cached_pirq_mask != mask) + __inic_set_pirq_mask(ap, mask); } static void inic_reset_port(void __iomem *port_base) { void __iomem *idma_ctl = port_base + PORT_IDMA_CTL; + u16 ctl; - /* stop IDMA engine */ - readw(idma_ctl); /* flush */ - msleep(1); + ctl = readw(idma_ctl); + ctl &= ~(IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN | IDMA_CTL_GO); /* mask IRQ and assert reset */ - writew(IDMA_CTL_RST_IDMA, idma_ctl); + writew(ctl | IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN, idma_ctl); readw(idma_ctl); /* flush */ + + /* give it some time */ msleep(1); /* release reset */ - writew(0, idma_ctl); + writew(ctl | IDMA_CTL_ATA_NIEN, idma_ctl); /* clear irq */ writeb(0xff, port_base + PORT_IRQ_STAT); + + /* reenable ATA IRQ, turn off IDMA mode */ + writew(ctl, idma_ctl); } static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val) { - void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR; + void __iomem *scr_addr = ap->ioaddr.scr_addr; void __iomem *addr; if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) @@ -287,126 +184,120 @@ static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val) static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) { - void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR; + void __iomem *scr_addr = ap->ioaddr.scr_addr; + void __iomem *addr; if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) return -EINVAL; + addr = scr_addr + scr_map[sc_reg] * 4; writel(val, scr_addr + scr_map[sc_reg] * 4); return 0; } -static void inic_stop_idma(struct ata_port *ap) -{ - void __iomem *port_base = inic_port_base(ap); - - readb(port_base + PORT_RPQ_FIFO); - readb(port_base + PORT_RPQ_CNT); - writew(0, port_base + PORT_IDMA_CTL); -} - -static void inic_host_err_intr(struct ata_port *ap, u8 irq_stat, u16 idma_stat) +/* + * In TF mode, inic162x is very similar to SFF device. TF registers + * function the same. DMA engine behaves similary using the same PRD + * format as BMDMA but different command register, interrupt and event + * notification methods are used. The following inic_bmdma_*() + * functions do the impedance matching. 
+ */ +static void inic_bmdma_setup(struct ata_queued_cmd *qc) { - struct ata_eh_info *ehi = &ap->link.eh_info; + struct ata_port *ap = qc->ap; struct inic_port_priv *pp = ap->private_data; - struct inic_cpb *cpb = &pp->pkt->cpb; - bool freeze = false; + void __iomem *port_base = inic_port_base(ap); + int rw = qc->tf.flags & ATA_TFLAG_WRITE; - ata_ehi_clear_desc(ehi); - ata_ehi_push_desc(ehi, "irq_stat=0x%x idma_stat=0x%x", - irq_stat, idma_stat); + /* make sure device sees PRD table writes */ + wmb(); - inic_stop_idma(ap); + /* load transfer length */ + writel(qc->nbytes, port_base + PORT_PRD_XFERLEN); - if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) { - ata_ehi_push_desc(ehi, "hotplug"); - ata_ehi_hotplugged(ehi); - freeze = true; - } - - if (idma_stat & IDMA_STAT_PERR) { - ata_ehi_push_desc(ehi, "PCI error"); - freeze = true; - } + /* turn on DMA and specify data direction */ + pp->cached_prdctl = pp->dfl_prdctl | PRD_CTL_DMAEN; + if (!rw) + pp->cached_prdctl |= PRD_CTL_WR; + writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL); - if (idma_stat & IDMA_STAT_CPBERR) { - ata_ehi_push_desc(ehi, "CPB error"); + /* issue r/w command */ + ap->ops->sff_exec_command(ap, &qc->tf); +} - if (cpb->resp_flags & CPB_RESP_IGNORED) { - __ata_ehi_push_desc(ehi, " ignored"); - ehi->err_mask |= AC_ERR_INVALID; - freeze = true; - } +static void inic_bmdma_start(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct inic_port_priv *pp = ap->private_data; + void __iomem *port_base = inic_port_base(ap); - if (cpb->resp_flags & CPB_RESP_ATA_ERR) - ehi->err_mask |= AC_ERR_DEV; + /* start host DMA transaction */ + pp->cached_prdctl |= PRD_CTL_START; + writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL); +} - if (cpb->resp_flags & CPB_RESP_SPURIOUS) { - __ata_ehi_push_desc(ehi, " spurious-intr"); - ehi->err_mask |= AC_ERR_HSM; - freeze = true; - } +static void inic_bmdma_stop(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct inic_port_priv *pp = ap->private_data; + void __iomem *port_base = inic_port_base(ap); - if (cpb->resp_flags & - (CPB_RESP_UNDERFLOW | CPB_RESP_OVERFLOW)) { - __ata_ehi_push_desc(ehi, " data-over/underflow"); - ehi->err_mask |= AC_ERR_HSM; - freeze = true; - } - } + /* stop DMA engine */ + writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL); +} - if (freeze) - ata_port_freeze(ap); - else - ata_port_abort(ap); +static u8 inic_bmdma_status(struct ata_port *ap) +{ + /* event is already verified by the interrupt handler */ + return ATA_DMA_INTR; } static void inic_host_intr(struct ata_port *ap) { void __iomem *port_base = inic_port_base(ap); - struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); + struct ata_eh_info *ehi = &ap->link.eh_info; u8 irq_stat; - u16 idma_stat; - /* read and clear IRQ status */ + /* fetch and clear irq */ irq_stat = readb(port_base + PORT_IRQ_STAT); writeb(irq_stat, port_base + PORT_IRQ_STAT); - idma_stat = readw(port_base + PORT_IDMA_STAT); - - if (unlikely((irq_stat & PIRQ_ERR) || (idma_stat & IDMA_STAT_ERR))) - inic_host_err_intr(ap, irq_stat, idma_stat); - if (unlikely(!qc)) - goto spurious; + if (likely(!(irq_stat & PIRQ_ERR))) { + struct ata_queued_cmd *qc = + ata_qc_from_tag(ap, ap->link.active_tag); - if (likely(idma_stat & IDMA_STAT_DONE)) { - inic_stop_idma(ap); + if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { + ap->ops->sff_check_status(ap); /* clear ATA interrupt */ + return; + } - /* Depending on circumstances, device error - * isn't reported by IDMA, check it explicitly. 
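The inic_bmdma_setup()/inic_bmdma_start() pair above splits the DMA kick-off in two: setup programs the transfer length and the direction bits, and start only flips the START bit afterwards. A standalone sketch of that two-phase pattern (registers are faked, names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define CTL_DMAEN (1 << 0)
#define CTL_WR    (1 << 1)       /* device-to-memory direction */
#define CTL_START (1 << 2)

static uint32_t reg_xferlen;
static uint8_t  reg_prdctl;

struct chan {
        uint8_t dfl_ctl;         /* default control bits, DMAEN/WR/START clear */
        uint8_t cached_ctl;
};

static void dma_setup(struct chan *c, uint32_t nbytes, int write_to_dev)
{
        reg_xferlen = nbytes;                     /* load transfer length */
        c->cached_ctl = c->dfl_ctl | CTL_DMAEN;   /* enable DMA */
        if (!write_to_dev)
                c->cached_ctl |= CTL_WR;          /* read: device writes memory */
        reg_prdctl = c->cached_ctl;
}

static void dma_start(struct chan *c)
{
        c->cached_ctl |= CTL_START;               /* now let the engine run */
        reg_prdctl = c->cached_ctl;
}

int main(void)
{
        struct chan c = { .dfl_ctl = 0 };

        dma_setup(&c, 4096, 0);
        dma_start(&c);
        printf("PRD_CTL = 0x%02x, len = %u\n",
               (unsigned)reg_prdctl, (unsigned)reg_xferlen);
        return 0;
}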
- */ - if (unlikely(readb(port_base + PORT_TF_COMMAND) & - (ATA_DF | ATA_ERR))) - qc->err_mask |= AC_ERR_DEV; + if (likely(ata_sff_host_intr(ap, qc))) + return; - ata_qc_complete(qc); + ap->ops->sff_check_status(ap); /* clear ATA interrupt */ + ata_port_printk(ap, KERN_WARNING, "unhandled " + "interrupt, irq_stat=%x\n", irq_stat); return; } - spurious: - ata_port_printk(ap, KERN_WARNING, "unhandled interrupt: " - "cmd=0x%x irq_stat=0x%x idma_stat=0x%x\n", - qc ? qc->tf.command : 0xff, irq_stat, idma_stat); + /* error */ + ata_ehi_push_desc(ehi, "irq_stat=0x%x", irq_stat); + + if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) { + ata_ehi_hotplugged(ehi); + ata_port_freeze(ap); + } else + ata_port_abort(ap); } static irqreturn_t inic_interrupt(int irq, void *dev_instance) { struct ata_host *host = dev_instance; - struct inic_host_priv *hpriv = host->private_data; + void __iomem *mmio_base = host->iomap[MMIO_BAR]; u16 host_irq_stat; int i, handled = 0;; - host_irq_stat = readw(hpriv->mmio_base + HOST_IRQ_STAT); + host_irq_stat = readw(mmio_base + HOST_IRQ_STAT); if (unlikely(!(host_irq_stat & HIRQ_GLOBAL))) goto out; @@ -436,173 +327,60 @@ static irqreturn_t inic_interrupt(int irq, void *dev_instance) return IRQ_RETVAL(handled); } -static int inic_check_atapi_dma(struct ata_queued_cmd *qc) -{ - /* For some reason ATAPI_PROT_DMA doesn't work for some - * commands including writes and other misc ops. Use PIO - * protocol instead, which BTW is driven by the DMA engine - * anyway, so it shouldn't make much difference for native - * SATA devices. - */ - if (atapi_cmd_type(qc->cdb[0]) == READ) - return 0; - return 1; -} - -static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc) -{ - struct scatterlist *sg; - unsigned int si; - u8 flags = 0; - - if (qc->tf.flags & ATA_TFLAG_WRITE) - flags |= PRD_WRITE; - - if (ata_is_dma(qc->tf.protocol)) - flags |= PRD_DMA; - - for_each_sg(qc->sg, sg, qc->n_elem, si) { - prd->mad = cpu_to_le32(sg_dma_address(sg)); - prd->len = cpu_to_le16(sg_dma_len(sg)); - prd->flags = flags; - prd++; - } - - WARN_ON(!si); - prd[-1].flags |= PRD_END; -} - -static void inic_qc_prep(struct ata_queued_cmd *qc) -{ - struct inic_port_priv *pp = qc->ap->private_data; - struct inic_pkt *pkt = pp->pkt; - struct inic_cpb *cpb = &pkt->cpb; - struct inic_prd *prd = pkt->prd; - bool is_atapi = ata_is_atapi(qc->tf.protocol); - bool is_data = ata_is_data(qc->tf.protocol); - unsigned int cdb_len = 0; - - VPRINTK("ENTER\n"); - - if (is_atapi) - cdb_len = qc->dev->cdb_len; - - /* prepare packet, based on initio driver */ - memset(pkt, 0, sizeof(struct inic_pkt)); - - cpb->ctl_flags = CPB_CTL_VALID | CPB_CTL_IEN; - if (is_atapi || is_data) - cpb->ctl_flags |= CPB_CTL_DATA; - - cpb->len = cpu_to_le32(qc->nbytes + cdb_len); - cpb->prd = cpu_to_le32(pp->pkt_dma + offsetof(struct inic_pkt, prd)); - - cpb->device = qc->tf.device; - cpb->feature = qc->tf.feature; - cpb->nsect = qc->tf.nsect; - cpb->lbal = qc->tf.lbal; - cpb->lbam = qc->tf.lbam; - cpb->lbah = qc->tf.lbah; - - if (qc->tf.flags & ATA_TFLAG_LBA48) { - cpb->hob_feature = qc->tf.hob_feature; - cpb->hob_nsect = qc->tf.hob_nsect; - cpb->hob_lbal = qc->tf.hob_lbal; - cpb->hob_lbam = qc->tf.hob_lbam; - cpb->hob_lbah = qc->tf.hob_lbah; - } - - cpb->command = qc->tf.command; - /* don't load ctl - dunno why. 
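inic_interrupt() above demultiplexes one shared IRQ line: it bails out unless the global pending bit of HOST_IRQ_STAT is set, then walks the per-port bits and services each interrupting port. A minimal standalone model of that shape (the bit layout here is purely for illustration and does not reflect the real register format):

#include <stdint.h>
#include <stdio.h>

#define HIRQ_GLOBAL (1u << 31)
#define NR_PORTS    2

static void port_intr(int port)
{
        printf("servicing port %d\n", port);
}

static int host_intr(uint32_t host_irq_stat)
{
        int i, handled = 0;

        if (!(host_irq_stat & HIRQ_GLOBAL))
                return 0;                           /* interrupt is not ours */

        for (i = 0; i < NR_PORTS; i++) {
                if (host_irq_stat & (1u << i)) {    /* per-port pending bit */
                        port_intr(i);
                        handled = 1;
                }
        }
        return handled;
}

int main(void)
{
        printf("handled=%d\n", host_intr(HIRQ_GLOBAL | 0x2));
        return 0;
}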
it's like that in the initio driver */ - - /* setup PRD for CDB */ - if (is_atapi) { - memcpy(pkt->cdb, qc->cdb, ATAPI_CDB_LEN); - prd->mad = cpu_to_le32(pp->pkt_dma + - offsetof(struct inic_pkt, cdb)); - prd->len = cpu_to_le16(cdb_len); - prd->flags = PRD_CDB | PRD_WRITE; - if (!is_data) - prd->flags |= PRD_END; - prd++; - } - - /* setup sg table */ - if (is_data) - inic_fill_sg(prd, qc); - - pp->cpb_tbl[0] = pp->pkt_dma; -} - static unsigned int inic_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; - void __iomem *port_base = inic_port_base(ap); - - /* fire up the ADMA engine */ - writew(HCTL_FTHD0, port_base + HOST_CTL); - writew(IDMA_CTL_GO, port_base + PORT_IDMA_CTL); - writeb(0, port_base + PORT_CPB_PTQFIFO); - - return 0; -} - -static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf) -{ - void __iomem *port_base = inic_port_base(ap); - - tf->feature = readb(port_base + PORT_TF_FEATURE); - tf->nsect = readb(port_base + PORT_TF_NSECT); - tf->lbal = readb(port_base + PORT_TF_LBAL); - tf->lbam = readb(port_base + PORT_TF_LBAM); - tf->lbah = readb(port_base + PORT_TF_LBAH); - tf->device = readb(port_base + PORT_TF_DEVICE); - tf->command = readb(port_base + PORT_TF_COMMAND); -} -static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc) -{ - struct ata_taskfile *rtf = &qc->result_tf; - struct ata_taskfile tf; - - /* FIXME: Except for status and error, result TF access - * doesn't work. I tried reading from BAR0/2, CPB and BAR5. - * None works regardless of which command interface is used. - * For now return true iff status indicates device error. - * This means that we're reporting bogus sector for RW - * failures. Eeekk.... + /* ATA IRQ doesn't wait for DMA transfer completion and vice + * versa. Mask IRQ selectively to detect command completion. + * Without it, ATA DMA read command can cause data corruption. + * + * Something similar might be needed for ATAPI writes. I + * tried a lot of combinations but couldn't find the solution. */ - inic_tf_read(qc->ap, &tf); + if (qc->tf.protocol == ATA_PROT_DMA && + !(qc->tf.flags & ATA_TFLAG_WRITE)) + inic_set_pirq_mask(ap, PIRQ_MASK_DMA_READ); + else + inic_set_pirq_mask(ap, PIRQ_MASK_OTHER); - if (!(tf.command & ATA_ERR)) - return false; + /* Issuing a command to yet uninitialized port locks up the + * controller. Most of the time, this happens for the first + * command after reset which are ATA and ATAPI IDENTIFYs. + * Fast fail if stat is 0x7f or 0xff for those commands. 
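The comment above describes the fast-fail rule enforced at the top of inic_qc_issue(): refuse to send IDENTIFY when the status register reads 0x7f or 0xff, since the port has nothing sane behind it yet. A compact standalone rendition of that check (0xEC and 0xA1 are the standard ATA/ATAPI IDENTIFY opcodes; everything else is a stand-in):

#include <stdint.h>
#include <stdio.h>

#define CMD_ID_ATA   0xEC
#define CMD_ID_ATAPI 0xA1

static int issue_cmd(uint8_t cmd, uint8_t status)
{
        if (cmd == CMD_ID_ATA || cmd == CMD_ID_ATAPI) {
                if (status == 0x7f || status == 0xff)
                        return -1;        /* port not initialized, bail early */
        }
        /* ... hand the command to the normal issue path ... */
        return 0;
}

int main(void)
{
        printf("%d\n", issue_cmd(CMD_ID_ATA, 0xff));   /* -1: fast fail */
        printf("%d\n", issue_cmd(CMD_ID_ATA, 0x50));   /*  0: issued    */
        return 0;
}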
+ */ + if (unlikely(qc->tf.command == ATA_CMD_ID_ATA || + qc->tf.command == ATA_CMD_ID_ATAPI)) { + u8 stat = ap->ops->sff_check_status(ap); + if (stat == 0x7f || stat == 0xff) + return AC_ERR_HSM; + } - rtf->command = tf.command; - rtf->feature = tf.feature; - return true; + return ata_sff_qc_issue(qc); } static void inic_freeze(struct ata_port *ap) { void __iomem *port_base = inic_port_base(ap); - writeb(PIRQ_MASK_FREEZE, port_base + PORT_IRQ_MASK); + __inic_set_pirq_mask(ap, PIRQ_MASK_FREEZE); + + ap->ops->sff_check_status(ap); writeb(0xff, port_base + PORT_IRQ_STAT); + + readb(port_base + PORT_IRQ_STAT); /* flush */ } static void inic_thaw(struct ata_port *ap) { void __iomem *port_base = inic_port_base(ap); + ap->ops->sff_check_status(ap); writeb(0xff, port_base + PORT_IRQ_STAT); - writeb(PIRQ_MASK_DEFAULT, port_base + PORT_IRQ_MASK); -} -static int inic_check_ready(struct ata_link *link) -{ - void __iomem *port_base = inic_port_base(link->ap); + __inic_set_pirq_mask(ap, PIRQ_MASK_OTHER); - return ata_check_ready(readb(port_base + PORT_TF_COMMAND)); + readb(port_base + PORT_IRQ_STAT); /* flush */ } /* @@ -616,15 +394,17 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class, void __iomem *port_base = inic_port_base(ap); void __iomem *idma_ctl = port_base + PORT_IDMA_CTL; const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); + u16 val; int rc; /* hammer it into sane state */ inic_reset_port(port_base); - writew(IDMA_CTL_RST_ATA, idma_ctl); + val = readw(idma_ctl); + writew(val | IDMA_CTL_RST_ATA, idma_ctl); readw(idma_ctl); /* flush */ msleep(1); - writew(0, idma_ctl); + writew(val & ~IDMA_CTL_RST_ATA, idma_ctl); rc = sata_link_resume(link, timing, deadline); if (rc) { @@ -638,7 +418,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class, struct ata_taskfile tf; /* wait for link to become ready */ - rc = ata_wait_after_reset(link, deadline, inic_check_ready); + rc = ata_sff_wait_after_reset(link, 1, deadline); /* link occupied, -ENODEV too is an error */ if (rc) { ata_link_printk(link, KERN_WARNING, "device not ready " @@ -646,7 +426,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class, return rc; } - inic_tf_read(ap, &tf); + ata_sff_tf_read(ap, &tf); *class = ata_dev_classify(&tf); } @@ -656,8 +436,18 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class, static void inic_error_handler(struct ata_port *ap) { void __iomem *port_base = inic_port_base(ap); + struct inic_port_priv *pp = ap->private_data; + unsigned long flags; + /* reset PIO HSM and stop DMA engine */ inic_reset_port(port_base); + + spin_lock_irqsave(ap->lock, flags); + ap->hsm_task_state = HSM_ST_IDLE; + writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL); + spin_unlock_irqrestore(ap->lock, flags); + + /* PIO and DMA engines have been stopped, perform recovery */ ata_std_error_handler(ap); } @@ -668,18 +458,26 @@ static void inic_post_internal_cmd(struct ata_queued_cmd *qc) inic_reset_port(inic_port_base(qc->ap)); } +static void inic_dev_config(struct ata_device *dev) +{ + /* inic can only handle upto LBA28 max sectors */ + if (dev->max_sectors > ATA_MAX_SECTORS) + dev->max_sectors = ATA_MAX_SECTORS; + + if (dev->n_sectors >= 1 << 28) { + ata_dev_printk(dev, KERN_ERR, + "ERROR: This driver doesn't support LBA48 yet and may cause\n" + " data corruption on such devices. 
Disabling.\n"); + ata_dev_disable(dev); + } +} + static void init_port(struct ata_port *ap) { void __iomem *port_base = inic_port_base(ap); - struct inic_port_priv *pp = ap->private_data; - /* clear packet and CPB table */ - memset(pp->pkt, 0, sizeof(struct inic_pkt)); - memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE); - - /* setup PRD and CPB lookup table addresses */ + /* Setup PRD address */ writel(ap->prd_dma, port_base + PORT_PRD_ADDR); - writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR); } static int inic_port_resume(struct ata_port *ap) @@ -690,30 +488,28 @@ static int inic_port_resume(struct ata_port *ap) static int inic_port_start(struct ata_port *ap) { - struct device *dev = ap->host->dev; + void __iomem *port_base = inic_port_base(ap); struct inic_port_priv *pp; + u8 tmp; int rc; /* alloc and initialize private data */ - pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); + pp = devm_kzalloc(ap->host->dev, sizeof(*pp), GFP_KERNEL); if (!pp) return -ENOMEM; ap->private_data = pp; + /* default PRD_CTL value, DMAEN, WR and START off */ + tmp = readb(port_base + PORT_PRD_CTL); + tmp &= ~(PRD_CTL_DMAEN | PRD_CTL_WR | PRD_CTL_START); + pp->dfl_prdctl = tmp; + /* Alloc resources */ rc = ata_port_start(ap); - if (rc) + if (rc) { + kfree(pp); return rc; - - pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt), - &pp->pkt_dma, GFP_KERNEL); - if (!pp->pkt) - return -ENOMEM; - - pp->cpb_tbl = dmam_alloc_coherent(dev, IDMA_CPB_TBL_SIZE, - &pp->cpb_tbl_dma, GFP_KERNEL); - if (!pp->cpb_tbl) - return -ENOMEM; + } init_port(ap); @@ -721,18 +517,21 @@ static int inic_port_start(struct ata_port *ap) } static struct ata_port_operations inic_port_ops = { - .inherits = &sata_port_ops, + .inherits = &ata_sff_port_ops, - .check_atapi_dma = inic_check_atapi_dma, - .qc_prep = inic_qc_prep, + .bmdma_setup = inic_bmdma_setup, + .bmdma_start = inic_bmdma_start, + .bmdma_stop = inic_bmdma_stop, + .bmdma_status = inic_bmdma_status, .qc_issue = inic_qc_issue, - .qc_fill_rtf = inic_qc_fill_rtf, .freeze = inic_freeze, .thaw = inic_thaw, + .softreset = ATA_OP_NULL, /* softreset is broken */ .hardreset = inic_hardreset, .error_handler = inic_error_handler, .post_internal_cmd = inic_post_internal_cmd, + .dev_config = inic_dev_config, .scr_read = inic_scr_read, .scr_write = inic_scr_write, @@ -742,6 +541,12 @@ static struct ata_port_operations inic_port_ops = { }; static struct ata_port_info inic_port_info = { + /* For some reason, ATAPI_PROT_PIO is broken on this + * controller, and no, PIO_POLLING does't fix it. It somehow + * manages to report the wrong ireason and ignoring ireason + * results in machine lock up. Tell libata to always prefer + * DMA. 
+ */ .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA, .pio_mask = 0x1f, /* pio0-4 */ .mwdma_mask = 0x07, /* mwdma0-2 */ @@ -794,6 +599,7 @@ static int inic_pci_device_resume(struct pci_dev *pdev) { struct ata_host *host = dev_get_drvdata(&pdev->dev); struct inic_host_priv *hpriv = host->private_data; + void __iomem *mmio_base = host->iomap[MMIO_BAR]; int rc; rc = ata_pci_device_do_resume(pdev); @@ -801,7 +607,7 @@ static int inic_pci_device_resume(struct pci_dev *pdev) return rc; if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { - rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl); + rc = init_controller(mmio_base, hpriv->cached_hctl); if (rc) return rc; } @@ -819,7 +625,6 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) struct ata_host *host; struct inic_host_priv *hpriv; void __iomem * const *iomap; - int mmio_bar; int i, rc; if (!printed_version++) @@ -833,32 +638,39 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) host->private_data = hpriv; - /* Acquire resources and fill host. Note that PCI and cardbus - * use different BARs. - */ + /* acquire resources and fill host */ rc = pcim_enable_device(pdev); if (rc) return rc; - if (pci_resource_flags(pdev, MMIO_BAR_PCI) & IORESOURCE_MEM) - mmio_bar = MMIO_BAR_PCI; - else - mmio_bar = MMIO_BAR_CARDBUS; - - rc = pcim_iomap_regions(pdev, 1 << mmio_bar, DRV_NAME); + rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME); if (rc) return rc; host->iomap = iomap = pcim_iomap_table(pdev); - hpriv->mmio_base = iomap[mmio_bar]; - hpriv->cached_hctl = readw(hpriv->mmio_base + HOST_CTL); for (i = 0; i < NR_PORTS; i++) { struct ata_port *ap = host->ports[i]; - - ata_port_pbar_desc(ap, mmio_bar, -1, "mmio"); - ata_port_pbar_desc(ap, mmio_bar, i * PORT_SIZE, "port"); + struct ata_ioports *port = &ap->ioaddr; + unsigned int offset = i * PORT_SIZE; + + port->cmd_addr = iomap[2 * i]; + port->altstatus_addr = + port->ctl_addr = (void __iomem *) + ((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS); + port->scr_addr = iomap[MMIO_BAR] + offset + PORT_SCR; + + ata_sff_std_ports(port); + + ata_port_pbar_desc(ap, MMIO_BAR, -1, "mmio"); + ata_port_pbar_desc(ap, MMIO_BAR, offset, "port"); + ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", + (unsigned long long)pci_resource_start(pdev, 2 * i), + (unsigned long long)pci_resource_start(pdev, (2 * i + 1)) | + ATA_PCI_CTL_OFS); } + hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL); + /* Set dma_mask. This devices doesn't support 64bit addressing. 
*/ rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); if (rc) { @@ -886,7 +698,7 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return rc; } - rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl); + rc = init_controller(iomap[MMIO_BAR], hpriv->cached_hctl); if (rc) { dev_printk(KERN_ERR, &pdev->dev, "failed to initialize controller\n"); diff --git a/trunk/drivers/ata/sata_mv.c b/trunk/drivers/ata/sata_mv.c index bb73b2222627..842b1a15b78c 100644 --- a/trunk/drivers/ata/sata_mv.c +++ b/trunk/drivers/ata/sata_mv.c @@ -65,7 +65,6 @@ #include #include #include -#include #include #include #include @@ -92,9 +91,9 @@ enum { MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0), MV_SATAHC0_REG_BASE = 0x20000, - MV_FLASH_CTL_OFS = 0x1046c, - MV_GPIO_PORT_CTL_OFS = 0x104f0, - MV_RESET_CFG_OFS = 0x180d8, + MV_FLASH_CTL = 0x1046c, + MV_GPIO_PORT_CTL = 0x104f0, + MV_RESET_CFG = 0x180d8, MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ, MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ, @@ -148,21 +147,18 @@ enum { /* PCI interface registers */ PCI_COMMAND_OFS = 0xc00, - PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */ PCI_MAIN_CMD_STS_OFS = 0xd30, STOP_PCI_MASTER = (1 << 2), PCI_MASTER_EMPTY = (1 << 3), GLOB_SFT_RST = (1 << 4), - MV_PCI_MODE_OFS = 0xd00, - MV_PCI_MODE_MASK = 0x30, - + MV_PCI_MODE = 0xd00, MV_PCI_EXP_ROM_BAR_CTL = 0xd2c, MV_PCI_DISC_TIMER = 0xd04, MV_PCI_MSI_TRIGGER = 0xc38, MV_PCI_SERR_MASK = 0xc28, - MV_PCI_XBAR_TMOUT_OFS = 0x1d04, + MV_PCI_XBAR_TMOUT = 0x1d04, MV_PCI_ERR_LOW_ADDRESS = 0x1d40, MV_PCI_ERR_HIGH_ADDRESS = 0x1d44, MV_PCI_ERR_ATTRIBUTE = 0x1d48, @@ -229,18 +225,16 @@ enum { PHY_MODE4 = 0x314, PHY_MODE2 = 0x330, SATA_IFCTL_OFS = 0x344, - SATA_TESTCTL_OFS = 0x348, SATA_IFSTAT_OFS = 0x34c, VENDOR_UNIQUE_FIS_OFS = 0x35c, - FISCFG_OFS = 0x360, - FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */ - FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */ + FIS_CFG_OFS = 0x360, + FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */ MV5_PHY_MODE = 0x74, - MV5_LTMODE_OFS = 0x30, - MV5_PHY_CTL_OFS = 0x0C, - SATA_INTERFACE_CFG_OFS = 0x050, + MV5_LT_MODE = 0x30, + MV5_PHY_CTL = 0x0C, + SATA_INTERFACE_CFG = 0x050, MV_M2_PREAMP_MASK = 0x7e0, @@ -338,16 +332,10 @@ enum { EDMA_CMD_OFS = 0x28, /* EDMA command register */ EDMA_EN = (1 << 0), /* enable EDMA */ EDMA_DS = (1 << 1), /* disable EDMA; self-negated */ - EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */ - - EDMA_STATUS_OFS = 0x30, /* EDMA engine status */ - EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */ - EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */ + ATA_RST = (1 << 2), /* reset trans/link/phy */ - EDMA_IORDY_TMOUT_OFS = 0x34, - EDMA_ARB_CFG_OFS = 0x38, - - EDMA_HALTCOND_OFS = 0x60, /* GenIIe halt conditions */ + EDMA_IORDY_TMOUT = 0x34, + EDMA_ARB_CFG = 0x38, GEN_II_NCQ_MAX_SECTORS = 256, /* max sects/io on Gen2 w/NCQ */ @@ -362,19 +350,15 @@ enum { MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ - MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */ /* Port private flags (pp_flags) */ MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ - MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? 
*/ - MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */ }; #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) -#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE) #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC)) #define WINDOW_CTRL(i) (0x20030 + ((i) << 4)) @@ -449,7 +433,6 @@ struct mv_port_priv { unsigned int resp_idx; u32 pp_flags; - unsigned int delayed_eh_pmp_map; }; struct mv_port_signal { @@ -496,7 +479,6 @@ static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val); static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); static int mv_port_start(struct ata_port *ap); static void mv_port_stop(struct ata_port *ap); -static int mv_qc_defer(struct ata_queued_cmd *qc); static void mv_qc_prep(struct ata_queued_cmd *qc); static void mv_qc_prep_iie(struct ata_queued_cmd *qc); static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); @@ -545,9 +527,6 @@ static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); static int mv_softreset(struct ata_link *link, unsigned int *class, unsigned long deadline); -static void mv_pmp_error_handler(struct ata_port *ap); -static void mv_process_crpb_entries(struct ata_port *ap, - struct mv_port_priv *pp); /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below * because we have to allow room for worst case splitting of @@ -569,7 +548,6 @@ static struct scsi_host_template mv6_sht = { static struct ata_port_operations mv5_ops = { .inherits = &ata_sff_port_ops, - .qc_defer = mv_qc_defer, .qc_prep = mv_qc_prep, .qc_issue = mv_qc_issue, @@ -588,6 +566,7 @@ static struct ata_port_operations mv5_ops = { static struct ata_port_operations mv6_ops = { .inherits = &mv5_ops, + .qc_defer = sata_pmp_qc_defer_cmd_switch, .dev_config = mv6_dev_config, .scr_read = mv_scr_read, .scr_write = mv_scr_write, @@ -595,11 +574,12 @@ static struct ata_port_operations mv6_ops = { .pmp_hardreset = mv_pmp_hardreset, .pmp_softreset = mv_softreset, .softreset = mv_softreset, - .error_handler = mv_pmp_error_handler, + .error_handler = sata_pmp_error_handler, }; static struct ata_port_operations mv_iie_ops = { .inherits = &mv6_ops, + .qc_defer = ata_std_qc_defer, /* FIS-based switching */ .dev_config = ATA_OP_NULL, .qc_prep = mv_qc_prep_iie, }; @@ -895,29 +875,6 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio, } } -static void mv_wait_for_edma_empty_idle(struct ata_port *ap) -{ - void __iomem *port_mmio = mv_ap_base(ap); - const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE); - const int per_loop = 5, timeout = (15 * 1000 / per_loop); - int i; - - /* - * Wait for the EDMA engine to finish transactions in progress. - * No idea what a good "timeout" value might be, but measurements - * indicate that it often requires hundreds of microseconds - * with two drives in-use. So we use the 15msec value above - * as a rough guess at what even more drives might require. 
- */ - for (i = 0; i < timeout; ++i) { - u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS); - if ((edma_stat & empty_idle) == empty_idle) - break; - udelay(per_loop); - } - /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */ -} - /** * mv_stop_edma_engine - Disable eDMA engine * @port_mmio: io base address @@ -950,7 +907,6 @@ static int mv_stop_edma(struct ata_port *ap) if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) return 0; pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; - mv_wait_for_edma_empty_idle(ap); if (mv_stop_edma_engine(port_mmio)) { ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); return -EIO; @@ -1101,95 +1057,26 @@ static void mv6_dev_config(struct ata_device *adev) } } -static int mv_qc_defer(struct ata_queued_cmd *qc) +static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs) { - struct ata_link *link = qc->dev->link; - struct ata_port *ap = link->ap; - struct mv_port_priv *pp = ap->private_data; - - /* - * Don't allow new commands if we're in a delayed EH state - * for NCQ and/or FIS-based switching. - */ - if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) - return ATA_DEFER_PORT; + u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode; /* - * If the port is completely idle, then allow the new qc. + * Various bit settings required for operation + * in FIS-based switching (fbs) mode on GenIIe: */ - if (ap->nr_active_links == 0) - return 0; - - if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { - /* - * The port is operating in host queuing mode (EDMA). - * It can accomodate a new qc if the qc protocol - * is compatible with the current host queue mode. - */ - if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) { - /* - * The host queue (EDMA) is in NCQ mode. - * If the new qc is also an NCQ command, - * then allow the new qc. - */ - if (qc->tf.protocol == ATA_PROT_NCQ) - return 0; - } else { - /* - * The host queue (EDMA) is in non-NCQ, DMA mode. - * If the new qc is also a non-NCQ, DMA command, - * then allow the new qc. 
- */ - if (qc->tf.protocol == ATA_PROT_DMA) - return 0; - } - } - return ATA_DEFER_PORT; -} - -static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs) -{ - u32 new_fiscfg, old_fiscfg; - u32 new_ltmode, old_ltmode; - u32 new_haltcond, old_haltcond; - - old_fiscfg = readl(port_mmio + FISCFG_OFS); - old_ltmode = readl(port_mmio + LTMODE_OFS); - old_haltcond = readl(port_mmio + EDMA_HALTCOND_OFS); - - new_fiscfg = old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR); - new_ltmode = old_ltmode & ~LTMODE_BIT8; - new_haltcond = old_haltcond | EDMA_ERR_DEV; - - if (want_fbs) { - new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC; - new_ltmode = old_ltmode | LTMODE_BIT8; - if (want_ncq) - new_haltcond &= ~EDMA_ERR_DEV; - else - new_fiscfg |= FISCFG_WAIT_DEV_ERR; - } - - if (new_fiscfg != old_fiscfg) - writelfl(new_fiscfg, port_mmio + FISCFG_OFS); + old_fcfg = readl(port_mmio + FIS_CFG_OFS); + old_ltmode = readl(port_mmio + LTMODE_OFS); + if (enable_fbs) { + new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC; + new_ltmode = old_ltmode | LTMODE_BIT8; + } else { /* disable fbs */ + new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC; + new_ltmode = old_ltmode & ~LTMODE_BIT8; + } + if (new_fcfg != old_fcfg) + writelfl(new_fcfg, port_mmio + FIS_CFG_OFS); if (new_ltmode != old_ltmode) writelfl(new_ltmode, port_mmio + LTMODE_OFS); - if (new_haltcond != old_haltcond) - writelfl(new_haltcond, port_mmio + EDMA_HALTCOND_OFS); -} - -static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq) -{ - struct mv_host_priv *hpriv = ap->host->private_data; - u32 old, new; - - /* workaround for 88SX60x1 FEr SATA#25 (part 1) */ - old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS); - if (want_ncq) - new = old | (1 << 22); - else - new = old & ~(1 << 22); - if (new != old) - writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS); } static void mv_edma_cfg(struct ata_port *ap, int want_ncq) @@ -1201,40 +1088,25 @@ static void mv_edma_cfg(struct ata_port *ap, int want_ncq) /* set up non-NCQ EDMA configuration */ cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ - pp->pp_flags &= ~MV_PP_FLAG_FBS_EN; if (IS_GEN_I(hpriv)) cfg |= (1 << 8); /* enab config burst size mask */ - else if (IS_GEN_II(hpriv)) { + else if (IS_GEN_II(hpriv)) cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; - mv_60x1_errata_sata25(ap, want_ncq); - } else if (IS_GEN_IIE(hpriv)) { - int want_fbs = sata_pmp_attached(ap); - /* - * Possible future enhancement: - * - * The chip can use FBS with non-NCQ, if we allow it, - * But first we need to have the error handling in place - * for this mode (datasheet section 7.3.15.4.2.3). - * So disallow non-NCQ FBS for now. 
- */ - want_fbs &= want_ncq; - - mv_config_fbs(port_mmio, want_ncq, want_fbs); + else if (IS_GEN_IIE(hpriv)) { + cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ + cfg |= (1 << 22); /* enab 4-entry host queue cache */ + cfg |= (1 << 18); /* enab early completion */ + cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */ - if (want_fbs) { - pp->pp_flags |= MV_PP_FLAG_FBS_EN; + if (want_ncq && sata_pmp_attached(ap)) { cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */ + mv_config_fbs(port_mmio, 1); + } else { + mv_config_fbs(port_mmio, 0); } - - cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ - cfg |= (1 << 22); /* enab 4-entry host queue cache */ - if (HAS_PCI(ap->host)) - cfg |= (1 << 18); /* enab early completion */ - if (hpriv->hp_flags & MV_HP_CUT_THROUGH) - cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */ } if (want_ncq) { @@ -1611,186 +1483,25 @@ static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) return qc; } -static void mv_pmp_error_handler(struct ata_port *ap) -{ - unsigned int pmp, pmp_map; - struct mv_port_priv *pp = ap->private_data; - - if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) { - /* - * Perform NCQ error analysis on failed PMPs - * before we freeze the port entirely. - * - * The failed PMPs are marked earlier by mv_pmp_eh_prep(). - */ - pmp_map = pp->delayed_eh_pmp_map; - pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH; - for (pmp = 0; pmp_map != 0; pmp++) { - unsigned int this_pmp = (1 << pmp); - if (pmp_map & this_pmp) { - struct ata_link *link = &ap->pmp_link[pmp]; - pmp_map &= ~this_pmp; - ata_eh_analyze_ncq_error(link); - } - } - ata_port_freeze(ap); - } - sata_pmp_error_handler(ap); -} - -static unsigned int mv_get_err_pmp_map(struct ata_port *ap) -{ - void __iomem *port_mmio = mv_ap_base(ap); - - return readl(port_mmio + SATA_TESTCTL_OFS) >> 16; -} - -static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map) -{ - struct ata_eh_info *ehi; - unsigned int pmp; - - /* - * Initialize EH info for PMPs which saw device errors - */ - ehi = &ap->link.eh_info; - for (pmp = 0; pmp_map != 0; pmp++) { - unsigned int this_pmp = (1 << pmp); - if (pmp_map & this_pmp) { - struct ata_link *link = &ap->pmp_link[pmp]; - - pmp_map &= ~this_pmp; - ehi = &link->eh_info; - ata_ehi_clear_desc(ehi); - ata_ehi_push_desc(ehi, "dev err"); - ehi->err_mask |= AC_ERR_DEV; - ehi->action |= ATA_EH_RESET; - ata_link_abort(link); - } - } -} - -static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap) +static void mv_unexpected_intr(struct ata_port *ap) { struct mv_port_priv *pp = ap->private_data; - int failed_links; - unsigned int old_map, new_map; - - /* - * Device error during FBS+NCQ operation: - * - * Set a port flag to prevent further I/O being enqueued. - * Leave the EDMA running to drain outstanding commands from this port. - * Perform the post-mortem/EH only when all responses are complete. - * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2). 
- */ - if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) { - pp->pp_flags |= MV_PP_FLAG_DELAYED_EH; - pp->delayed_eh_pmp_map = 0; - } - old_map = pp->delayed_eh_pmp_map; - new_map = old_map | mv_get_err_pmp_map(ap); - - if (old_map != new_map) { - pp->delayed_eh_pmp_map = new_map; - mv_pmp_eh_prep(ap, new_map & ~old_map); - } - failed_links = hweight16(new_map); - - ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x " - "failed_links=%d nr_active_links=%d\n", - __func__, pp->delayed_eh_pmp_map, - ap->qc_active, failed_links, - ap->nr_active_links); - - if (ap->nr_active_links <= failed_links) { - mv_process_crpb_entries(ap, pp); - mv_stop_edma(ap); - mv_eh_freeze(ap); - ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__); - return 1; /* handled */ - } - ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__); - return 1; /* handled */ -} + struct ata_eh_info *ehi = &ap->link.eh_info; + char *when = ""; -static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap) -{ /* - * Possible future enhancement: - * - * FBS+non-NCQ operation is not yet implemented. - * See related notes in mv_edma_cfg(). - * - * Device error during FBS+non-NCQ operation: - * - * We need to snapshot the shadow registers for each failed command. - * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3). + * We got a device interrupt from something that + * was supposed to be using EDMA or polling. */ - return 0; /* not handled */ -} - -static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause) -{ - struct mv_port_priv *pp = ap->private_data; - - if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) - return 0; /* EDMA was not active: not handled */ - if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN)) - return 0; /* FBS was not active: not handled */ - - if (!(edma_err_cause & EDMA_ERR_DEV)) - return 0; /* non DEV error: not handled */ - edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT; - if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS)) - return 0; /* other problems: not handled */ - - if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) { - /* - * EDMA should NOT have self-disabled for this case. - * If it did, then something is wrong elsewhere, - * and we cannot handle it here. - */ - if (edma_err_cause & EDMA_ERR_SELF_DIS) { - ata_port_printk(ap, KERN_WARNING, - "%s: err_cause=0x%x pp_flags=0x%x\n", - __func__, edma_err_cause, pp->pp_flags); - return 0; /* not handled */ - } - return mv_handle_fbs_ncq_dev_err(ap); - } else { - /* - * EDMA should have self-disabled for this case. - * If it did not, then something is wrong elsewhere, - * and we cannot handle it here. 
- */ - if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) { - ata_port_printk(ap, KERN_WARNING, - "%s: err_cause=0x%x pp_flags=0x%x\n", - __func__, edma_err_cause, pp->pp_flags); - return 0; /* not handled */ - } - return mv_handle_fbs_non_ncq_dev_err(ap); - } - return 0; /* not handled */ -} - -static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) -{ - struct ata_eh_info *ehi = &ap->link.eh_info; - char *when = "idle"; - ata_ehi_clear_desc(ehi); - if (!ap || (ap->flags & ATA_FLAG_DISABLED)) { - when = "disabled"; - } else if (edma_was_enabled) { - when = "EDMA enabled"; + if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { + when = " while EDMA enabled"; } else { struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) - when = "polling"; + when = " while polling"; } - ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when); + ata_ehi_push_desc(ehi, "unexpected device interrupt%s", when); ehi->err_mask |= AC_ERR_OTHER; ehi->action |= ATA_EH_RESET; ata_port_freeze(ap); @@ -1808,7 +1519,7 @@ static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) * LOCKING: * Inherited from caller. */ -static void mv_err_intr(struct ata_port *ap) +static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) { void __iomem *port_mmio = mv_ap_base(ap); u32 edma_err_cause, eh_freeze_mask, serr = 0; @@ -1816,42 +1527,24 @@ static void mv_err_intr(struct ata_port *ap) struct mv_host_priv *hpriv = ap->host->private_data; unsigned int action = 0, err_mask = 0; struct ata_eh_info *ehi = &ap->link.eh_info; - struct ata_queued_cmd *qc; - int abort = 0; + + ata_ehi_clear_desc(ehi); /* - * Read and clear the SError and err_cause bits. + * Read and clear the err_cause bits. This won't actually + * clear for some errors (eg. SError), but we will be doing + * a hard reset in those cases regardless, which *will* clear it. */ - sata_scr_read(&ap->link, SCR_ERROR, &serr); - sata_scr_write_flush(&ap->link, SCR_ERROR, serr); - edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); - ata_port_printk(ap, KERN_INFO, "%s: err_cause=%08x pp_flags=0x%x\n", - __func__, edma_err_cause, pp->pp_flags); - - if (edma_err_cause & EDMA_ERR_DEV) { - /* - * Device errors during FIS-based switching operation - * require special handling. 
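mv_err_intr() above reads EDMA_ERR_IRQ_CAUSE and acknowledges it by writing back the complement of the bits it saw. Assuming the usual write-zero-to-clear behaviour that the ~cause write implies, this clears exactly the conditions that were observed while leaving any error bit that arrived in between untouched. A standalone model of that read-then-ack step (a variable stands in for the register):

#include <stdint.h>
#include <stdio.h>

static uint32_t err_cause_reg = 0x00000005;   /* two error bits pending */

static uint32_t read_cause(void)
{
        return err_cause_reg;
}

/* Model of a write-zero-to-clear register: bits written as 0 are cleared,
 * bits written as 1 are left alone. */
static void write_cause(uint32_t val)
{
        err_cause_reg &= val;
}

int main(void)
{
        uint32_t cause = read_cause();

        /* suppose a new error bit arrives between the read and the ack */
        err_cause_reg |= 0x8;

        write_cause(~cause);          /* ack only what we actually saw */
        printf("left pending: 0x%x\n", err_cause_reg);   /* 0x8 survives */
        return 0;
}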
- */ - if (mv_handle_dev_err(ap, edma_err_cause)) - return; - } + ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause); - qc = mv_get_active_qc(ap); - ata_ehi_clear_desc(ehi); - ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x", - edma_err_cause, pp->pp_flags); /* * All generations share these EDMA error cause bits: */ - if (edma_err_cause & EDMA_ERR_DEV) { + if (edma_err_cause & EDMA_ERR_DEV) err_mask |= AC_ERR_DEV; - action |= ATA_EH_RESET; - ata_ehi_push_desc(ehi, "dev error"); - } if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR)) { @@ -1883,6 +1576,13 @@ static void mv_err_intr(struct ata_port *ap) ata_ehi_push_desc(ehi, "EDMA self-disable"); } if (edma_err_cause & EDMA_ERR_SERR) { + /* + * Ensure that we read our own SCR, not a pmp link SCR: + */ + ap->ops->scr_read(ap, SCR_ERROR, &serr); + /* + * Don't clear SError here; leave it for libata-eh: + */ ata_ehi_push_desc(ehi, "SError=%08x", serr); err_mask |= AC_ERR_ATA_BUS; action |= ATA_EH_RESET; @@ -1902,29 +1602,10 @@ static void mv_err_intr(struct ata_port *ap) else ehi->err_mask |= err_mask; - if (err_mask == AC_ERR_DEV) { - /* - * Cannot do ata_port_freeze() here, - * because it would kill PIO access, - * which is needed for further diagnosis. - */ - mv_eh_freeze(ap); - abort = 1; - } else if (edma_err_cause & eh_freeze_mask) { - /* - * Note to self: ata_port_freeze() calls ata_port_abort() - */ + if (edma_err_cause & eh_freeze_mask) ata_port_freeze(ap); - } else { - abort = 1; - } - - if (abort) { - if (qc) - ata_link_abort(qc->dev->link); - else - ata_port_abort(ap); - } + else + ata_port_abort(ap); } static void mv_process_crpb_response(struct ata_port *ap, @@ -1951,9 +1632,8 @@ static void mv_process_crpb_response(struct ata_port *ap, } } ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT; - if (!ac_err_mask(ata_status)) - ata_qc_complete(qc); - /* else: leave it for mv_err_intr() */ + qc->err_mask |= ac_err_mask(ata_status); + ata_qc_complete(qc); } else { ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n", __func__, tag); @@ -1997,44 +1677,6 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); } -static void mv_port_intr(struct ata_port *ap, u32 port_cause) -{ - struct mv_port_priv *pp; - int edma_was_enabled; - - if (!ap || (ap->flags & ATA_FLAG_DISABLED)) { - mv_unexpected_intr(ap, 0); - return; - } - /* - * Grab a snapshot of the EDMA_EN flag setting, - * so that we have a consistent view for this port, - * even if something we call of our routines changes it. - */ - pp = ap->private_data; - edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN); - /* - * Process completed CRPB response(s) before other events. - */ - if (edma_was_enabled && (port_cause & DONE_IRQ)) { - mv_process_crpb_entries(ap, pp); - if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) - mv_handle_fbs_ncq_dev_err(ap); - } - /* - * Handle chip-reported errors, or continue on to handle PIO. 
- */ - if (unlikely(port_cause & ERR_IRQ)) { - mv_err_intr(ap); - } else if (!edma_was_enabled) { - struct ata_queued_cmd *qc = mv_get_active_qc(ap); - if (qc) - ata_sff_host_intr(ap, qc); - else - mv_unexpected_intr(ap, edma_was_enabled); - } -} - /** * mv_host_intr - Handle all interrupts on the given host controller * @host: host specific structure @@ -2046,58 +1688,66 @@ static void mv_port_intr(struct ata_port *ap, u32 port_cause) static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) { struct mv_host_priv *hpriv = host->private_data; - void __iomem *mmio = hpriv->base, *hc_mmio; + void __iomem *mmio = hpriv->base, *hc_mmio = NULL; + u32 hc_irq_cause = 0; unsigned int handled = 0, port; for (port = 0; port < hpriv->n_ports; port++) { struct ata_port *ap = host->ports[port]; - unsigned int p, shift, hardport, port_cause; - + struct mv_port_priv *pp; + unsigned int shift, hardport, port_cause; + /* + * When we move to the second hc, flag our cached + * copies of hc_mmio (and hc_irq_cause) as invalid again. + */ + if (port == MV_PORTS_PER_HC) + hc_mmio = NULL; + /* + * Do nothing if port is not interrupting or is disabled: + */ MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); + port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ); + if (!port_cause || !ap || (ap->flags & ATA_FLAG_DISABLED)) + continue; /* - * Each hc within the host has its own hc_irq_cause register, - * where the interrupting ports bits get ack'd. + * Each hc within the host has its own hc_irq_cause register. + * We defer reading it until we know we need it, right now: + * + * FIXME later: we don't really need to read this register + * (some logic changes required below if we go that way), + * because it doesn't tell us anything new. But we do need + * to write to it, outside the top of this loop, + * to reset the interrupt triggers for next time. */ - if (hardport == 0) { /* first port on this hc ? */ - u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND; - u32 port_mask, ack_irqs; - /* - * Skip this entire hc if nothing pending for any ports - */ - if (!hc_cause) { - port += MV_PORTS_PER_HC - 1; - continue; - } - /* - * We don't need/want to read the hc_irq_cause register, - * because doing so hurts performance, and - * main_irq_cause already gives us everything we need. - * - * But we do have to *write* to the hc_irq_cause to ack - * the ports that we are handling this time through. - * - * This requires that we create a bitmap for those - * ports which interrupted us, and use that bitmap - * to ack (only) those ports via hc_irq_cause. - */ - ack_irqs = 0; - for (p = 0; p < MV_PORTS_PER_HC; ++p) { - if ((port + p) >= hpriv->n_ports) - break; - port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2); - if (hc_cause & port_mask) - ack_irqs |= (DMA_IRQ | DEV_IRQ) << p; - } + if (!hc_mmio) { hc_mmio = mv_hc_base_from_port(mmio, port); - writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS); + hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); + writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); handled = 1; } /* - * Handle interrupts signalled for this port: + * Process completed CRPB response(s) before other events. */ - port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ); - if (port_cause) - mv_port_intr(ap, port_cause); + pp = ap->private_data; + if (hc_irq_cause & (DMA_IRQ << hardport)) { + if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) + mv_process_crpb_entries(ap, pp); + } + /* + * Handle chip-reported errors, or continue on to handle PIO. 
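The per-HC acknowledgement logic removed above derives, from the main interrupt cause, a mask that covers only the ports which actually interrupted, and then clears just those bits in hc_irq_cause with a single write. A standalone sketch of building such a mask (two cause bits per port in the main register, simplified per-port bits in the HC register; all values are invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define PORTS_PER_HC 4
#define DONE_IRQ     0x1            /* per-port bits in the main cause  */
#define ERR_IRQ      0x2
#define DMA_IRQ      0x1            /* per-port bits in hc_irq_cause    */
#define DEV_IRQ      0x2

/* Translate the main-cause bits for one host controller into the
 * bits that must be acknowledged in that HC's own cause register. */
static uint32_t build_ack_mask(uint32_t hc_cause)
{
        uint32_t ack = 0;
        int p;

        for (p = 0; p < PORTS_PER_HC; p++) {
                uint32_t port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);

                if (hc_cause & port_mask)
                        ack |= (DMA_IRQ | DEV_IRQ) << p;
        }
        return ack;
}

int main(void)
{
        /* ports 0 and 2 interrupted */
        uint32_t hc_cause = (DONE_IRQ << 0) | (ERR_IRQ << 4);

        printf("ack mask: 0x%x\n", build_ack_mask(hc_cause));   /* 0xf */
        return 0;
}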
+ */ + if (unlikely(port_cause & ERR_IRQ)) { + mv_err_intr(ap, mv_get_active_qc(ap)); + } else if (hc_irq_cause & (DEV_IRQ << hardport)) { + if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { + struct ata_queued_cmd *qc = mv_get_active_qc(ap); + if (qc) { + ata_sff_host_intr(ap, qc); + continue; + } + } + mv_unexpected_intr(ap); + } } return handled; } @@ -2244,7 +1894,7 @@ static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio) static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) { - writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS); + writel(0x0fcfffff, mmio + MV_FLASH_CTL); } static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, @@ -2263,7 +1913,7 @@ static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) { u32 tmp; - writel(0, mmio + MV_GPIO_PORT_CTL_OFS); + writel(0, mmio + MV_GPIO_PORT_CTL); /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */ @@ -2281,14 +1931,14 @@ static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0); if (fix_apm_sq) { - tmp = readl(phy_mmio + MV5_LTMODE_OFS); + tmp = readl(phy_mmio + MV5_LT_MODE); tmp |= (1 << 19); - writel(tmp, phy_mmio + MV5_LTMODE_OFS); + writel(tmp, phy_mmio + MV5_LT_MODE); - tmp = readl(phy_mmio + MV5_PHY_CTL_OFS); + tmp = readl(phy_mmio + MV5_PHY_CTL); tmp &= ~0x3; tmp |= 0x1; - writel(tmp, phy_mmio + MV5_PHY_CTL_OFS); + writel(tmp, phy_mmio + MV5_PHY_CTL); } tmp = readl(phy_mmio + MV5_PHY_MODE); @@ -2306,6 +1956,11 @@ static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, { void __iomem *port_mmio = mv_port_base(mmio, port); + /* + * The datasheet warns against setting ATA_RST when EDMA is active + * (but doesn't say what the problem might be). So we first try + * to disable the EDMA engine before doing the ATA_RST operation. 
+ */ mv_reset_channel(hpriv, mmio, port); ZERO(0x028); /* command */ @@ -2320,7 +1975,7 @@ static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, ZERO(0x024); /* respq outp */ ZERO(0x020); /* respq inp */ ZERO(0x02c); /* test control */ - writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS); + writel(0xbc, port_mmio + EDMA_IORDY_TMOUT); } #undef ZERO @@ -2366,13 +2021,13 @@ static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio) struct mv_host_priv *hpriv = host->private_data; u32 tmp; - tmp = readl(mmio + MV_PCI_MODE_OFS); + tmp = readl(mmio + MV_PCI_MODE); tmp &= 0xff00ffff; - writel(tmp, mmio + MV_PCI_MODE_OFS); + writel(tmp, mmio + MV_PCI_MODE); ZERO(MV_PCI_DISC_TIMER); ZERO(MV_PCI_MSI_TRIGGER); - writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS); + writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT); ZERO(PCI_HC_MAIN_IRQ_MASK_OFS); ZERO(MV_PCI_SERR_MASK); ZERO(hpriv->irq_cause_ofs); @@ -2390,10 +2045,10 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) mv5_reset_flash(hpriv, mmio); - tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS); + tmp = readl(mmio + MV_GPIO_PORT_CTL); tmp &= 0x3; tmp |= (1 << 5) | (1 << 6); - writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS); + writel(tmp, mmio + MV_GPIO_PORT_CTL); } /** @@ -2466,7 +2121,7 @@ static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, void __iomem *port_mmio; u32 tmp; - tmp = readl(mmio + MV_RESET_CFG_OFS); + tmp = readl(mmio + MV_RESET_CFG); if ((tmp & (1 << 0)) == 0) { hpriv->signal[idx].amps = 0x7 << 8; hpriv->signal[idx].pre = 0x1 << 5; @@ -2482,7 +2137,7 @@ static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) { - writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS); + writel(0x00000060, mmio + MV_GPIO_PORT_CTL); } static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, @@ -2580,6 +2235,11 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv, { void __iomem *port_mmio = mv_port_base(mmio, port); + /* + * The datasheet warns against setting ATA_RST when EDMA is active + * (but doesn't say what the problem might be). So we first try + * to disable the EDMA engine before doing the ATA_RST operation. + */ mv_reset_channel(hpriv, mmio, port); ZERO(0x028); /* command */ @@ -2594,7 +2254,7 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv, ZERO(0x024); /* respq outp */ ZERO(0x020); /* respq inp */ ZERO(0x02c); /* test control */ - writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS); + writel(0xbc, port_mmio + EDMA_IORDY_TMOUT); } #undef ZERO @@ -2637,39 +2297,38 @@ static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio) return; } -static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i) +static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i) { - u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS); + u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG); - ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */ + ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */ if (want_gen2i) - ifcfg |= (1 << 7); /* enable gen2i speed */ - writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS); + ifctl |= (1 << 7); /* enable gen2i speed */ + writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG); } +/* + * Caller must ensure that EDMA is not active, + * by first doing mv_stop_edma() where needed. 
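The comments added above insist on the same ordering that mv_reset_channel() implements: quiesce the EDMA engine first, then strobe the reset bit, wait briefly for it to propagate, and release it. A standalone sketch of that sequence (delays and register accesses are simulated, names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define CMD_EDMA_EN (1 << 0)
#define CMD_ATA_RST (1 << 2)

static uint32_t edma_cmd_reg = CMD_EDMA_EN;    /* engine currently running */

static void stop_edma_engine(void)
{
        edma_cmd_reg &= ~CMD_EDMA_EN;          /* disable before resetting */
}

static void reset_channel(void)
{
        stop_edma_engine();                    /* per the datasheet warning */

        edma_cmd_reg = CMD_ATA_RST;            /* assert reset */
        /* udelay(25) in the real code: allow the reset to propagate */
        edma_cmd_reg = 0;                      /* release reset */
}

int main(void)
{
        reset_channel();
        printf("EDMA_CMD after reset: 0x%x\n", edma_cmd_reg);   /* 0x0 */
        return 0;
}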
+ */ static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port_no) { void __iomem *port_mmio = mv_port_base(mmio, port_no); - /* - * The datasheet warns against setting EDMA_RESET when EDMA is active - * (but doesn't say what the problem might be). So we first try - * to disable the EDMA engine before doing the EDMA_RESET operation. - */ mv_stop_edma_engine(port_mmio); - writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS); + writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); if (!IS_GEN_I(hpriv)) { - /* Enable 3.0gb/s link speed: this survives EDMA_RESET */ - mv_setup_ifcfg(port_mmio, 1); + /* Enable 3.0gb/s link speed */ + mv_setup_ifctl(port_mmio, 1); } /* - * Strobing EDMA_RESET here causes a hard reset of the SATA transport, + * Strobing ATA_RST here causes a hard reset of the SATA transport, * link, and physical layers. It resets all SATA interface registers * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev. */ - writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS); + writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); udelay(25); /* allow reset propagation */ writelfl(0, port_mmio + EDMA_CMD_OFS); @@ -2733,7 +2392,7 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class, sata_scr_read(link, SCR_STATUS, &sstatus); if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) { /* Force 1.5gb/s link speed and try again */ - mv_setup_ifcfg(mv_ap_base(ap), 0); + mv_setup_ifctl(mv_ap_base(ap), 0); if (time_after(jiffies + HZ, deadline)) extra = HZ; /* only extend it once, max */ } @@ -2834,34 +2493,6 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS)); } -static unsigned int mv_in_pcix_mode(struct ata_host *host) -{ - struct mv_host_priv *hpriv = host->private_data; - void __iomem *mmio = hpriv->base; - u32 reg; - - if (!HAS_PCI(host) || !IS_PCIE(hpriv)) - return 0; /* not PCI-X capable */ - reg = readl(mmio + MV_PCI_MODE_OFS); - if ((reg & MV_PCI_MODE_MASK) == 0) - return 0; /* conventional PCI mode */ - return 1; /* chip is in PCI-X mode */ -} - -static int mv_pci_cut_through_okay(struct ata_host *host) -{ - struct mv_host_priv *hpriv = host->private_data; - void __iomem *mmio = hpriv->base; - u32 reg; - - if (!mv_in_pcix_mode(host)) { - reg = readl(mmio + PCI_COMMAND_OFS); - if (reg & PCI_COMMAND_MRDTRIG) - return 0; /* not okay */ - } - return 1; /* okay */ -} - static int mv_chip_id(struct ata_host *host, unsigned int board_idx) { struct pci_dev *pdev = to_pci_dev(host->dev); @@ -2929,7 +2560,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) break; case chip_7042: - hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH; + hp_flags |= MV_HP_PCIE; if (pdev->vendor == PCI_VENDOR_ID_TTI && (pdev->device == 0x2300 || pdev->device == 0x2310)) { @@ -2959,12 +2590,9 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) " and avoid the final two gigabytes on" " all RocketRAID BIOS initialized drives.\n"); } - /* drop through */ case chip_6042: hpriv->ops = &mv6xxx_ops; hp_flags |= MV_HP_GEN_IIE; - if (board_idx == chip_6042 && mv_pci_cut_through_okay(host)) - hp_flags |= MV_HP_CUT_THROUGH; switch (pdev->revision) { case 0x0: diff --git a/trunk/drivers/base/sys.c b/trunk/drivers/base/sys.c index 358bb0be3c08..4fbb56bcb1ee 100644 --- a/trunk/drivers/base/sys.c +++ b/trunk/drivers/base/sys.c @@ -175,7 +175,8 @@ int sysdev_driver_register(struct sysdev_class *cls, struct sysdev_driver *drv) } /* Check whether this driver has already been added to a 
class. */ - if (drv->entry.next && !list_empty(&drv->entry)) { + if ((drv->entry.next != drv->entry.prev) || + (drv->entry.next != NULL)) { printk(KERN_WARNING "sysdev: class %s: driver (%p) has already" " been registered to a class, something is wrong, but " "will forge on!\n", cls->name, drv); diff --git a/trunk/drivers/block/aoe/aoecmd.c b/trunk/drivers/block/aoe/aoecmd.c index 41f818be2f7e..8fc429cf82b6 100644 --- a/trunk/drivers/block/aoe/aoecmd.c +++ b/trunk/drivers/block/aoe/aoecmd.c @@ -755,13 +755,11 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector { unsigned long n_sect = bio->bi_size >> 9; const int rw = bio_data_dir(bio); - struct hd_struct *part; - part = get_part(disk, sector); - all_stat_inc(disk, part, ios[rw], sector); - all_stat_add(disk, part, ticks[rw], duration, sector); - all_stat_add(disk, part, sectors[rw], n_sect, sector); - all_stat_add(disk, part, io_ticks, duration, sector); + all_stat_inc(disk, ios[rw], sector); + all_stat_add(disk, ticks[rw], duration, sector); + all_stat_add(disk, sectors[rw], n_sect, sector); + all_stat_add(disk, io_ticks, duration, sector); } void diff --git a/trunk/drivers/char/serial167.c b/trunk/drivers/char/serial167.c index 3b23270eaa65..fd2db07a50fc 100644 --- a/trunk/drivers/char/serial167.c +++ b/trunk/drivers/char/serial167.c @@ -1073,7 +1073,7 @@ static int cy_put_char(struct tty_struct *tty, unsigned char ch) return 0; if (!info->xmit_buf) - return 0; + return; local_irq_save(flags); if (info->xmit_cnt >= PAGE_SIZE - 1) { diff --git a/trunk/drivers/char/sx.c b/trunk/drivers/char/sx.c index b1a7a8cb65ea..f39f6fd89350 100644 --- a/trunk/drivers/char/sx.c +++ b/trunk/drivers/char/sx.c @@ -970,8 +970,7 @@ static int sx_set_real_termios(void *ptr) sx_write_channel_byte(port, hi_mask, 0x1f); break; default: - printk(KERN_INFO "sx: Invalid wordsize: %u\n", - (unsigned int)CFLAG & CSIZE); + printk(KERN_INFO "sx: Invalid wordsize: %u\n", CFLAG & CSIZE); break; } @@ -998,8 +997,7 @@ static int sx_set_real_termios(void *ptr) set_bit(TTY_HW_COOK_IN, &port->gs.tty->flags); } sx_dprintk(SX_DEBUG_TERMIOS, "iflags: %x(%d) ", - (unsigned int)port->gs.tty->termios->c_iflag, - I_OTHER(port->gs.tty)); + port->gs.tty->termios->c_iflag, I_OTHER(port->gs.tty)); /* Tell line discipline whether we will do output cooking. 
* If OPOST is set and no other output flags are set then we can do output @@ -1012,8 +1010,7 @@ static int sx_set_real_termios(void *ptr) clear_bit(TTY_HW_COOK_OUT, &port->gs.tty->flags); } sx_dprintk(SX_DEBUG_TERMIOS, "oflags: %x(%d)\n", - (unsigned int)port->gs.tty->termios->c_oflag, - O_OTHER(port->gs.tty)); + port->gs.tty->termios->c_oflag, O_OTHER(port->gs.tty)); /* port->c_dcd = sx_get_CD (port); */ func_exit(); return 0; diff --git a/trunk/drivers/char/vt.c b/trunk/drivers/char/vt.c index fa1ffbf2c621..e458b08139af 100644 --- a/trunk/drivers/char/vt.c +++ b/trunk/drivers/char/vt.c @@ -2742,10 +2742,6 @@ static int con_open(struct tty_struct *tty, struct file *filp) tty->winsize.ws_row = vc_cons[currcons].d->vc_rows; tty->winsize.ws_col = vc_cons[currcons].d->vc_cols; } - if (vc->vc_utf) - tty->termios->c_iflag |= IUTF8; - else - tty->termios->c_iflag &= ~IUTF8; release_console_sem(); vcs_make_sysfs(tty); return ret; @@ -2922,8 +2918,6 @@ int __init vty_init(void) console_driver->minor_start = 1; console_driver->type = TTY_DRIVER_TYPE_CONSOLE; console_driver->init_termios = tty_std_termios; - if (default_utf8) - console_driver->init_termios.c_iflag |= IUTF8; console_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS; tty_set_operations(console_driver, &con_ops); if (tty_register_driver(console_driver)) diff --git a/trunk/drivers/edac/edac_core.h b/trunk/drivers/edac/edac_core.h index b27b13c5eb5a..a9aa845dbe74 100644 --- a/trunk/drivers/edac/edac_core.h +++ b/trunk/drivers/edac/edac_core.h @@ -97,7 +97,7 @@ extern int edac_debug_level; #define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \ PCI_DEVICE_ID_ ## vend ## _ ## dev -#define edac_dev_name(dev) (dev)->dev_name +#define dev_name(dev) (dev)->dev_name /* memory devices */ enum dev_type { diff --git a/trunk/drivers/edac/edac_device.c b/trunk/drivers/edac/edac_device.c index 5fcd3d89c75d..63372fa7ecfe 100644 --- a/trunk/drivers/edac/edac_device.c +++ b/trunk/drivers/edac/edac_device.c @@ -333,7 +333,7 @@ static int add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev) fail0: edac_printk(KERN_WARNING, EDAC_MC, "%s (%s) %s %s already assigned %d\n", - rover->dev->bus_id, edac_dev_name(rover), + rover->dev->bus_id, dev_name(rover), rover->mod_name, rover->ctl_name, rover->dev_idx); return 1; @@ -538,7 +538,7 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev) "'%s': DEV '%s' (%s)\n", edac_dev->mod_name, edac_dev->ctl_name, - edac_dev_name(edac_dev), + dev_name(edac_dev), edac_op_state_to_string(edac_dev->op_state)); mutex_unlock(&device_ctls_mutex); @@ -599,7 +599,7 @@ struct edac_device_ctl_info *edac_device_del_device(struct device *dev) edac_printk(KERN_INFO, EDAC_MC, "Removed device %d for %s %s: DEV %s\n", edac_dev->dev_idx, - edac_dev->mod_name, edac_dev->ctl_name, edac_dev_name(edac_dev)); + edac_dev->mod_name, edac_dev->ctl_name, dev_name(edac_dev)); return edac_dev; } diff --git a/trunk/drivers/edac/edac_mc.c b/trunk/drivers/edac/edac_mc.c index d110392d48f4..a4cf1645f588 100644 --- a/trunk/drivers/edac/edac_mc.c +++ b/trunk/drivers/edac/edac_mc.c @@ -402,7 +402,7 @@ static int add_mc_to_global_list(struct mem_ctl_info *mci) fail0: edac_printk(KERN_WARNING, EDAC_MC, "%s (%s) %s %s already assigned %d\n", p->dev->bus_id, - edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx); + dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx); return 1; fail1: @@ -517,7 +517,7 @@ int edac_mc_add_mc(struct mem_ctl_info *mci) /* Report action taken */ edac_mc_printk(mci, KERN_INFO, "Giving out 
device to '%s' '%s':" - " DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci)); + " DEV %s\n", mci->mod_name, mci->ctl_name, dev_name(mci)); mutex_unlock(&mem_ctls_mutex); return 0; @@ -565,7 +565,7 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev) edac_printk(KERN_INFO, EDAC_MC, "Removed device %d for %s %s: DEV %s\n", mci->mc_idx, - mci->mod_name, mci->ctl_name, edac_dev_name(mci)); + mci->mod_name, mci->ctl_name, dev_name(mci)); return mci; } diff --git a/trunk/drivers/edac/edac_pci.c b/trunk/drivers/edac/edac_pci.c index 22ec9d5d4312..9b24340b52e1 100644 --- a/trunk/drivers/edac/edac_pci.c +++ b/trunk/drivers/edac/edac_pci.c @@ -150,7 +150,7 @@ static int add_edac_pci_to_global_list(struct edac_pci_ctl_info *pci) fail0: edac_printk(KERN_WARNING, EDAC_PCI, "%s (%s) %s %s already assigned %d\n", - rover->dev->bus_id, edac_dev_name(rover), + rover->dev->bus_id, dev_name(rover), rover->mod_name, rover->ctl_name, rover->pci_idx); return 1; @@ -360,7 +360,7 @@ int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx) " DEV '%s' (%s)\n", pci->mod_name, pci->ctl_name, - edac_dev_name(pci), edac_op_state_to_string(pci->op_state)); + dev_name(pci), edac_op_state_to_string(pci->op_state)); mutex_unlock(&edac_pci_ctls_mutex); return 0; @@ -415,7 +415,7 @@ struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev) edac_printk(KERN_INFO, EDAC_PCI, "Removed device %d for %s %s: DEV %s\n", - pci->pci_idx, pci->mod_name, pci->ctl_name, edac_dev_name(pci)); + pci->pci_idx, pci->mod_name, pci->ctl_name, dev_name(pci)); return pci; } diff --git a/trunk/drivers/ide/ide-probe.c b/trunk/drivers/ide/ide-probe.c index 34b0d4f26b58..591deda3f86a 100644 --- a/trunk/drivers/ide/ide-probe.c +++ b/trunk/drivers/ide/ide-probe.c @@ -1355,6 +1355,12 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port, if (hwif->chipset != ide_dtc2278 || hwif->channel == 0) hwif->port_ops = d->port_ops; + if ((d->host_flags & IDE_HFLAG_SERIALIZE) || + ((d->host_flags & IDE_HFLAG_SERIALIZE_DMA) && hwif->dma_base)) { + if (hwif->mate) + hwif->mate->serialized = hwif->serialized = 1; + } + hwif->swdma_mask = d->swdma_mask; hwif->mwdma_mask = d->mwdma_mask; hwif->ultra_mask = d->udma_mask; @@ -1376,12 +1382,6 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port, hwif->dma_ops = d->dma_ops; } - if ((d->host_flags & IDE_HFLAG_SERIALIZE) || - ((d->host_flags & IDE_HFLAG_SERIALIZE_DMA) && hwif->dma_base)) { - if (hwif->mate) - hwif->mate->serialized = hwif->serialized = 1; - } - if (d->host_flags & IDE_HFLAG_RQSIZE_256) hwif->rqsize = 256; diff --git a/trunk/drivers/ide/legacy/falconide.c b/trunk/drivers/ide/legacy/falconide.c index 9e449a0c623f..83555ca513b5 100644 --- a/trunk/drivers/ide/legacy/falconide.c +++ b/trunk/drivers/ide/legacy/falconide.c @@ -61,7 +61,7 @@ static void falconide_output_data(ide_drive_t *drive, struct request *rq, unsigned long data_addr = drive->hwif->io_ports.data_addr; if (drive->media == ide_disk && rq && rq->cmd_type == REQ_TYPE_FS) - return outsw(data_addr, buf, (len + 1) / 2); + return outsw(data_addr, buf, (len + 1) / 2); outsw_swapw(data_addr, buf, (len + 1) / 2); } diff --git a/trunk/drivers/infiniband/hw/cxgb3/cxio_hal.c b/trunk/drivers/infiniband/hw/cxgb3/cxio_hal.c index ebf9d3043f80..ed2ee4ba4b7c 100644 --- a/trunk/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/trunk/drivers/infiniband/hw/cxgb3/cxio_hal.c @@ -359,10 +359,9 @@ static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq) cq->sw_wptr++; } -int cxio_flush_rq(struct t3_wq *wq,
struct t3_cq *cq, int count) +void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count) { u32 ptr; - int flushed = 0; PDBG("%s wq %p cq %p\n", __func__, wq, cq); @@ -370,11 +369,8 @@ int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count) PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__, wq->rq_rptr, wq->rq_wptr, count); ptr = wq->rq_rptr + count; - while (ptr++ != wq->rq_wptr) { + while (ptr++ != wq->rq_wptr) insert_recv_cqe(wq, cq); - flushed++; - } - return flushed; } static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq, @@ -398,10 +394,9 @@ static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq, cq->sw_wptr++; } -int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count) +void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count) { __u32 ptr; - int flushed = 0; struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2); ptr = wq->sq_rptr + count; @@ -410,9 +405,7 @@ int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count) insert_sq_cqe(wq, cq, sqp); sqp++; ptr++; - flushed++; } - return flushed; } /* @@ -588,7 +581,7 @@ static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p) * caller aquires the ctrl_qp lock before the call */ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr, - u32 len, void *data) + u32 len, void *data, int completion) { u32 i, nr_wqe, copy_len; u8 *copy_data; @@ -624,7 +617,7 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr, flag = 0; if (i == (nr_wqe - 1)) { /* last WQE */ - flag = T3_COMPLETION_FLAG; + flag = completion ? T3_COMPLETION_FLAG : 0; if (len % 32) utx_len = len / 32 + 1; else @@ -683,20 +676,21 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr, return 0; } -/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl_size and pbl_addr - * OUT: stag index +/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl, and pbl_size + * OUT: stag index, actual pbl_size, pbl_addr allocated. * TBD: shared memory region support */ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry, u32 *stag, u8 stag_state, u32 pdid, enum tpt_mem_type type, enum tpt_mem_perm perm, - u32 zbva, u64 to, u32 len, u8 page_size, - u32 pbl_size, u32 pbl_addr) + u32 zbva, u64 to, u32 len, u8 page_size, __be64 *pbl, + u32 *pbl_size, u32 *pbl_addr) { int err; struct tpt_entry tpt; u32 stag_idx; u32 wptr; + int rereg = (*stag != T3_STAG_UNSET); stag_state = stag_state > 0; stag_idx = (*stag) >> 8; @@ -710,8 +704,30 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry, PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n", __func__, stag_state, type, pdid, stag_idx); + if (reset_tpt_entry) + cxio_hal_pblpool_free(rdev_p, *pbl_addr, *pbl_size << 3); + else if (!rereg) { + *pbl_addr = cxio_hal_pblpool_alloc(rdev_p, *pbl_size << 3); + if (!*pbl_addr) { + return -ENOMEM; + } + } + mutex_lock(&rdev_p->ctrl_qp.lock); + /* write PBL first if any - update pbl only if pbl list exist */ + if (pbl) { + + PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n", + __func__, *pbl_addr, rdev_p->rnic_info.pbl_base, + *pbl_size); + err = cxio_hal_ctrl_qp_write_mem(rdev_p, + (*pbl_addr >> 5), + (*pbl_size << 3), pbl, 0); + if (err) + goto ret; + } + /* write TPT entry */ if (reset_tpt_entry) memset(&tpt, 0, sizeof(tpt)); @@ -726,23 +742,23 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry, V_TPT_ADDR_TYPE((zbva ? 
TPT_ZBTO : TPT_VATO)) | V_TPT_PAGE_SIZE(page_size)); tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 : - cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3)); + cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, *pbl_addr)>>3)); tpt.len = cpu_to_be32(len); tpt.va_hi = cpu_to_be32((u32) (to >> 32)); tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL)); tpt.rsvd_bind_cnt_or_pstag = 0; tpt.rsvd_pbl_size = reset_tpt_entry ? 0 : - cpu_to_be32(V_TPT_PBL_SIZE(pbl_size >> 2)); + cpu_to_be32(V_TPT_PBL_SIZE((*pbl_size) >> 2)); } err = cxio_hal_ctrl_qp_write_mem(rdev_p, stag_idx + (rdev_p->rnic_info.tpt_base >> 5), - sizeof(tpt), &tpt); + sizeof(tpt), &tpt, 1); /* release the stag index to free pool */ if (reset_tpt_entry) cxio_hal_put_stag(rdev_p->rscp, stag_idx); - +ret: wptr = rdev_p->ctrl_qp.wptr; mutex_unlock(&rdev_p->ctrl_qp.lock); if (!err) @@ -753,67 +769,44 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry, return err; } -int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl, - u32 pbl_addr, u32 pbl_size) -{ - u32 wptr; - int err; - - PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n", - __func__, pbl_addr, rdev_p->rnic_info.pbl_base, - pbl_size); - - mutex_lock(&rdev_p->ctrl_qp.lock); - err = cxio_hal_ctrl_qp_write_mem(rdev_p, pbl_addr >> 5, pbl_size << 3, - pbl); - wptr = rdev_p->ctrl_qp.wptr; - mutex_unlock(&rdev_p->ctrl_qp.lock); - if (err) - return err; - - if (wait_event_interruptible(rdev_p->ctrl_qp.waitq, - SEQ32_GE(rdev_p->ctrl_qp.rptr, - wptr))) - return -ERESTARTSYS; - - return 0; -} - int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid, enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len, - u8 page_size, u32 pbl_size, u32 pbl_addr) + u8 page_size, __be64 *pbl, u32 *pbl_size, + u32 *pbl_addr) { *stag = T3_STAG_UNSET; return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm, - zbva, to, len, page_size, pbl_size, pbl_addr); + zbva, to, len, page_size, pbl, pbl_size, pbl_addr); } int cxio_reregister_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid, enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len, - u8 page_size, u32 pbl_size, u32 pbl_addr) + u8 page_size, __be64 *pbl, u32 *pbl_size, + u32 *pbl_addr) { return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm, - zbva, to, len, page_size, pbl_size, pbl_addr); + zbva, to, len, page_size, pbl, pbl_size, pbl_addr); } int cxio_dereg_mem(struct cxio_rdev *rdev_p, u32 stag, u32 pbl_size, u32 pbl_addr) { - return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, - pbl_size, pbl_addr); + return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, NULL, + &pbl_size, &pbl_addr); } int cxio_allocate_window(struct cxio_rdev *rdev_p, u32 * stag, u32 pdid) { + u32 pbl_size = 0; *stag = T3_STAG_UNSET; return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_MW, 0, 0, 0ULL, 0, 0, - 0, 0); + NULL, &pbl_size, NULL); } int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag) { - return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, - 0, 0); + return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, NULL, + NULL, NULL); } int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr) diff --git a/trunk/drivers/infiniband/hw/cxgb3/cxio_hal.h b/trunk/drivers/infiniband/hw/cxgb3/cxio_hal.h index 6e128f6bab05..2bcff7f5046e 100644 --- a/trunk/drivers/infiniband/hw/cxgb3/cxio_hal.h +++ b/trunk/drivers/infiniband/hw/cxgb3/cxio_hal.h @@ -154,14 +154,14 @@ int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq, 
int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq, struct cxio_ucontext *uctx); int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode); -int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl, - u32 pbl_addr, u32 pbl_size); int cxio_register_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid, enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len, - u8 page_size, u32 pbl_size, u32 pbl_addr); + u8 page_size, __be64 *pbl, u32 *pbl_size, + u32 *pbl_addr); int cxio_reregister_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid, enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len, - u8 page_size, u32 pbl_size, u32 pbl_addr); + u8 page_size, __be64 *pbl, u32 *pbl_size, + u32 *pbl_addr); int cxio_dereg_mem(struct cxio_rdev *rdev, u32 stag, u32 pbl_size, u32 pbl_addr); int cxio_allocate_window(struct cxio_rdev *rdev, u32 * stag, u32 pdid); @@ -173,8 +173,8 @@ u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp); void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid); int __init cxio_hal_init(void); void __exit cxio_hal_exit(void); -int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count); -int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count); +void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count); +void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count); void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count); void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count); void cxio_flush_hw_cq(struct t3_cq *cq); diff --git a/trunk/drivers/infiniband/hw/cxgb3/cxio_resource.c b/trunk/drivers/infiniband/hw/cxgb3/cxio_resource.c index bd233c087653..45ed4f25ef78 100644 --- a/trunk/drivers/infiniband/hw/cxgb3/cxio_resource.c +++ b/trunk/drivers/infiniband/hw/cxgb3/cxio_resource.c @@ -250,6 +250,7 @@ void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp) */ #define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */ +#define PBL_CHUNK 2*1024*1024 u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size) { @@ -266,35 +267,14 @@ void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size) int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p) { - unsigned pbl_start, pbl_chunk; - + unsigned long i; rdev_p->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1); - if (!rdev_p->pbl_pool) - return -ENOMEM; - - pbl_start = rdev_p->rnic_info.pbl_base; - pbl_chunk = rdev_p->rnic_info.pbl_top - pbl_start + 1; - - while (pbl_start < rdev_p->rnic_info.pbl_top) { - pbl_chunk = min(rdev_p->rnic_info.pbl_top - pbl_start + 1, - pbl_chunk); - if (gen_pool_add(rdev_p->pbl_pool, pbl_start, pbl_chunk, -1)) { - PDBG("%s failed to add PBL chunk (%x/%x)\n", - __func__, pbl_start, pbl_chunk); - if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) { - printk(KERN_WARNING MOD "%s: Failed to add all PBL chunks (%x/%x)\n", - __func__, pbl_start, rdev_p->rnic_info.pbl_top - pbl_start); - return 0; - } - pbl_chunk >>= 1; - } else { - PDBG("%s added PBL chunk (%x/%x)\n", - __func__, pbl_start, pbl_chunk); - pbl_start += pbl_chunk; - } - } - - return 0; + if (rdev_p->pbl_pool) + for (i = rdev_p->rnic_info.pbl_base; + i <= rdev_p->rnic_info.pbl_top - PBL_CHUNK + 1; + i += PBL_CHUNK) + gen_pool_add(rdev_p->pbl_pool, i, PBL_CHUNK, -1); + return rdev_p->pbl_pool ? 
0 : -ENOMEM; } void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p) diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c b/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c index c325c44807e8..d44a6df9ad8c 100644 --- a/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -67,10 +67,10 @@ int peer2peer = 0; module_param(peer2peer, int, 0644); MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)"); -static int ep_timeout_secs = 60; +static int ep_timeout_secs = 10; module_param(ep_timeout_secs, int, 0644); MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout " - "in seconds (default=60)"); + "in seconds (default=10)"); static int mpa_rev = 1; module_param(mpa_rev, int, 0644); @@ -1650,8 +1650,8 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) release = 1; break; case ABORTING: - case DEAD: break; + case DEAD: default: BUG_ON(1); break; diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch_mem.c b/trunk/drivers/infiniband/hw/cxgb3/iwch_mem.c index ec49a5cbdebb..58c3d61bcd14 100644 --- a/trunk/drivers/infiniband/hw/cxgb3/iwch_mem.c +++ b/trunk/drivers/infiniband/hw/cxgb3/iwch_mem.c @@ -35,26 +35,17 @@ #include #include "cxio_hal.h" -#include "cxio_resource.h" #include "iwch.h" #include "iwch_provider.h" -static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag) -{ - u32 mmid; - - mhp->attr.state = 1; - mhp->attr.stag = stag; - mmid = stag >> 8; - mhp->ibmr.rkey = mhp->ibmr.lkey = stag; - insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid); - PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp); -} - int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php, - struct iwch_mr *mhp, int shift) + struct iwch_mr *mhp, + int shift, + __be64 *page_list) { u32 stag; + u32 mmid; + if (cxio_register_phys_mem(&rhp->rdev, &stag, mhp->attr.pdid, @@ -62,21 +53,28 @@ int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php, mhp->attr.zbva, mhp->attr.va_fbo, mhp->attr.len, - shift - 12, - mhp->attr.pbl_size, mhp->attr.pbl_addr)) + shift-12, + page_list, + &mhp->attr.pbl_size, &mhp->attr.pbl_addr)) return -ENOMEM; - - iwch_finish_mem_reg(mhp, stag); - + mhp->attr.state = 1; + mhp->attr.stag = stag; + mmid = stag >> 8; + mhp->ibmr.rkey = mhp->ibmr.lkey = stag; + insert_handle(rhp, &rhp->mmidr, mhp, mmid); + PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp); return 0; } int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php, struct iwch_mr *mhp, int shift, + __be64 *page_list, int npages) { u32 stag; + u32 mmid; + /* We could support this... 
*/ if (npages > mhp->attr.pbl_size) @@ -89,40 +87,19 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php, mhp->attr.zbva, mhp->attr.va_fbo, mhp->attr.len, - shift - 12, - mhp->attr.pbl_size, mhp->attr.pbl_addr)) + shift-12, + page_list, + &mhp->attr.pbl_size, &mhp->attr.pbl_addr)) return -ENOMEM; - - iwch_finish_mem_reg(mhp, stag); - - return 0; -} - -int iwch_alloc_pbl(struct iwch_mr *mhp, int npages) -{ - mhp->attr.pbl_addr = cxio_hal_pblpool_alloc(&mhp->rhp->rdev, - npages << 3); - - if (!mhp->attr.pbl_addr) - return -ENOMEM; - - mhp->attr.pbl_size = npages; - + mhp->attr.state = 1; + mhp->attr.stag = stag; + mmid = stag >> 8; + mhp->ibmr.rkey = mhp->ibmr.lkey = stag; + insert_handle(rhp, &rhp->mmidr, mhp, mmid); + PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp); return 0; } -void iwch_free_pbl(struct iwch_mr *mhp) -{ - cxio_hal_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, - mhp->attr.pbl_size << 3); -} - -int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset) -{ - return cxio_write_pbl(&mhp->rhp->rdev, pages, - mhp->attr.pbl_addr + (offset << 3), npages); -} - int build_phys_page_list(struct ib_phys_buf *buffer_list, int num_phys_buf, u64 *iova_start, diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.c b/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.c index 8934178a23ee..d07d3a377b5f 100644 --- a/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -442,7 +442,6 @@ static int iwch_dereg_mr(struct ib_mr *ib_mr) mmid = mhp->attr.stag >> 8; cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, mhp->attr.pbl_addr); - iwch_free_pbl(mhp); remove_handle(rhp, &rhp->mmidr, mmid); if (mhp->kva) kfree((void *) (unsigned long) mhp->kva); @@ -476,8 +475,6 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd, if (!mhp) return ERR_PTR(-ENOMEM); - mhp->rhp = rhp; - /* First check that we have enough alignment */ if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) { ret = -EINVAL; @@ -495,17 +492,7 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd, if (ret) goto err; - ret = iwch_alloc_pbl(mhp, npages); - if (ret) { - kfree(page_list); - goto err_pbl; - } - - ret = iwch_write_pbl(mhp, page_list, npages, 0); - kfree(page_list); - if (ret) - goto err_pbl; - + mhp->rhp = rhp; mhp->attr.pdid = php->pdid; mhp->attr.zbva = 0; @@ -515,15 +502,12 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd, mhp->attr.len = (u32) total_size; mhp->attr.pbl_size = npages; - ret = iwch_register_mem(rhp, php, mhp, shift); - if (ret) - goto err_pbl; - + ret = iwch_register_mem(rhp, php, mhp, shift, page_list); + kfree(page_list); + if (ret) { + goto err; + } return &mhp->ibmr; - -err_pbl: - iwch_free_pbl(mhp); - err: kfree(mhp); return ERR_PTR(ret); @@ -576,7 +560,7 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr, return ret; } - ret = iwch_reregister_mem(rhp, php, &mh, shift, npages); + ret = iwch_reregister_mem(rhp, php, &mh, shift, page_list, npages); kfree(page_list); if (ret) { return ret; @@ -618,8 +602,6 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, if (!mhp) return ERR_PTR(-ENOMEM); - mhp->rhp = rhp; - mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); if (IS_ERR(mhp->umem)) { err = PTR_ERR(mhp->umem); @@ -633,14 +615,10 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, list_for_each_entry(chunk, &mhp->umem->chunk_list, list) n += chunk->nents; 
- err = iwch_alloc_pbl(mhp, n); - if (err) - goto err; - - pages = (__be64 *) __get_free_page(GFP_KERNEL); + pages = kmalloc(n * sizeof(u64), GFP_KERNEL); if (!pages) { err = -ENOMEM; - goto err_pbl; + goto err; } i = n = 0; @@ -652,38 +630,25 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, pages[i++] = cpu_to_be64(sg_dma_address( &chunk->page_list[j]) + mhp->umem->page_size * k); - if (i == PAGE_SIZE / sizeof *pages) { - err = iwch_write_pbl(mhp, pages, i, n); - if (err) - goto pbl_done; - n += i; - i = 0; - } } } - if (i) - err = iwch_write_pbl(mhp, pages, i, n); - -pbl_done: - free_page((unsigned long) pages); - if (err) - goto err_pbl; - + mhp->rhp = rhp; mhp->attr.pdid = php->pdid; mhp->attr.zbva = 0; mhp->attr.perms = iwch_ib_to_tpt_access(acc); mhp->attr.va_fbo = virt; mhp->attr.page_size = shift - 12; mhp->attr.len = (u32) length; - - err = iwch_register_mem(rhp, php, mhp, shift); + mhp->attr.pbl_size = i; + err = iwch_register_mem(rhp, php, mhp, shift, pages); + kfree(pages); if (err) - goto err_pbl; + goto err; if (udata && !t3a_device(rhp)) { uresp.pbl_addr = (mhp->attr.pbl_addr - - rhp->rdev.rnic_info.pbl_base) >> 3; + rhp->rdev.rnic_info.pbl_base) >> 3; PDBG("%s user resp pbl_addr 0x%x\n", __func__, uresp.pbl_addr); @@ -696,9 +661,6 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, return &mhp->ibmr; -err_pbl: - iwch_free_pbl(mhp); - err: ib_umem_release(mhp->umem); kfree(mhp); diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.h b/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.h index 836163fc5429..db5100d27ca2 100644 --- a/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.h +++ b/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.h @@ -340,14 +340,14 @@ int iwch_quiesce_qps(struct iwch_cq *chp); int iwch_resume_qps(struct iwch_cq *chp); void stop_read_rep_timer(struct iwch_qp *qhp); int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php, - struct iwch_mr *mhp, int shift); + struct iwch_mr *mhp, + int shift, + __be64 *page_list); int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php, struct iwch_mr *mhp, int shift, + __be64 *page_list, int npages); -int iwch_alloc_pbl(struct iwch_mr *mhp, int npages); -void iwch_free_pbl(struct iwch_mr *mhp); -int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset); int build_phys_page_list(struct ib_phys_buf *buffer_list, int num_phys_buf, u64 *iova_start, diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch_qp.c b/trunk/drivers/infiniband/hw/cxgb3/iwch_qp.c index 79dbe5beae52..9b4be889c58e 100644 --- a/trunk/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/trunk/drivers/infiniband/hw/cxgb3/iwch_qp.c @@ -655,7 +655,6 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag) { struct iwch_cq *rchp, *schp; int count; - int flushed; rchp = get_chp(qhp->rhp, qhp->attr.rcq); schp = get_chp(qhp->rhp, qhp->attr.scq); @@ -670,22 +669,20 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag) spin_lock(&qhp->lock); cxio_flush_hw_cq(&rchp->cq); cxio_count_rcqes(&rchp->cq, &qhp->wq, &count); - flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count); + cxio_flush_rq(&qhp->wq, &rchp->cq, count); spin_unlock(&qhp->lock); spin_unlock_irqrestore(&rchp->lock, *flag); - if (flushed) - (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); + (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); /* locking heirarchy: cq lock first, then qp lock. 
*/ spin_lock_irqsave(&schp->lock, *flag); spin_lock(&qhp->lock); cxio_flush_hw_cq(&schp->cq); cxio_count_scqes(&schp->cq, &qhp->wq, &count); - flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count); + cxio_flush_sq(&qhp->wq, &schp->cq, count); spin_unlock(&qhp->lock); spin_unlock_irqrestore(&schp->lock, *flag); - if (flushed) - (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); + (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); /* deref */ if (atomic_dec_and_test(&qhp->refcnt)) @@ -883,6 +880,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp, ep = qhp->ep; get_ep(&ep->com); } + flush_qp(qhp, &flag); break; case IWCH_QP_STATE_TERMINATE: qhp->attr.state = IWCH_QP_STATE_TERMINATE; @@ -913,7 +911,6 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp, } switch (attrs->next_state) { case IWCH_QP_STATE_IDLE: - flush_qp(qhp, &flag); qhp->attr.state = IWCH_QP_STATE_IDLE; qhp->attr.llp_stream_handle = NULL; put_ep(&qhp->ep->com); diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_classes.h b/trunk/drivers/infiniband/hw/ehca/ehca_classes.h index 1e9e99a13933..00bab60f6de4 100644 --- a/trunk/drivers/infiniband/hw/ehca/ehca_classes.h +++ b/trunk/drivers/infiniband/hw/ehca/ehca_classes.h @@ -192,8 +192,6 @@ struct ehca_qp { int mtu_shift; u32 message_count; u32 packet_count; - atomic_t nr_events; /* events seen */ - wait_queue_head_t wait_completion; }; #define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ) diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_hca.c b/trunk/drivers/infiniband/hw/ehca/ehca_hca.c index bc3b37d2070f..2515cbde7e65 100644 --- a/trunk/drivers/infiniband/hw/ehca/ehca_hca.c +++ b/trunk/drivers/infiniband/hw/ehca/ehca_hca.c @@ -101,6 +101,7 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) props->max_ee = limit_uint(rblock->max_rd_ee_context); props->max_rdd = limit_uint(rblock->max_rd_domain); props->max_fmr = limit_uint(rblock->max_mr); + props->local_ca_ack_delay = limit_uint(rblock->local_ca_ack_delay); props->max_qp_rd_atom = limit_uint(rblock->max_rr_qp); props->max_ee_rd_atom = limit_uint(rblock->max_rr_ee_context); props->max_res_rd_atom = limit_uint(rblock->max_rr_hca); @@ -114,7 +115,7 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) } props->max_pkeys = 16; - props->local_ca_ack_delay = min_t(u8, rblock->local_ca_ack_delay, 255); + props->local_ca_ack_delay = limit_uint(rblock->local_ca_ack_delay); props->max_raw_ipv6_qp = limit_uint(rblock->max_raw_ipv6_qp); props->max_raw_ethy_qp = limit_uint(rblock->max_raw_ethy_qp); props->max_mcast_grp = limit_uint(rblock->max_mcast_grp); @@ -135,7 +136,7 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) return ret; } -static enum ib_mtu map_mtu(struct ehca_shca *shca, u32 fw_mtu) +static int map_mtu(struct ehca_shca *shca, u32 fw_mtu) { switch (fw_mtu) { case 0x1: @@ -155,7 +156,7 @@ static enum ib_mtu map_mtu(struct ehca_shca *shca, u32 fw_mtu) } } -static u8 map_number_of_vls(struct ehca_shca *shca, u32 vl_cap) +static int map_number_of_vls(struct ehca_shca *shca, u32 vl_cap) { switch (vl_cap) { case 0x1: diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_irq.c b/trunk/drivers/infiniband/hw/ehca/ehca_irq.c index ce1ab0571be3..ca5eb0cb628c 100644 --- a/trunk/drivers/infiniband/hw/ehca/ehca_irq.c +++ b/trunk/drivers/infiniband/hw/ehca/ehca_irq.c @@ -204,8 +204,6 @@ static void qp_event_callback(struct ehca_shca *shca, u64 eqe, read_lock(&ehca_qp_idr_lock); qp = idr_find(&ehca_qp_idr, token); - if 
(qp) - atomic_inc(&qp->nr_events); read_unlock(&ehca_qp_idr_lock); if (!qp) @@ -225,8 +223,6 @@ static void qp_event_callback(struct ehca_shca *shca, u64 eqe, if (fatal && qp->ext_type == EQPT_SRQBASE) dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED); - if (atomic_dec_and_test(&qp->nr_events)) - wake_up(&qp->wait_completion); return; } diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_qp.c b/trunk/drivers/infiniband/hw/ehca/ehca_qp.c index 3f59587338ea..18fba92fa7ae 100644 --- a/trunk/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/trunk/drivers/infiniband/hw/ehca/ehca_qp.c @@ -566,8 +566,6 @@ static struct ehca_qp *internal_create_qp( return ERR_PTR(-ENOMEM); } - atomic_set(&my_qp->nr_events, 0); - init_waitqueue_head(&my_qp->wait_completion); spin_lock_init(&my_qp->spinlock_s); spin_lock_init(&my_qp->spinlock_r); my_qp->qp_type = qp_type; @@ -1936,9 +1934,6 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp, idr_remove(&ehca_qp_idr, my_qp->token); write_unlock_irqrestore(&ehca_qp_idr_lock, flags); - /* now wait until all pending events have completed */ - wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events)); - h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp); if (h_ret != H_SUCCESS) { ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%li " diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_driver.c b/trunk/drivers/infiniband/hw/ipath/ipath_driver.c index ce7b7c34360e..acf30c06a0c0 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_driver.c @@ -1197,7 +1197,7 @@ void ipath_kreceive(struct ipath_portdata *pd) } reloop: - for (last = 0, i = 1; !last; i += !last) { + for (last = 0, i = 1; !last; i++) { hdr = dd->ipath_f_get_msgheader(dd, rhf_addr); eflags = ipath_hdrget_err_flags(rhf_addr); etype = ipath_hdrget_rcv_type(rhf_addr); @@ -1428,40 +1428,6 @@ static void ipath_update_pio_bufs(struct ipath_devdata *dd) spin_unlock_irqrestore(&ipath_pioavail_lock, flags); } -/* - * used to force update of pioavailshadow if we can't get a pio buffer. - * Needed primarily due to exitting freeze mode after recovering - * from errors. Done lazily, because it's safer (known to not - * be writing pio buffers). - */ -static void ipath_reset_availshadow(struct ipath_devdata *dd) -{ - int i, im; - unsigned long flags; - - spin_lock_irqsave(&ipath_pioavail_lock, flags); - for (i = 0; i < dd->ipath_pioavregs; i++) { - u64 val, oldval; - /* deal with 6110 chip bug on high register #s */ - im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ? - i ^ 1 : i; - val = le64_to_cpu(dd->ipath_pioavailregs_dma[im]); - /* - * busy out the buffers not in the kernel avail list, - * without changing the generation bits. 
- */ - oldval = dd->ipath_pioavailshadow[i]; - dd->ipath_pioavailshadow[i] = val | - ((~dd->ipath_pioavailkernel[i] << - INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT) & - 0xaaaaaaaaaaaaaaaaULL); /* All BUSY bits in qword */ - if (oldval != dd->ipath_pioavailshadow[i]) - ipath_dbg("shadow[%d] was %Lx, now %lx\n", - i, oldval, dd->ipath_pioavailshadow[i]); - } - spin_unlock_irqrestore(&ipath_pioavail_lock, flags); -} - /** * ipath_setrcvhdrsize - set the receive header size * @dd: the infinipath device @@ -1516,12 +1482,9 @@ static noinline void no_pio_bufs(struct ipath_devdata *dd) */ ipath_stats.sps_nopiobufs++; if (!(++dd->ipath_consec_nopiobuf % 100000)) { - ipath_force_pio_avail_update(dd); /* at start */ - ipath_dbg("%u tries no piobufavail ts%lx; dmacopy: " - "%llx %llx %llx %llx\n" - "ipath shadow: %lx %lx %lx %lx\n", + ipath_dbg("%u pio sends with no bufavail; dmacopy: " + "%llx %llx %llx %llx; shadow: %lx %lx %lx %lx\n", dd->ipath_consec_nopiobuf, - (unsigned long)get_cycles(), (unsigned long long) le64_to_cpu(dma[0]), (unsigned long long) le64_to_cpu(dma[1]), (unsigned long long) le64_to_cpu(dma[2]), @@ -1533,17 +1496,14 @@ static noinline void no_pio_bufs(struct ipath_devdata *dd) */ if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > (sizeof(shadow[0]) * 4 * 4)) - ipath_dbg("2nd group: dmacopy: " - "%llx %llx %llx %llx\n" - "ipath shadow: %lx %lx %lx %lx\n", + ipath_dbg("2nd group: dmacopy: %llx %llx " + "%llx %llx; shadow: %lx %lx %lx %lx\n", (unsigned long long)le64_to_cpu(dma[4]), (unsigned long long)le64_to_cpu(dma[5]), (unsigned long long)le64_to_cpu(dma[6]), (unsigned long long)le64_to_cpu(dma[7]), - shadow[4], shadow[5], shadow[6], shadow[7]); - - /* at end, so update likely happened */ - ipath_reset_availshadow(dd); + shadow[4], shadow[5], shadow[6], + shadow[7]); } } @@ -1692,46 +1652,19 @@ void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start, unsigned len, int avail) { unsigned long flags; - unsigned end, cnt = 0, next; + unsigned end; /* There are two bits per send buffer (busy and generation) */ start *= 2; - end = start + len * 2; + len *= 2; + end = start + len; + /* Set or clear the generation bits. */ spin_lock_irqsave(&ipath_pioavail_lock, flags); - /* Set or clear the busy bit in the shadow. */ while (start < end) { if (avail) { - unsigned long dma; - int i, im; - /* - * the BUSY bit will never be set, because we disarm - * the user buffers before we hand them back to the - * kernel. We do have to make sure the generation - * bit is set correctly in shadow, since it could - * have changed many times while allocated to user. - * We can't use the bitmap functions on the full - * dma array because it is always little-endian, so - * we have to flip to host-order first. - * BITS_PER_LONG is slightly wrong, since it's - * always 64 bits per register in chip... - * We only work on 64 bit kernels, so that's OK. - */ - /* deal with 6110 chip bug on high register #s */ - i = start / BITS_PER_LONG; - im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ? 
- i ^ 1 : i; - __clear_bit(INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT - + start, dd->ipath_pioavailshadow); - dma = (unsigned long) le64_to_cpu( - dd->ipath_pioavailregs_dma[im]); - if (test_bit((INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT - + start) % BITS_PER_LONG, &dma)) - __set_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT - + start, dd->ipath_pioavailshadow); - else - __clear_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT - + start, dd->ipath_pioavailshadow); + __clear_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT, + dd->ipath_pioavailshadow); __set_bit(start, dd->ipath_pioavailkernel); } else { __set_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT, @@ -1740,44 +1673,7 @@ void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start, } start += 2; } - - if (dd->ipath_pioupd_thresh) { - end = 2 * (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k); - next = find_first_bit(dd->ipath_pioavailkernel, end); - while (next < end) { - cnt++; - next = find_next_bit(dd->ipath_pioavailkernel, end, - next + 1); - } - } spin_unlock_irqrestore(&ipath_pioavail_lock, flags); - - /* - * When moving buffers from kernel to user, if number assigned to - * the user is less than the pio update threshold, and threshold - * is supported (cnt was computed > 0), drop the update threshold - * so we update at least once per allocated number of buffers. - * In any case, if the kernel buffers are less than the threshold, - * drop the threshold. We don't bother increasing it, having once - * decreased it, since it would typically just cycle back and forth. - * If we don't decrease below buffers in use, we can wait a long - * time for an update, until some other context uses PIO buffers. - */ - if (!avail && len < cnt) - cnt = len; - if (cnt < dd->ipath_pioupd_thresh) { - dd->ipath_pioupd_thresh = cnt; - ipath_dbg("Decreased pio update threshold to %u\n", - dd->ipath_pioupd_thresh); - spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); - dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK - << INFINIPATH_S_UPDTHRESH_SHIFT); - dd->ipath_sendctrl |= dd->ipath_pioupd_thresh - << INFINIPATH_S_UPDTHRESH_SHIFT; - ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, - dd->ipath_sendctrl); - spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); - } } /** @@ -1898,8 +1794,8 @@ void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl) spin_lock_irqsave(&dd->ipath_sdma_lock, flags); skip_cancel = - test_and_set_bit(IPATH_SDMA_ABORTING, statp) - && !test_bit(IPATH_SDMA_DISABLED, statp); + !test_bit(IPATH_SDMA_DISABLED, statp) && + test_and_set_bit(IPATH_SDMA_ABORTING, statp); spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); if (skip_cancel) goto bail; @@ -1930,9 +1826,6 @@ void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl) ipath_disarm_piobufs(dd, 0, dd->ipath_piobcnt2k + dd->ipath_piobcnt4k); - if (dd->ipath_flags & IPATH_HAS_SEND_DMA) - set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status); - if (restore_sendctrl) { /* else done by caller later if needed */ spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); @@ -1952,6 +1845,7 @@ void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl) /* only wait so long for intr */ dd->ipath_sdma_abort_intr_timeout = jiffies + HZ; dd->ipath_sdma_reset_wait = 200; + __set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status); if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status)) tasklet_hi_schedule(&dd->ipath_sdma_abort_task); spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_file_ops.c 
b/trunk/drivers/infiniband/hw/ipath/ipath_file_ops.c index 3295177c937e..8b1752202e78 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_file_ops.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_file_ops.c @@ -173,25 +173,47 @@ static int ipath_get_base_info(struct file *fp, (void *) dd->ipath_statusp - (void *) dd->ipath_pioavailregs_dma; if (!shared) { - kinfo->spi_piocnt = pd->port_piocnt; + kinfo->spi_piocnt = dd->ipath_pbufsport; kinfo->spi_piobufbase = (u64) pd->port_piobufs; kinfo->__spi_uregbase = (u64) dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port; } else if (master) { - kinfo->spi_piocnt = (pd->port_piocnt / subport_cnt) + - (pd->port_piocnt % subport_cnt); + kinfo->spi_piocnt = (dd->ipath_pbufsport / subport_cnt) + + (dd->ipath_pbufsport % subport_cnt); /* Master's PIO buffers are after all the slave's */ kinfo->spi_piobufbase = (u64) pd->port_piobufs + dd->ipath_palign * - (pd->port_piocnt - kinfo->spi_piocnt); + (dd->ipath_pbufsport - kinfo->spi_piocnt); } else { unsigned slave = subport_fp(fp) - 1; - kinfo->spi_piocnt = pd->port_piocnt / subport_cnt; + kinfo->spi_piocnt = dd->ipath_pbufsport / subport_cnt; kinfo->spi_piobufbase = (u64) pd->port_piobufs + dd->ipath_palign * kinfo->spi_piocnt * slave; } + /* + * Set the PIO avail update threshold to no larger + * than the number of buffers per process. Note that + * we decrease it here, but won't ever increase it. + */ + if (dd->ipath_pioupd_thresh && + kinfo->spi_piocnt < dd->ipath_pioupd_thresh) { + unsigned long flags; + + dd->ipath_pioupd_thresh = kinfo->spi_piocnt; + ipath_dbg("Decreased pio update threshold to %u\n", + dd->ipath_pioupd_thresh); + spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); + dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK + << INFINIPATH_S_UPDTHRESH_SHIFT); + dd->ipath_sendctrl |= dd->ipath_pioupd_thresh + << INFINIPATH_S_UPDTHRESH_SHIFT; + ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, + dd->ipath_sendctrl); + spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); + } + if (shared) { kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port; @@ -1287,19 +1309,19 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma) ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port; if (!pd->port_subport_cnt) { /* port is not shared */ - piocnt = pd->port_piocnt; + piocnt = dd->ipath_pbufsport; piobufs = pd->port_piobufs; } else if (!subport_fp(fp)) { /* caller is the master */ - piocnt = (pd->port_piocnt / pd->port_subport_cnt) + - (pd->port_piocnt % pd->port_subport_cnt); + piocnt = (dd->ipath_pbufsport / pd->port_subport_cnt) + + (dd->ipath_pbufsport % pd->port_subport_cnt); piobufs = pd->port_piobufs + - dd->ipath_palign * (pd->port_piocnt - piocnt); + dd->ipath_palign * (dd->ipath_pbufsport - piocnt); } else { unsigned slave = subport_fp(fp) - 1; /* caller is a slave */ - piocnt = pd->port_piocnt / pd->port_subport_cnt; + piocnt = dd->ipath_pbufsport / pd->port_subport_cnt; piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave; } @@ -1611,6 +1633,9 @@ static int try_alloc_port(struct ipath_devdata *dd, int port, port_fp(fp) = pd; pd->port_pid = current->pid; strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm)); + ipath_chg_pioavailkernel(dd, + dd->ipath_pbufsport * (pd->port_port - 1), + dd->ipath_pbufsport, 0); ipath_stats.sps_ports++; ret = 0; } else @@ -1913,25 +1938,11 @@ static int ipath_do_user_init(struct file *fp, /* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */ - /* some ports may 
get extra buffers, calculate that here */ - if (pd->port_port <= dd->ipath_ports_extrabuf) - pd->port_piocnt = dd->ipath_pbufsport + 1; - else - pd->port_piocnt = dd->ipath_pbufsport; - /* for right now, kernel piobufs are at end, so port 1 is at 0 */ - if (pd->port_port <= dd->ipath_ports_extrabuf) - pd->port_pio_base = (dd->ipath_pbufsport + 1) - * (pd->port_port - 1); - else - pd->port_pio_base = dd->ipath_ports_extrabuf + - dd->ipath_pbufsport * (pd->port_port - 1); pd->port_piobufs = dd->ipath_piobufbase + - pd->port_pio_base * dd->ipath_palign; - ipath_cdbg(VERBOSE, "piobuf base for port %u is 0x%x, piocnt %u," - " first pio %u\n", pd->port_port, pd->port_piobufs, - pd->port_piocnt, pd->port_pio_base); - ipath_chg_pioavailkernel(dd, pd->port_pio_base, pd->port_piocnt, 0); + dd->ipath_pbufsport * (pd->port_port - 1) * dd->ipath_palign; + ipath_cdbg(VERBOSE, "Set base of piobufs for port %u to 0x%x\n", + pd->port_port, pd->port_piobufs); /* * Now allocate the rcvhdr Q and eager TIDs; skip the TID @@ -2096,6 +2107,7 @@ static int ipath_close(struct inode *in, struct file *fp) } if (dd->ipath_kregbase) { + int i; /* atomically clear receive enable port and intr avail. */ clear_bit(dd->ipath_r_portenable_shift + port, &dd->ipath_rcvctrl); @@ -2124,9 +2136,9 @@ static int ipath_close(struct inode *in, struct file *fp) ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr, pd->port_port, dd->ipath_dummy_hdrq_phys); - ipath_disarm_piobufs(dd, pd->port_pio_base, pd->port_piocnt); - ipath_chg_pioavailkernel(dd, pd->port_pio_base, - pd->port_piocnt, 1); + i = dd->ipath_pbufsport * (port - 1); + ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport); + ipath_chg_pioavailkernel(dd, i, dd->ipath_pbufsport, 1); dd->ipath_f_clear_tids(dd, pd->port_port); diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_iba7220.c b/trunk/drivers/infiniband/hw/ipath/ipath_iba7220.c index 8eee7830f042..e3ec0d1bdf50 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_iba7220.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_iba7220.c @@ -595,7 +595,7 @@ static void ipath_7220_txe_recover(struct ipath_devdata *dd) dev_info(&dd->pcidev->dev, "Recovering from TXE PIO parity error\n"); - ipath_disarm_senderrbufs(dd); + ipath_disarm_senderrbufs(dd, 1); } @@ -675,8 +675,10 @@ static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg, ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control); if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) { /* - * Parity errors in send memory are recoverable by h/w - * just do housekeeping, exit freeze mode and continue. + * Parity errors in send memory are recoverable, + * just cancel the send (if indicated in * sendbuffererror), + * count the occurrence, unfreeze (if no other handled + * hardware error bits are set), and continue. 
*/ if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) @@ -685,6 +687,13 @@ static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg, hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT); + if (!hwerrs) { + /* else leave in freeze mode */ + ipath_write_kreg(dd, + dd->ipath_kregs->kr_control, + dd->ipath_control); + goto bail; + } } if (hwerrs) { /* @@ -714,8 +723,8 @@ static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg, *dd->ipath_statusp |= IPATH_STATUS_HWERROR; dd->ipath_flags &= ~IPATH_INITTED; } else { - ipath_dbg("Clearing freezemode on ignored or " - "recovered hardware error\n"); + ipath_dbg("Clearing freezemode on ignored hardware " + "error\n"); ipath_clear_freeze(dd); } } @@ -861,9 +870,8 @@ static int ipath_7220_boardname(struct ipath_devdata *dd, char *name, "revision %u.%u!\n", dd->ipath_majrev, dd->ipath_minrev); ret = 1; - } else if (dd->ipath_minrev == 1 && - !(dd->ipath_flags & IPATH_INITTED)) { - /* Rev1 chips are prototype. Complain at init, but allow use */ + } else if (dd->ipath_minrev == 1) { + /* Rev1 chips are prototype. Complain, but allow use */ ipath_dev_err(dd, "Unsupported hardware " "revision %u.%u, Contact support@qlogic.com\n", dd->ipath_majrev, dd->ipath_minrev); @@ -1958,7 +1966,7 @@ static void ipath_7220_config_ports(struct ipath_devdata *dd, ushort cfgports) dd->ipath_rcvctrl); dd->ipath_p0_rcvegrcnt = 2048; /* always */ if (dd->ipath_flags & IPATH_HAS_SEND_DMA) - dd->ipath_pioreserved = 3; /* kpiobufs used for PIO */ + dd->ipath_pioreserved = 1; /* reserve a buffer */ } diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_init_chip.c b/trunk/drivers/infiniband/hw/ipath/ipath_init_chip.c index 3e5baa43fc82..27dd89476660 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_init_chip.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_init_chip.c @@ -41,7 +41,7 @@ /* * min buffers we want to have per port, after driver */ -#define IPATH_MIN_USER_PORT_BUFCNT 7 +#define IPATH_MIN_USER_PORT_BUFCNT 8 /* * Number of ports we are configured to use (to allow for more pio @@ -54,9 +54,13 @@ MODULE_PARM_DESC(cfgports, "Set max number of ports to use"); /* * Number of buffers reserved for driver (verbs and layered drivers.) - * Initialized based on number of PIO buffers if not set via module interface. + * Reserved at end of buffer list. Initialized based on + * number of PIO buffers if not set via module interface. * The problem with this is that it's global, but we'll use different - * numbers for different chip types. + * numbers for different chip types. So the default value is not + * very useful. I've redefined it for the 1.3 release so that it's + * zero unless set by the user to something else, in which case we + * try to respect it. */ static ushort ipath_kpiobufs; @@ -542,12 +546,9 @@ static void enable_chip(struct ipath_devdata *dd, int reinit) pioavail = dd->ipath_pioavailregs_dma[i ^ 1]; else pioavail = dd->ipath_pioavailregs_dma[i]; - /* - * don't need to worry about ipath_pioavailkernel here - * because we will call ipath_chg_pioavailkernel() later - * in initialization, to busy out buffers as needed - */ - dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail); + dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail) | + (~dd->ipath_pioavailkernel[i] << + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT); } /* can get counters, stats, etc. 
*/ dd->ipath_flags |= IPATH_PRESENT; @@ -707,11 +708,12 @@ static void verify_interrupt(unsigned long opaque) int ipath_init_chip(struct ipath_devdata *dd, int reinit) { int ret = 0; - u32 kpiobufs, defkbufs; + u32 val32, kpiobufs; u32 piobufs, uports; u64 val; struct ipath_portdata *pd; gfp_t gfp_flags = GFP_USER | __GFP_COMP; + unsigned long flags; ret = init_housekeeping(dd, reinit); if (ret) @@ -751,52 +753,69 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit) dd->ipath_pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) / (sizeof(u64) * BITS_PER_BYTE / 2); uports = dd->ipath_cfgports ? dd->ipath_cfgports - 1 : 0; - if (piobufs > 144) - defkbufs = 32 + dd->ipath_pioreserved; + if (ipath_kpiobufs == 0) { + /* not set by user (this is default) */ + if (piobufs > 144) + kpiobufs = 32; + else + kpiobufs = 16; + } else - defkbufs = 16 + dd->ipath_pioreserved; + kpiobufs = ipath_kpiobufs; - if (ipath_kpiobufs && (ipath_kpiobufs + - (uports * IPATH_MIN_USER_PORT_BUFCNT)) > piobufs) { + if (kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT) > piobufs) { int i = (int) piobufs - (int) (uports * IPATH_MIN_USER_PORT_BUFCNT); if (i < 1) i = 1; dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of " "%d for kernel leaves too few for %d user ports " - "(%d each); using %u\n", ipath_kpiobufs, + "(%d each); using %u\n", kpiobufs, piobufs, uports, IPATH_MIN_USER_PORT_BUFCNT, i); /* * shouldn't change ipath_kpiobufs, because could be * different for different devices... */ kpiobufs = i; - } else if (ipath_kpiobufs) - kpiobufs = ipath_kpiobufs; - else - kpiobufs = defkbufs; + } dd->ipath_lastport_piobuf = piobufs - kpiobufs; dd->ipath_pbufsport = uports ? dd->ipath_lastport_piobuf / uports : 0; - /* if not an even divisor, some user ports get extra buffers */ - dd->ipath_ports_extrabuf = dd->ipath_lastport_piobuf - - (dd->ipath_pbufsport * uports); - if (dd->ipath_ports_extrabuf) - ipath_dbg("%u pbufs/port leaves some unused, add 1 buffer to " - "ports <= %u\n", dd->ipath_pbufsport, - dd->ipath_ports_extrabuf); + val32 = dd->ipath_lastport_piobuf - (dd->ipath_pbufsport * uports); + if (val32 > 0) { + ipath_dbg("allocating %u pbufs/port leaves %u unused, " + "add to kernel\n", dd->ipath_pbufsport, val32); + dd->ipath_lastport_piobuf -= val32; + kpiobufs += val32; + ipath_dbg("%u pbufs/port leaves %u unused, add to kernel\n", + dd->ipath_pbufsport, val32); + } dd->ipath_lastpioindex = 0; dd->ipath_lastpioindexl = dd->ipath_piobcnt2k; - /* ipath_pioavailshadow initialized earlier */ + ipath_chg_pioavailkernel(dd, 0, piobufs, 1); ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u " "each for %u user ports\n", kpiobufs, piobufs, dd->ipath_pbufsport, uports); + if (dd->ipath_pioupd_thresh) { + if (dd->ipath_pbufsport < dd->ipath_pioupd_thresh) + dd->ipath_pioupd_thresh = dd->ipath_pbufsport; + if (kpiobufs < dd->ipath_pioupd_thresh) + dd->ipath_pioupd_thresh = kpiobufs; + } + ret = dd->ipath_f_early_init(dd); if (ret) { ipath_dev_err(dd, "Early initialization failure\n"); goto done; } + /* + * Cancel any possible active sends from early driver load. + * Follows early_init because some chips have to initialize + * PIO buffers in early_init to avoid false parity errors. + */ + ipath_cancel_sends(dd, 0); + /* * Early_init sets rcvhdrentsize and rcvhdrsize, so this must be * done after early_init. 
@@ -817,7 +836,6 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit) ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr, dd->ipath_pioavailregs_phys); - /* * this is to detect s/w errors, which the h/w works around by * ignoring the low 6 bits of address, if it wasn't aligned. @@ -844,6 +862,12 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit) ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED); ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL); + spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); + dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE; + ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); + ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); + spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); + /* * before error clears, since we expect serdes pll errors during * this, the first time after reset @@ -916,19 +940,6 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit) else enable_chip(dd, reinit); - /* after enable_chip, so pioavailshadow setup */ - ipath_chg_pioavailkernel(dd, 0, piobufs, 1); - - /* - * Cancel any possible active sends from early driver load. - * Follows early_init because some chips have to initialize - * PIO buffers in early_init to avoid false parity errors. - * After enable and ipath_chg_pioavailkernel so we can safely - * enable pioavail updates and PIOENABLE; packets are now - * ready to go out. - */ - ipath_cancel_sends(dd, 1); - if (!reinit) { /* * Used when we close a port, for DMA already in flight diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_intr.c b/trunk/drivers/infiniband/hw/ipath/ipath_intr.c index 26900b3b7a4e..1b58f4737c71 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_intr.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_intr.c @@ -38,12 +38,42 @@ #include "ipath_verbs.h" #include "ipath_common.h" +/* + * clear (write) a pio buffer, to clear a parity error. This routine + * should only be called when in freeze mode, and the buffer should be + * canceled afterwards. + */ +static void ipath_clrpiobuf(struct ipath_devdata *dd, u32 pnum) +{ + u32 __iomem *pbuf; + u32 dwcnt; /* dword count to write */ + if (pnum < dd->ipath_piobcnt2k) { + pbuf = (u32 __iomem *) (dd->ipath_pio2kbase + pnum * + dd->ipath_palign); + dwcnt = dd->ipath_piosize2k >> 2; + } + else { + pbuf = (u32 __iomem *) (dd->ipath_pio4kbase + + (pnum - dd->ipath_piobcnt2k) * dd->ipath_4kalign); + dwcnt = dd->ipath_piosize4k >> 2; + } + dev_info(&dd->pcidev->dev, + "Rewrite PIO buffer %u, to recover from parity error\n", + pnum); + + /* no flush required, since already in freeze */ + writel(dwcnt + 1, pbuf); + while (--dwcnt) + writel(0, pbuf++); +} /* * Called when we might have an error that is specific to a particular * PIO buffer, and may need to cancel that buffer, so it can be re-used. + * If rewrite is true, and bits are set in the sendbufferror registers, + * we'll write to the buffer, for error recovery on parity errors. 
*/ -void ipath_disarm_senderrbufs(struct ipath_devdata *dd) +void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite) { u32 piobcnt; unsigned long sbuf[4]; @@ -79,8 +109,11 @@ void ipath_disarm_senderrbufs(struct ipath_devdata *dd) } for (i = 0; i < piobcnt; i++) - if (test_bit(i, sbuf)) + if (test_bit(i, sbuf)) { + if (rewrite) + ipath_clrpiobuf(dd, i); ipath_disarm_piobufs(dd, i, 1); + } /* ignore armlaunch errs for a bit */ dd->ipath_lastcancel = jiffies+3; } @@ -131,7 +164,7 @@ static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs) { u64 ignore_this_time = 0; - ipath_disarm_senderrbufs(dd); + ipath_disarm_senderrbufs(dd, 0); if ((errs & E_SUM_LINK_PKTERRS) && !(dd->ipath_flags & IPATH_LINKACTIVE)) { /* @@ -876,8 +909,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs) * processes (causing armlaunch), send errors due to going into freeze mode, * etc., and try to avoid causing extra interrupts while doing so. * Forcibly update the in-memory pioavail register copies after cleanup - * because the chip won't do it while in freeze mode (the register values - * themselves are kept correct). + * because the chip won't do it for anything changing while in freeze mode + * (we don't want to wait for the next pio buffer state change). * Make sure that we don't lose any important interrupts by using the chip * feature that says that writing 0 to a bit in *clear that is set in * *status will cause an interrupt to be generated again (if allowed by @@ -885,22 +918,43 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs) */ void ipath_clear_freeze(struct ipath_devdata *dd) { + int i, im; + u64 val; + /* disable error interrupts, to avoid confusion */ ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL); /* also disable interrupts; errormask is sometimes overwriten */ ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL); - ipath_cancel_sends(dd, 1); - - /* clear the freeze, and be sure chip saw it */ + /* + * clear all sends, because they have may been + * completed by usercode while in freeze mode, and + * therefore would not be sent, and eventually + * might cause the process to run out of bufs + */ + ipath_cancel_sends(dd, 0); ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control); - ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); - /* force in-memory update now we are out of freeze */ + /* ensure pio avail updates continue */ ipath_force_pio_avail_update(dd); + /* + * We just enabled pioavailupdate, so dma copy is almost certainly + * not yet right, so read the registers directly. Similar to init + */ + for (i = 0; i < dd->ipath_pioavregs; i++) { + /* deal with 6110 chip bug */ + im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ? 
+ i ^ 1 : i; + val = ipath_read_kreg64(dd, (0x1000 / sizeof(u64)) + im); + dd->ipath_pioavailregs_dma[i] = cpu_to_le64(val); + dd->ipath_pioavailshadow[i] = val | + (~dd->ipath_pioavailkernel[i] << + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT); + } + /* * force new interrupt if any hwerr, error or interrupt bits are * still set, and clear "safe" send packet errors related to freeze @@ -1258,8 +1312,10 @@ irqreturn_t ipath_intr(int irq, void *data) ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); - /* always process; sdma verbs uses PIO for acks and VL15 */ - handle_layer_pioavail(dd); + if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA)) + handle_layer_pioavail(dd); + else + ipath_dbg("unexpected BUFAVAIL intr\n"); } ret = IRQ_HANDLED; diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_kernel.h b/trunk/drivers/infiniband/hw/ipath/ipath_kernel.h index 02b24a340599..202337ae90dc 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_kernel.h +++ b/trunk/drivers/infiniband/hw/ipath/ipath_kernel.h @@ -117,10 +117,6 @@ struct ipath_portdata { u16 port_subport_cnt; /* non-zero if port is being shared. */ u16 port_subport_id; - /* number of pio bufs for this port (all procs, if shared) */ - u32 port_piocnt; - /* first pio buffer for this port */ - u32 port_pio_base; /* chip offset of PIO buffers for this port */ u32 port_piobufs; /* how many alloc_pages() chunks in port_rcvegrbuf_pages */ @@ -388,8 +384,6 @@ struct ipath_devdata { u32 ipath_lastrpkts; /* pio bufs allocated per port */ u32 ipath_pbufsport; - /* if remainder on bufs/port, ports < extrabuf get 1 extra */ - u32 ipath_ports_extrabuf; u32 ipath_pioupd_thresh; /* update threshold, some chips */ /* * number of ports configured as max; zero is set to number chip @@ -1017,7 +1011,7 @@ void ipath_get_eeprom_info(struct ipath_devdata *); int ipath_update_eeprom_log(struct ipath_devdata *dd); void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr); u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg); -void ipath_disarm_senderrbufs(struct ipath_devdata *); +void ipath_disarm_senderrbufs(struct ipath_devdata *, int); void ipath_force_pio_avail_update(struct ipath_devdata *); void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev); diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_rc.c b/trunk/drivers/infiniband/hw/ipath/ipath_rc.c index 08b11b567614..c405dfba5531 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_rc.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_rc.c @@ -1746,11 +1746,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, qp->r_wrid_valid = 0; wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; - if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) || - opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) - wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; - else - wc.opcode = IB_WC_RECV; + wc.opcode = IB_WC_RECV; wc.vendor_err = 0; wc.qp = &qp->ibqp; wc.src_qp = qp->remote_qpn; diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_ruc.c b/trunk/drivers/infiniband/hw/ipath/ipath_ruc.c index 9e3fe61cbd08..8ac5c1d82ccd 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_ruc.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_ruc.c @@ -481,10 +481,9 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp) wake_up(&qp->wait); } -static void want_buffer(struct ipath_devdata *dd, struct ipath_qp *qp) +static void want_buffer(struct ipath_devdata *dd) { - if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA) || - qp->ibqp.qp_type == IB_QPT_SMI) { + if 
(!(dd->ipath_flags & IPATH_HAS_SEND_DMA)) { unsigned long flags; spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); @@ -520,7 +519,7 @@ static void ipath_no_bufs_available(struct ipath_qp *qp, spin_lock_irqsave(&dev->pending_lock, flags); list_add_tail(&qp->piowait, &dev->piowait); spin_unlock_irqrestore(&dev->pending_lock, flags); - want_buffer(dev->dd, qp); + want_buffer(dev->dd); dev->n_piowait++; } diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_sdma.c b/trunk/drivers/infiniband/hw/ipath/ipath_sdma.c index 3697449c1ba4..1974df7a9f78 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_sdma.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_sdma.c @@ -308,15 +308,13 @@ static void sdma_abort_task(unsigned long opaque) spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); /* - * Don't restart sdma here (with the exception - * below). Wait until link is up to ACTIVE. VL15 MADs - * used to bring the link up use PIO, and multiple link - * transitions otherwise cause the sdma engine to be + * Don't restart sdma here. Wait until link is up to ACTIVE. + * VL15 MADs used to bring the link up use PIO, and multiple + * link transitions otherwise cause the sdma engine to be * stopped and started multiple times. - * The disable is done here, including the shadow, - * so the state is kept consistent. - * See ipath_restart_sdma() for the actual starting - * of sdma. + * The disable is done here, including the shadow, so the + * state is kept consistent. + * See ipath_restart_sdma() for the actual starting of sdma. */ spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE; @@ -328,13 +326,6 @@ static void sdma_abort_task(unsigned long opaque) /* make sure I see next message */ dd->ipath_sdma_abort_jiffies = 0; - /* - * Not everything that takes SDMA offline is a link - * status change. If the link was up, restart SDMA. - */ - if (dd->ipath_flags & IPATH_LINKACTIVE) - ipath_restart_sdma(dd); - goto done; } @@ -436,12 +427,7 @@ int setup_sdma(struct ipath_devdata *dd) goto done; } - /* - * Set initial status as if we had been up, then gone down. - * This lets initial start on transition to ACTIVE be the - * same as restart after link flap. 
- */ - dd->ipath_sdma_status = IPATH_SDMA_ABORT_ABORTED; + dd->ipath_sdma_status = 0; dd->ipath_sdma_abort_jiffies = 0; dd->ipath_sdma_generation = 0; dd->ipath_sdma_descq_tail = 0; @@ -463,19 +449,16 @@ int setup_sdma(struct ipath_devdata *dd) ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, dd->ipath_sdma_head_phys); - /* - * Reserve all the former "kernel" piobufs, using high number range - * so we get as many 4K buffers as possible - */ - n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k; - i = dd->ipath_lastport_piobuf + dd->ipath_pioreserved; - ipath_chg_pioavailkernel(dd, i, n - i , 0); - for (; i < n; ++i) { + /* Reserve all the former "kernel" piobufs */ + n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - dd->ipath_pioreserved; + for (i = dd->ipath_lastport_piobuf; i < n; ++i) { unsigned word = i / 64; unsigned bit = i & 63; BUG_ON(word >= 3); senddmabufmask[word] |= 1ULL << bit; } + ipath_chg_pioavailkernel(dd, dd->ipath_lastport_piobuf, + n - dd->ipath_lastport_piobuf, 0); ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, senddmabufmask[0]); ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, @@ -632,9 +615,6 @@ void ipath_restart_sdma(struct ipath_devdata *dd) ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); - /* notify upper layers */ - ipath_ib_piobufavail(dd->verbs_dev); - bail: return; } diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_verbs.c b/trunk/drivers/infiniband/hw/ipath/ipath_verbs.c index 5015cd2e57bd..e63927cce5b5 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_verbs.c @@ -396,6 +396,7 @@ static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr) wqe = get_swqe_ptr(qp, qp->s_head); wqe->wr = *wr; + wqe->ssn = qp->s_ssn++; wqe->length = 0; if (wr->num_sge) { acc = wr->opcode >= IB_WR_RDMA_READ ? 
@@ -421,7 +422,6 @@ static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr) goto bail_inval; } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu) goto bail_inval; - wqe->ssn = qp->s_ssn++; qp->s_head = next; ret = 0; diff --git a/trunk/drivers/infiniband/hw/mlx4/cq.c b/trunk/drivers/infiniband/hw/mlx4/cq.c index 4521319b1406..2f199c5c4a72 100644 --- a/trunk/drivers/infiniband/hw/mlx4/cq.c +++ b/trunk/drivers/infiniband/hw/mlx4/cq.c @@ -246,7 +246,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector if (context) ib_umem_release(cq->umem); else - mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); + mlx4_ib_free_cq_buf(dev, &cq->buf, entries); err_db: if (!context) @@ -434,7 +434,7 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq) mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db); ib_umem_release(mcq->umem); } else { - mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe); + mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1); mlx4_db_free(dev->dev, &mcq->db); } diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib.h b/trunk/drivers/infiniband/ulp/ipoib/ipoib.h index ca126fc2b853..9044f8803532 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib.h @@ -334,7 +334,6 @@ struct ipoib_dev_priv { #endif int hca_caps; struct ipoib_ethtool_st ethtool; - struct timer_list poll_timer; }; struct ipoib_ah { @@ -405,7 +404,6 @@ extern struct workqueue_struct *ipoib_workqueue; int ipoib_poll(struct napi_struct *napi, int budget); void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr); -void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr); struct ipoib_ah *ipoib_create_ah(struct net_device *dev, struct ib_pd *pd, struct ib_ah_attr *attr); diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_ib.c index f429bce24c20..97b815c1a3fc 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -461,26 +461,6 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr) netif_rx_schedule(dev, &priv->napi); } -static void drain_tx_cq(struct net_device *dev) -{ - struct ipoib_dev_priv *priv = netdev_priv(dev); - unsigned long flags; - - spin_lock_irqsave(&priv->tx_lock, flags); - while (poll_tx(priv)) - ; /* nothing */ - - if (netif_queue_stopped(dev)) - mod_timer(&priv->poll_timer, jiffies + 1); - - spin_unlock_irqrestore(&priv->tx_lock, flags); -} - -void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr) -{ - drain_tx_cq((struct net_device *)dev_ptr); -} - static inline int post_send(struct ipoib_dev_priv *priv, unsigned int wr_id, struct ib_ah *address, u32 qpn, @@ -575,22 +555,12 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, else priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; - if (++priv->tx_outstanding == ipoib_sendq_size) { - ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n"); - if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP)) - ipoib_warn(priv, "request notify on send CQ failed\n"); - netif_stop_queue(dev); - } - if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), address->ah, qpn, tx_req, phead, hlen))) { ipoib_warn(priv, "post_send failed\n"); ++dev->stats.tx_errors; - --priv->tx_outstanding; ipoib_dma_unmap_tx(priv->ca, tx_req); dev_kfree_skb_any(skb); - if (netif_queue_stopped(dev)) - netif_wake_queue(dev); } else { dev->trans_start = jiffies; @@ -598,11 +568,14 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, 
++priv->tx_head; skb_orphan(skb); + if (++priv->tx_outstanding == ipoib_sendq_size) { + ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n"); + netif_stop_queue(dev); + } } if (unlikely(priv->tx_outstanding > MAX_SEND_CQE)) - while (poll_tx(priv)) - ; /* nothing */ + poll_tx(priv); } static void __ipoib_reap_ah(struct net_device *dev) @@ -636,11 +609,6 @@ void ipoib_reap_ah(struct work_struct *work) round_jiffies_relative(HZ)); } -static void ipoib_ib_tx_timer_func(unsigned long ctx) -{ - drain_tx_cq((struct net_device *)ctx); -} - int ipoib_ib_dev_open(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); @@ -677,10 +645,6 @@ int ipoib_ib_dev_open(struct net_device *dev) queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, round_jiffies_relative(HZ)); - init_timer(&priv->poll_timer); - priv->poll_timer.function = ipoib_ib_tx_timer_func; - priv->poll_timer.data = (unsigned long)dev; - set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); return 0; @@ -846,7 +810,6 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush) ipoib_dbg(priv, "All sends and receives done.\n"); timeout: - del_timer_sync(&priv->poll_timer); qp_attr.qp_state = IB_QPS_RESET; if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) ipoib_warn(priv, "Failed to modify QP to RESET state\n"); diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index 8766d29ce3b7..c1e7ece1fd44 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_verbs.c @@ -187,8 +187,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) goto out_free_mr; } - priv->send_cq = ib_create_cq(priv->ca, ipoib_send_comp_handler, NULL, - dev, ipoib_sendq_size, 0); + priv->send_cq = ib_create_cq(priv->ca, NULL, NULL, dev, ipoib_sendq_size, 0); if (IS_ERR(priv->send_cq)) { printk(KERN_WARNING "%s: failed to create send CQ\n", ca->name); goto out_free_recv_cq; diff --git a/trunk/drivers/input/misc/Kconfig b/trunk/drivers/input/misc/Kconfig index 3ad8bd9f7543..92b683411d5a 100644 --- a/trunk/drivers/input/misc/Kconfig +++ b/trunk/drivers/input/misc/Kconfig @@ -14,7 +14,7 @@ if INPUT_MISC config INPUT_PCSPKR tristate "PC Speaker support" - depends on PCSPKR_PLATFORM + depends on ALPHA || X86 || MIPS || PPC_PREP || PPC_CHRP || PPC_PSERIES depends on SND_PCSP=n help Say Y here if you want the standard PC Speaker to be used for diff --git a/trunk/drivers/input/serio/hp_sdc.c b/trunk/drivers/input/serio/hp_sdc.c index edfedd9a166c..02b3ad8c0826 100644 --- a/trunk/drivers/input/serio/hp_sdc.c +++ b/trunk/drivers/input/serio/hp_sdc.c @@ -69,7 +69,6 @@ #include #include #include -#include #include #include diff --git a/trunk/drivers/input/serio/i8042-io.h b/trunk/drivers/input/serio/i8042-io.h index f451c7351a9d..3b4e13b9ce1b 100644 --- a/trunk/drivers/input/serio/i8042-io.h +++ b/trunk/drivers/input/serio/i8042-io.h @@ -25,7 +25,7 @@ #elif defined(__arm__) /* defined in include/asm-arm/arch-xxx/irqs.h */ #include -#elif defined(CONFIG_SH_CAYMAN) +#elif defined(CONFIG_SUPERH64) #include #else # define I8042_KBD_IRQ 1 diff --git a/trunk/drivers/macintosh/adb.c b/trunk/drivers/macintosh/adb.c index b8b9e44f7f4e..20978205cd02 100644 --- a/trunk/drivers/macintosh/adb.c +++ b/trunk/drivers/macintosh/adb.c @@ -37,7 +37,7 @@ #include #include #include -#include +#include #include #ifdef CONFIG_PPC @@ -102,7 +102,7 @@ static struct adb_handler { } adb_handler[16]; /* - * The adb_handler_mutex mutex protects all accesses 
to the original_address + * The adb_handler_sem mutex protects all accesses to the original_address * and handler_id fields of adb_handler[i] for all i, and changes to the * handler field. * Accesses to the handler field are protected by the adb_handler_lock @@ -110,7 +110,7 @@ static struct adb_handler { * time adb_unregister returns, we know that the old handler isn't being * called. */ -static DEFINE_MUTEX(adb_handler_mutex); +static DECLARE_MUTEX(adb_handler_sem); static DEFINE_RWLOCK(adb_handler_lock); #if 0 @@ -355,7 +355,7 @@ do_adb_reset_bus(void) msleep(500); } - mutex_lock(&adb_handler_mutex); + down(&adb_handler_sem); write_lock_irq(&adb_handler_lock); memset(adb_handler, 0, sizeof(adb_handler)); write_unlock_irq(&adb_handler_lock); @@ -376,7 +376,7 @@ do_adb_reset_bus(void) if (adb_controller->autopoll) adb_controller->autopoll(autopoll_devs); } - mutex_unlock(&adb_handler_mutex); + up(&adb_handler_sem); blocking_notifier_call_chain(&adb_client_list, ADB_MSG_POST_RESET, NULL); @@ -454,7 +454,7 @@ adb_register(int default_id, int handler_id, struct adb_ids *ids, { int i; - mutex_lock(&adb_handler_mutex); + down(&adb_handler_sem); ids->nids = 0; for (i = 1; i < 16; i++) { if ((adb_handler[i].original_address == default_id) && @@ -472,7 +472,7 @@ adb_register(int default_id, int handler_id, struct adb_ids *ids, ids->id[ids->nids++] = i; } } - mutex_unlock(&adb_handler_mutex); + up(&adb_handler_sem); return ids->nids; } @@ -481,7 +481,7 @@ adb_unregister(int index) { int ret = -ENODEV; - mutex_lock(&adb_handler_mutex); + down(&adb_handler_sem); write_lock_irq(&adb_handler_lock); if (adb_handler[index].handler) { while(adb_handler[index].busy) { @@ -493,7 +493,7 @@ adb_unregister(int index) adb_handler[index].handler = NULL; } write_unlock_irq(&adb_handler_lock); - mutex_unlock(&adb_handler_mutex); + up(&adb_handler_sem); return ret; } @@ -557,19 +557,19 @@ adb_try_handler_change(int address, int new_id) { int ret; - mutex_lock(&adb_handler_mutex); + down(&adb_handler_sem); ret = try_handler_change(address, new_id); - mutex_unlock(&adb_handler_mutex); + up(&adb_handler_sem); return ret; } int adb_get_infos(int address, int *original_address, int *handler_id) { - mutex_lock(&adb_handler_mutex); + down(&adb_handler_sem); *original_address = adb_handler[address].original_address; *handler_id = adb_handler[address].handler_id; - mutex_unlock(&adb_handler_mutex); + up(&adb_handler_sem); return (*original_address != 0); } @@ -628,10 +628,10 @@ do_adb_query(struct adb_request *req) case ADB_QUERY_GETDEVINFO: if (req->nbytes < 3) break; - mutex_lock(&adb_handler_mutex); + down(&adb_handler_sem); req->reply[0] = adb_handler[req->data[2]].original_address; req->reply[1] = adb_handler[req->data[2]].handler_id; - mutex_unlock(&adb_handler_mutex); + up(&adb_handler_sem); req->complete = 1; req->reply_len = 2; adb_write_done(req); diff --git a/trunk/drivers/macintosh/therm_pm72.c b/trunk/drivers/macintosh/therm_pm72.c index ddfb426a9abd..1e0a69a5e815 100644 --- a/trunk/drivers/macintosh/therm_pm72.c +++ b/trunk/drivers/macintosh/therm_pm72.c @@ -122,7 +122,6 @@ #include #include #include -#include #include #include #include @@ -170,7 +169,7 @@ static int rackmac; static s32 dimm_output_clamp; static int fcu_rpm_shift; static int fcu_tickle_ticks; -static DEFINE_MUTEX(driver_lock); +static DECLARE_MUTEX(driver_lock); /* * We have 3 types of CPU PID control. 
One is "split" old style control @@ -730,9 +729,9 @@ static void fetch_cpu_pumps_minmax(void) static ssize_t show_##name(struct device *dev, struct device_attribute *attr, char *buf) \ { \ ssize_t r; \ - mutex_lock(&driver_lock); \ + down(&driver_lock); \ r = sprintf(buf, "%d.%03d", FIX32TOPRINT(data)); \ - mutex_unlock(&driver_lock); \ + up(&driver_lock); \ return r; \ } #define BUILD_SHOW_FUNC_INT(name, data) \ @@ -1804,11 +1803,11 @@ static int main_control_loop(void *x) { DBG("main_control_loop started\n"); - mutex_lock(&driver_lock); + down(&driver_lock); if (start_fcu() < 0) { printk(KERN_ERR "kfand: failed to start FCU\n"); - mutex_unlock(&driver_lock); + up(&driver_lock); goto out; } @@ -1823,14 +1822,14 @@ static int main_control_loop(void *x) fcu_tickle_ticks = FCU_TICKLE_TICKS; - mutex_unlock(&driver_lock); + up(&driver_lock); while (state == state_attached) { unsigned long elapsed, start; start = jiffies; - mutex_lock(&driver_lock); + down(&driver_lock); /* Tickle the FCU just in case */ if (--fcu_tickle_ticks < 0) { @@ -1862,7 +1861,7 @@ static int main_control_loop(void *x) do_monitor_slots(&slots_state); else do_monitor_drives(&drives_state); - mutex_unlock(&driver_lock); + up(&driver_lock); if (critical_state == 1) { printk(KERN_WARNING "Temperature control detected a critical condition\n"); @@ -2020,13 +2019,13 @@ static void detach_fcu(void) */ static int therm_pm72_attach(struct i2c_adapter *adapter) { - mutex_lock(&driver_lock); + down(&driver_lock); /* Check state */ if (state == state_detached) state = state_attaching; if (state != state_attaching) { - mutex_unlock(&driver_lock); + up(&driver_lock); return 0; } @@ -2055,7 +2054,7 @@ static int therm_pm72_attach(struct i2c_adapter *adapter) state = state_attached; start_control_loops(); } - mutex_unlock(&driver_lock); + up(&driver_lock); return 0; } @@ -2066,16 +2065,16 @@ static int therm_pm72_attach(struct i2c_adapter *adapter) */ static int therm_pm72_detach(struct i2c_adapter *adapter) { - mutex_lock(&driver_lock); + down(&driver_lock); if (state != state_detached) state = state_detaching; /* Stop control loops if any */ DBG("stopping control loops\n"); - mutex_unlock(&driver_lock); + up(&driver_lock); stop_control_loops(); - mutex_lock(&driver_lock); + down(&driver_lock); if (u3_0 != NULL && !strcmp(adapter->name, "u3 0")) { DBG("lost U3-0, disposing control loops\n"); @@ -2091,7 +2090,7 @@ static int therm_pm72_detach(struct i2c_adapter *adapter) if (u3_0 == NULL && u3_1 == NULL) state = state_detached; - mutex_unlock(&driver_lock); + up(&driver_lock); return 0; } diff --git a/trunk/drivers/macintosh/windfarm_smu_sat.c b/trunk/drivers/macintosh/windfarm_smu_sat.c index 7f2be4baaeda..797918d0e59c 100644 --- a/trunk/drivers/macintosh/windfarm_smu_sat.c +++ b/trunk/drivers/macintosh/windfarm_smu_sat.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include @@ -36,7 +36,7 @@ struct wf_sat { int nr; atomic_t refcnt; - struct mutex mutex; + struct semaphore mutex; unsigned long last_read; /* jiffies when cache last updated */ u8 cache[16]; struct i2c_client i2c; @@ -163,7 +163,7 @@ static int wf_sat_get(struct wf_sensor *sr, s32 *value) if (sat->i2c.adapter == NULL) return -ENODEV; - mutex_lock(&sat->mutex); + down(&sat->mutex); if (time_after(jiffies, (sat->last_read + MAX_AGE))) { err = wf_sat_read_cache(sat); if (err) @@ -182,7 +182,7 @@ static int wf_sat_get(struct wf_sensor *sr, s32 *value) err = 0; fail: - mutex_unlock(&sat->mutex); + up(&sat->mutex); return err; } @@ -233,7 
+233,7 @@ static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev) sat->nr = -1; sat->node = of_node_get(dev); atomic_set(&sat->refcnt, 0); - mutex_init(&sat->mutex); + init_MUTEX(&sat->mutex); sat->i2c.addr = (addr >> 1) & 0x7f; sat->i2c.adapter = adapter; sat->i2c.driver = &wf_sat_driver; diff --git a/trunk/drivers/md/raid10.c b/trunk/drivers/md/raid10.c index faf3d8912979..5938fa962922 100644 --- a/trunk/drivers/md/raid10.c +++ b/trunk/drivers/md/raid10.c @@ -886,7 +886,7 @@ static int make_request(struct request_queue *q, struct bio * bio) */ raid10_find_phys(conf, r10_bio); retry_write: - blocked_rdev = NULL; + blocked_rdev = 0; rcu_read_lock(); for (i = 0; i < conf->copies; i++) { int d = r10_bio->devs[i].devnum; diff --git a/trunk/drivers/media/Makefile b/trunk/drivers/media/Makefile index cc11c4c0e7e7..73f742c7e818 100644 --- a/trunk/drivers/media/Makefile +++ b/trunk/drivers/media/Makefile @@ -2,8 +2,6 @@ # Makefile for the kernel multimedia device drivers. # -obj-y := common/ - obj-$(CONFIG_VIDEO_MEDIA) += common/ # Since hybrid devices are here, should be compiled if DVB and/or V4L diff --git a/trunk/drivers/media/video/cx18/cx18-driver.c b/trunk/drivers/media/video/cx18/cx18-driver.c index 3f55d47bc4b9..8f5ed9b4bf83 100644 --- a/trunk/drivers/media/video/cx18/cx18-driver.c +++ b/trunk/drivers/media/video/cx18/cx18-driver.c @@ -613,7 +613,7 @@ static int __devinit cx18_probe(struct pci_dev *dev, } cx = kzalloc(sizeof(struct cx18), GFP_ATOMIC); - if (!cx) { + if (cx == 0) { spin_unlock(&cx18_cards_lock); return -ENOMEM; } diff --git a/trunk/drivers/media/video/saa7134/saa7134-video.c b/trunk/drivers/media/video/saa7134/saa7134-video.c index 48e1a01718ec..a0baf2d0ba7f 100644 --- a/trunk/drivers/media/video/saa7134/saa7134-video.c +++ b/trunk/drivers/media/video/saa7134/saa7134-video.c @@ -1634,7 +1634,7 @@ static int saa7134_s_fmt_overlay(struct file *file, void *priv, struct saa7134_fh *fh = priv; struct saa7134_dev *dev = fh->dev; int err; - unsigned long flags; + unsigned int flags; if (saa7134_no_overlay > 0) { printk(KERN_ERR "V4L2_BUF_TYPE_VIDEO_OVERLAY: no_overlay\n"); diff --git a/trunk/drivers/misc/kgdbts.c b/trunk/drivers/misc/kgdbts.c index fa394104339c..30a1af857c7a 100644 --- a/trunk/drivers/misc/kgdbts.c +++ b/trunk/drivers/misc/kgdbts.c @@ -47,7 +47,6 @@ * to test the HW NMI watchdog * F## = Break at do_fork for ## iterations * S## = Break at sys_open for ## iterations - * I## = Run the single step test ## iterations * * NOTE: that the do_fork and sys_open tests are mutually exclusive. 
* @@ -376,7 +375,7 @@ static void emul_sstep_get(char *arg) break; case 1: /* set breakpoint */ - break_helper("Z0", NULL, sstep_addr); + break_helper("Z0", 0, sstep_addr); break; case 2: /* Continue */ @@ -384,7 +383,7 @@ static void emul_sstep_get(char *arg) break; case 3: /* Clear breakpoint */ - break_helper("z0", NULL, sstep_addr); + break_helper("z0", 0, sstep_addr); break; default: eprintk("kgdbts: ERROR failed sstep get emulation\n"); @@ -466,11 +465,11 @@ static struct test_struct sw_breakpoint_test[] = { { "?", "S0*" }, /* Clear break points */ { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ { "c", "T0*", }, /* Continue */ - { "g", "kgdbts_break_test", NULL, check_and_rewind_pc }, + { "g", "kgdbts_break_test", 0, check_and_rewind_pc }, { "write", "OK", write_regs }, { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */ { "D", "OK" }, /* Detach */ - { "D", "OK", NULL, got_break }, /* On success we made it here */ + { "D", "OK", 0, got_break }, /* If the test worked we made it here */ { "", "" }, }; @@ -500,14 +499,14 @@ static struct test_struct singlestep_break_test[] = { { "?", "S0*" }, /* Clear break points */ { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ { "c", "T0*", }, /* Continue */ - { "g", "kgdbts_break_test", NULL, check_and_rewind_pc }, + { "g", "kgdbts_break_test", 0, check_and_rewind_pc }, { "write", "OK", write_regs }, /* Write registers */ { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */ { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */ - { "g", "kgdbts_break_test", NULL, check_single_step }, + { "g", "kgdbts_break_test", 0, check_single_step }, { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ { "c", "T0*", }, /* Continue */ - { "g", "kgdbts_break_test", NULL, check_and_rewind_pc }, + { "g", "kgdbts_break_test", 0, check_and_rewind_pc }, { "write", "OK", write_regs }, /* Write registers */ { "D", "OK" }, /* Remove all breakpoints and continues */ { "", "" }, @@ -521,14 +520,14 @@ static struct test_struct do_fork_test[] = { { "?", "S0*" }, /* Clear break points */ { "do_fork", "OK", sw_break, }, /* set sw breakpoint */ { "c", "T0*", }, /* Continue */ - { "g", "do_fork", NULL, check_and_rewind_pc }, /* check location */ + { "g", "do_fork", 0, check_and_rewind_pc }, /* check location */ { "write", "OK", write_regs }, /* Write registers */ { "do_fork", "OK", sw_rem_break }, /*remove breakpoint */ { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */ - { "g", "do_fork", NULL, check_single_step }, + { "g", "do_fork", 0, check_single_step }, { "do_fork", "OK", sw_break, }, /* set sw breakpoint */ { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */ - { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */ + { "D", "OK", 0, final_ack_set }, /* detach and unregister I/O */ { "", "" }, }; @@ -539,14 +538,14 @@ static struct test_struct sys_open_test[] = { { "?", "S0*" }, /* Clear break points */ { "sys_open", "OK", sw_break, }, /* set sw breakpoint */ { "c", "T0*", }, /* Continue */ - { "g", "sys_open", NULL, check_and_rewind_pc }, /* check location */ + { "g", "sys_open", 0, check_and_rewind_pc }, /* check location */ { "write", "OK", write_regs }, /* Write registers */ { "sys_open", "OK", sw_rem_break }, /*remove breakpoint */ { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */ - { "g", "sys_open", NULL, check_single_step }, + { "g", "sys_open", 0, check_single_step }, { "sys_open", "OK", sw_break, }, /* set sw 
breakpoint */ { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */ - { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */ + { "D", "OK", 0, final_ack_set }, /* detach and unregister I/O */ { "", "" }, }; @@ -557,11 +556,11 @@ static struct test_struct hw_breakpoint_test[] = { { "?", "S0*" }, /* Clear break points */ { "kgdbts_break_test", "OK", hw_break, }, /* set hw breakpoint */ { "c", "T0*", }, /* Continue */ - { "g", "kgdbts_break_test", NULL, check_and_rewind_pc }, + { "g", "kgdbts_break_test", 0, check_and_rewind_pc }, { "write", "OK", write_regs }, { "kgdbts_break_test", "OK", hw_rem_break }, /*remove breakpoint */ { "D", "OK" }, /* Detach */ - { "D", "OK", NULL, got_break }, /* On success we made it here */ + { "D", "OK", 0, got_break }, /* If the test worked we made it here */ { "", "" }, }; @@ -571,12 +570,12 @@ static struct test_struct hw_breakpoint_test[] = { static struct test_struct hw_write_break_test[] = { { "?", "S0*" }, /* Clear break points */ { "hw_break_val", "OK", hw_write_break, }, /* set hw breakpoint */ - { "c", "T0*", NULL, got_break }, /* Continue */ - { "g", "silent", NULL, check_and_rewind_pc }, + { "c", "T0*", 0, got_break }, /* Continue */ + { "g", "silent", 0, check_and_rewind_pc }, { "write", "OK", write_regs }, { "hw_break_val", "OK", hw_rem_write_break }, /*remove breakpoint */ { "D", "OK" }, /* Detach */ - { "D", "OK", NULL, got_break }, /* On success we made it here */ + { "D", "OK", 0, got_break }, /* If the test worked we made it here */ { "", "" }, }; @@ -586,12 +585,12 @@ static struct test_struct hw_write_break_test[] = { static struct test_struct hw_access_break_test[] = { { "?", "S0*" }, /* Clear break points */ { "hw_break_val", "OK", hw_access_break, }, /* set hw breakpoint */ - { "c", "T0*", NULL, got_break }, /* Continue */ - { "g", "silent", NULL, check_and_rewind_pc }, + { "c", "T0*", 0, got_break }, /* Continue */ + { "g", "silent", 0, check_and_rewind_pc }, { "write", "OK", write_regs }, { "hw_break_val", "OK", hw_rem_access_break }, /*remove breakpoint */ { "D", "OK" }, /* Detach */ - { "D", "OK", NULL, got_break }, /* On success we made it here */ + { "D", "OK", 0, got_break }, /* If the test worked we made it here */ { "", "" }, }; @@ -600,9 +599,9 @@ static struct test_struct hw_access_break_test[] = { */ static struct test_struct nmi_sleep_test[] = { { "?", "S0*" }, /* Clear break points */ - { "c", "T0*", NULL, got_break }, /* Continue */ + { "c", "T0*", 0, got_break }, /* Continue */ { "D", "OK" }, /* Detach */ - { "D", "OK", NULL, got_break }, /* On success we made it here */ + { "D", "OK", 0, got_break }, /* If the test worked we made it here */ { "", "" }, }; @@ -875,23 +874,18 @@ static void kgdbts_run_tests(void) { char *ptr; int fork_test = 0; - int do_sys_open_test = 0; - int sstep_test = 1000; + int sys_open_test = 0; int nmi_sleep = 0; - int i; ptr = strstr(config, "F"); if (ptr) - fork_test = simple_strtol(ptr + 1, NULL, 10); + fork_test = simple_strtol(ptr+1, NULL, 10); ptr = strstr(config, "S"); if (ptr) - do_sys_open_test = simple_strtol(ptr + 1, NULL, 10); + sys_open_test = simple_strtol(ptr+1, NULL, 10); ptr = strstr(config, "N"); if (ptr) nmi_sleep = simple_strtol(ptr+1, NULL, 10); - ptr = strstr(config, "I"); - if (ptr) - sstep_test = simple_strtol(ptr+1, NULL, 10); /* required internal KGDB tests */ v1printk("kgdbts:RUN plant and detach test\n"); @@ -900,13 +894,8 @@ static void kgdbts_run_tests(void) run_breakpoint_test(0); v1printk("kgdbts:RUN bad memory access test\n"); 
run_bad_read_test(); - v1printk("kgdbts:RUN singlestep test %i iterations\n", sstep_test); - for (i = 0; i < sstep_test; i++) { - run_singlestep_break_test(); - if (i % 100 == 0) - v1printk("kgdbts:RUN singlestep [%i/%i]\n", - i, sstep_test); - } + v1printk("kgdbts:RUN singlestep breakpoint test\n"); + run_singlestep_break_test(); /* ===Optional tests=== */ @@ -933,7 +922,7 @@ static void kgdbts_run_tests(void) repeat_test = fork_test; printk(KERN_INFO "kgdbts:RUN do_fork for %i breakpoints\n", repeat_test); - kthread_run(kgdbts_unreg_thread, NULL, "kgdbts_unreg"); + kthread_run(kgdbts_unreg_thread, 0, "kgdbts_unreg"); run_do_fork_test(); return; } @@ -942,11 +931,11 @@ static void kgdbts_run_tests(void) * executed because a kernel thread will be spawned at the very * end to unregister the debug hooks. */ - if (do_sys_open_test) { - repeat_test = do_sys_open_test; + if (sys_open_test) { + repeat_test = sys_open_test; printk(KERN_INFO "kgdbts:RUN sys_open for %i breakpoints\n", repeat_test); - kthread_run(kgdbts_unreg_thread, NULL, "kgdbts_unreg"); + kthread_run(kgdbts_unreg_thread, 0, "kgdbts_unreg"); run_sys_open_test(); return; } diff --git a/trunk/drivers/mmc/host/mmci.c b/trunk/drivers/mmc/host/mmci.c index da5fecad74d9..626ac083f4e0 100644 --- a/trunk/drivers/mmc/host/mmci.c +++ b/trunk/drivers/mmc/host/mmci.c @@ -425,7 +425,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) host->cclk = host->mclk; } else { clk = host->mclk / (2 * ios->clock) - 1; - if (clk >= 256) + if (clk > 256) clk = 255; host->cclk = host->mclk / (2 * (clk + 1)); } @@ -512,18 +512,6 @@ static int mmci_probe(struct amba_device *dev, void *id) host->plat = plat; host->mclk = clk_get_rate(host->clk); - /* - * According to the spec, mclk is max 100 MHz, - * so we try to adjust the clock down to this, - * (if possible). - */ - if (host->mclk > 100000000) { - ret = clk_set_rate(host->clk, 100000000); - if (ret < 0) - goto clk_disable; - host->mclk = clk_get_rate(host->clk); - DBG(host, "eventual mclk rate: %u Hz\n", host->mclk); - } host->mmc = mmc; host->base = ioremap(dev->res.start, SZ_4K); if (!host->base) { diff --git a/trunk/drivers/mtd/maps/Kconfig b/trunk/drivers/mtd/maps/Kconfig index 17bc87a43ff4..1bd69aa9e22a 100644 --- a/trunk/drivers/mtd/maps/Kconfig +++ b/trunk/drivers/mtd/maps/Kconfig @@ -374,7 +374,7 @@ config MTD_REDWOOD config MTD_SOLUTIONENGINE tristate "CFI Flash device mapped on Hitachi SolutionEngine" - depends on SUPERH && SOLUTION_ENGINE && MTD_CFI && MTD_REDBOOT_PARTS + depends on SUPERH && MTD_CFI && MTD_REDBOOT_PARTS help This enables access to the flash chips on the Hitachi SolutionEngine and similar boards. Say 'Y' if you are building a kernel for such a board. @@ -480,6 +480,13 @@ config MTD_H720X This enables access to the flash chips on the Hynix evaluation boards. If you have such a board, say 'Y'. +config MTD_MPC1211 + tristate "CFI Flash device mapped on Interface MPC-1211" + depends on SH_MPC1211 && MTD_CFI + help + This enables access to the flash chips on the Interface MPC-1211(CTP/PCI/MPC-SH02). + If you have such a board, say 'Y'. 
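The mmci_set_ios() hunk earlier in this series derives the MCI clock divider from the requested bus clock and clamps it (the clamp to 255 implies an 8-bit divider field). A standalone sketch of just that arithmetic follows; the example frequencies are illustrative, and the driver's bypass branch for requested clocks at or above mclk is omitted here.

/* Sketch of the MMCI divider arithmetic shown above:
 * divider = mclk / (2 * wanted) - 1, clamped to 8 bits,
 * effective card clock = mclk / (2 * (divider + 1)).
 * Example values only; not driver code. */
#include <stdio.h>

int main(void)
{
	unsigned long mclk = 49152000;	/* controller input clock, Hz (example) */
	unsigned long wanted = 400000;	/* requested card clock, Hz (example) */
	unsigned long clk, cclk;

	clk = mclk / (2 * wanted) - 1;
	if (clk >= 256)			/* clamp to the 8-bit divider field */
		clk = 255;
	cclk = mclk / (2 * (clk + 1));	/* clock actually delivered to the card */

	printf("divider=%lu, effective card clock=%lu Hz\n", clk, cclk);
	return 0;
}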
+ config MTD_OMAP_NOR tristate "TI OMAP board mappings" depends on MTD_CFI && ARCH_OMAP diff --git a/trunk/drivers/mtd/maps/Makefile b/trunk/drivers/mtd/maps/Makefile index 957fb5f70f5e..a9cbe80f99a0 100644 --- a/trunk/drivers/mtd/maps/Makefile +++ b/trunk/drivers/mtd/maps/Makefile @@ -58,6 +58,7 @@ obj-$(CONFIG_MTD_WALNUT) += walnut.o obj-$(CONFIG_MTD_H720X) += h720x-flash.o obj-$(CONFIG_MTD_SBC8240) += sbc8240.o obj-$(CONFIG_MTD_NOR_TOTO) += omap-toto-flash.o +obj-$(CONFIG_MTD_MPC1211) += mpc1211.o obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o obj-$(CONFIG_MTD_IXP2000) += ixp2000.o obj-$(CONFIG_MTD_WRSBC8260) += wr_sbc82xx_flash.o diff --git a/trunk/drivers/mtd/maps/mpc1211.c b/trunk/drivers/mtd/maps/mpc1211.c new file mode 100644 index 000000000000..45a00fac88ac --- /dev/null +++ b/trunk/drivers/mtd/maps/mpc1211.c @@ -0,0 +1,80 @@ +/* + * Flash on MPC-1211 + * + * $Id: mpc1211.c,v 1.4 2004/09/16 23:27:13 gleixner Exp $ + * + * (C) 2002 Interface, Saito.K & Jeanne + * + * GPL'd + */ + +#include +#include +#include +#include +#include +#include +#include + +static struct mtd_info *flash_mtd; +static struct mtd_partition *parsed_parts; + +struct map_info mpc1211_flash_map = { + .name = "MPC-1211 FLASH", + .size = 0x80000, + .bankwidth = 1, +}; + +static struct mtd_partition mpc1211_partitions[] = { + { + .name = "IPL & ETH-BOOT", + .offset = 0x00000000, + .size = 0x10000, + }, + { + .name = "Flash FS", + .offset = 0x00010000, + .size = MTDPART_SIZ_FULL, + } +}; + +static int __init init_mpc1211_maps(void) +{ + int nr_parts; + + mpc1211_flash_map.phys = 0; + mpc1211_flash_map.virt = (void __iomem *)P2SEGADDR(0); + + simple_map_init(&mpc1211_flash_map); + + printk(KERN_NOTICE "Probing for flash chips at 0x00000000:\n"); + flash_mtd = do_map_probe("jedec_probe", &mpc1211_flash_map); + if (!flash_mtd) { + printk(KERN_NOTICE "Flash chips not detected at either possible location.\n"); + return -ENXIO; + } + printk(KERN_NOTICE "MPC-1211: Flash at 0x%08lx\n", mpc1211_flash_map.virt & 0x1fffffff); + flash_mtd->module = THIS_MODULE; + + parsed_parts = mpc1211_partitions; + nr_parts = ARRAY_SIZE(mpc1211_partitions); + + add_mtd_partitions(flash_mtd, parsed_parts, nr_parts); + return 0; +} + +static void __exit cleanup_mpc1211_maps(void) +{ + if (parsed_parts) + del_mtd_partitions(flash_mtd); + else + del_mtd_device(flash_mtd); + map_destroy(flash_mtd); +} + +module_init(init_mpc1211_maps); +module_exit(cleanup_mpc1211_maps); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Saito.K & Jeanne "); +MODULE_DESCRIPTION("MTD map driver for MPC-1211 boards. Interface"); diff --git a/trunk/drivers/net/3c59x.c b/trunk/drivers/net/3c59x.c index 2edda8cc7f99..6f8e7d4cf74d 100644 --- a/trunk/drivers/net/3c59x.c +++ b/trunk/drivers/net/3c59x.c @@ -319,7 +319,7 @@ static struct vortex_chip_info { {"3c920B-EMB-WNM (ATI Radeon 9100 IGP)", PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, }, {"3c980 Cyclone", - PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, + PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, {"3c980C Python-T", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, @@ -600,6 +600,7 @@ struct vortex_private { struct sk_buff* tx_skbuff[TX_RING_SIZE]; unsigned int cur_rx, cur_tx; /* The next free ring entry */ unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ + struct net_device_stats stats; /* Generic stats */ struct vortex_extra_stats xstats; /* NIC-specific extra stats */ struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. 
*/ dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */ @@ -1874,7 +1875,7 @@ static void vortex_tx_timeout(struct net_device *dev) issue_and_wait(dev, TxReset); - dev->stats.tx_errors++; + vp->stats.tx_errors++; if (vp->full_bus_master_tx) { printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n", dev->name); if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0) @@ -1886,7 +1887,7 @@ static void vortex_tx_timeout(struct net_device *dev) iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); iowrite16(DownUnstall, ioaddr + EL3_CMD); } else { - dev->stats.tx_dropped++; + vp->stats.tx_dropped++; netif_wake_queue(dev); } @@ -1927,8 +1928,8 @@ vortex_error(struct net_device *dev, int status) } dump_tx_ring(dev); } - if (tx_status & 0x14) dev->stats.tx_fifo_errors++; - if (tx_status & 0x38) dev->stats.tx_aborted_errors++; + if (tx_status & 0x14) vp->stats.tx_fifo_errors++; + if (tx_status & 0x38) vp->stats.tx_aborted_errors++; if (tx_status & 0x08) vp->xstats.tx_max_collisions++; iowrite8(0, ioaddr + TxStatus); if (tx_status & 0x30) { /* txJabber or txUnderrun */ @@ -2050,8 +2051,8 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev) if (vortex_debug > 2) printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n", dev->name, tx_status); - if (tx_status & 0x04) dev->stats.tx_fifo_errors++; - if (tx_status & 0x38) dev->stats.tx_aborted_errors++; + if (tx_status & 0x04) vp->stats.tx_fifo_errors++; + if (tx_status & 0x38) vp->stats.tx_aborted_errors++; if (tx_status & 0x30) { issue_and_wait(dev, TxReset); } @@ -2349,7 +2350,7 @@ boomerang_interrupt(int irq, void *dev_id) } else { printk(KERN_DEBUG "boomerang_interrupt: no skb!\n"); } - /* dev->stats.tx_packets++; Counted below. */ + /* vp->stats.tx_packets++; Counted below. */ dirty_tx++; } vp->dirty_tx = dirty_tx; @@ -2408,12 +2409,12 @@ static int vortex_rx(struct net_device *dev) unsigned char rx_error = ioread8(ioaddr + RxErrors); if (vortex_debug > 2) printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error); - dev->stats.rx_errors++; - if (rx_error & 0x01) dev->stats.rx_over_errors++; - if (rx_error & 0x02) dev->stats.rx_length_errors++; - if (rx_error & 0x04) dev->stats.rx_frame_errors++; - if (rx_error & 0x08) dev->stats.rx_crc_errors++; - if (rx_error & 0x10) dev->stats.rx_length_errors++; + vp->stats.rx_errors++; + if (rx_error & 0x01) vp->stats.rx_over_errors++; + if (rx_error & 0x02) vp->stats.rx_length_errors++; + if (rx_error & 0x04) vp->stats.rx_frame_errors++; + if (rx_error & 0x08) vp->stats.rx_crc_errors++; + if (rx_error & 0x10) vp->stats.rx_length_errors++; } else { /* The packet length: up to 4.5K!. */ int pkt_len = rx_status & 0x1fff; @@ -2445,7 +2446,7 @@ static int vortex_rx(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; - dev->stats.rx_packets++; + vp->stats.rx_packets++; /* Wait a limited time to go to next packet. */ for (i = 200; i >= 0; i--) if ( ! 
(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) @@ -2454,7 +2455,7 @@ static int vortex_rx(struct net_device *dev) } else if (vortex_debug > 0) printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of " "size %d.\n", dev->name, pkt_len); - dev->stats.rx_dropped++; + vp->stats.rx_dropped++; } issue_and_wait(dev, RxDiscard); } @@ -2481,12 +2482,12 @@ boomerang_rx(struct net_device *dev) unsigned char rx_error = rx_status >> 16; if (vortex_debug > 2) printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error); - dev->stats.rx_errors++; - if (rx_error & 0x01) dev->stats.rx_over_errors++; - if (rx_error & 0x02) dev->stats.rx_length_errors++; - if (rx_error & 0x04) dev->stats.rx_frame_errors++; - if (rx_error & 0x08) dev->stats.rx_crc_errors++; - if (rx_error & 0x10) dev->stats.rx_length_errors++; + vp->stats.rx_errors++; + if (rx_error & 0x01) vp->stats.rx_over_errors++; + if (rx_error & 0x02) vp->stats.rx_length_errors++; + if (rx_error & 0x04) vp->stats.rx_frame_errors++; + if (rx_error & 0x08) vp->stats.rx_crc_errors++; + if (rx_error & 0x10) vp->stats.rx_length_errors++; } else { /* The packet length: up to 4.5K!. */ int pkt_len = rx_status & 0x1fff; @@ -2528,7 +2529,7 @@ boomerang_rx(struct net_device *dev) } netif_rx(skb); dev->last_rx = jiffies; - dev->stats.rx_packets++; + vp->stats.rx_packets++; } entry = (++vp->cur_rx) % RX_RING_SIZE; } @@ -2590,7 +2591,7 @@ vortex_down(struct net_device *dev, int final_down) del_timer_sync(&vp->rx_oom_timer); del_timer_sync(&vp->timer); - /* Turn off statistics ASAP. We update dev->stats below. */ + /* Turn off statistics ASAP. We update vp->stats below. */ iowrite16(StatsDisable, ioaddr + EL3_CMD); /* Disable the receiver and transmitter. */ @@ -2727,7 +2728,7 @@ static struct net_device_stats *vortex_get_stats(struct net_device *dev) update_stats(ioaddr, dev); spin_unlock_irqrestore (&vp->lock, flags); } - return &dev->stats; + return &vp->stats; } /* Update statistics. @@ -2747,18 +2748,18 @@ static void update_stats(void __iomem *ioaddr, struct net_device *dev) /* Unlike the 3c5x9 we need not turn off stats updates while reading. */ /* Switch to the stats window, and read everything. */ EL3WINDOW(6); - dev->stats.tx_carrier_errors += ioread8(ioaddr + 0); - dev->stats.tx_heartbeat_errors += ioread8(ioaddr + 1); - dev->stats.tx_window_errors += ioread8(ioaddr + 4); - dev->stats.rx_fifo_errors += ioread8(ioaddr + 5); - dev->stats.tx_packets += ioread8(ioaddr + 6); - dev->stats.tx_packets += (ioread8(ioaddr + 9)&0x30) << 4; + vp->stats.tx_carrier_errors += ioread8(ioaddr + 0); + vp->stats.tx_heartbeat_errors += ioread8(ioaddr + 1); + vp->stats.tx_window_errors += ioread8(ioaddr + 4); + vp->stats.rx_fifo_errors += ioread8(ioaddr + 5); + vp->stats.tx_packets += ioread8(ioaddr + 6); + vp->stats.tx_packets += (ioread8(ioaddr + 9)&0x30) << 4; /* Rx packets */ ioread8(ioaddr + 7); /* Must read to clear */ /* Don't bother with register 9, an extension of registers 6&7. If we do use the 6&7 values the atomic update assumption above is invalid. 
*/ - dev->stats.rx_bytes += ioread16(ioaddr + 10); - dev->stats.tx_bytes += ioread16(ioaddr + 12); + vp->stats.rx_bytes += ioread16(ioaddr + 10); + vp->stats.tx_bytes += ioread16(ioaddr + 12); /* Extra stats for get_ethtool_stats() */ vp->xstats.tx_multiple_collisions += ioread8(ioaddr + 2); vp->xstats.tx_single_collisions += ioread8(ioaddr + 3); @@ -2766,14 +2767,14 @@ static void update_stats(void __iomem *ioaddr, struct net_device *dev) EL3WINDOW(4); vp->xstats.rx_bad_ssd += ioread8(ioaddr + 12); - dev->stats.collisions = vp->xstats.tx_multiple_collisions + vp->stats.collisions = vp->xstats.tx_multiple_collisions + vp->xstats.tx_single_collisions + vp->xstats.tx_max_collisions; { u8 up = ioread8(ioaddr + 13); - dev->stats.rx_bytes += (up & 0x0f) << 16; - dev->stats.tx_bytes += (up & 0xf0) << 12; + vp->stats.rx_bytes += (up & 0x0f) << 16; + vp->stats.tx_bytes += (up & 0xf0) << 12; } EL3WINDOW(old_window >> 13); diff --git a/trunk/drivers/net/Kconfig b/trunk/drivers/net/Kconfig index d27f54a2df77..af46341827f2 100644 --- a/trunk/drivers/net/Kconfig +++ b/trunk/drivers/net/Kconfig @@ -1273,6 +1273,20 @@ config PCNET32 To compile this driver as a module, choose M here. The module will be called pcnet32. +config PCNET32_NAPI + bool "Use RX polling (NAPI)" + depends on PCNET32 + help + NAPI is a new driver API designed to reduce CPU and interrupt load + when the driver is receiving lots of packets from the card. It is + still somewhat experimental and thus not yet enabled by default. + + If your estimated Rx load is 10kpps or more, or if the card will be + deployed on potentially unfriendly networks (e.g. in a firewall), + then say Y here. + + If in doubt, say N. + config AMD8111_ETH tristate "AMD 8111 (new PCI lance) support" depends on NET_PCI && PCI diff --git a/trunk/drivers/net/appletalk/cops.c b/trunk/drivers/net/appletalk/cops.c index a0b4c8516073..82e9a5bd0dd2 100644 --- a/trunk/drivers/net/appletalk/cops.c +++ b/trunk/drivers/net/appletalk/cops.c @@ -499,13 +499,19 @@ static void cops_reset(struct net_device *dev, int sleep) { outb(0, ioaddr+DAYNA_RESET); /* Assert the reset port */ inb(ioaddr+DAYNA_RESET); /* Clear the reset */ - if (sleep) - msleep(333); - else - mdelay(333); - } + if(sleep) + { + long snap=jiffies; + /* Let card finish initializing, about 1/3 second */ + while (time_before(jiffies, snap + HZ/3)) + schedule(); + } + else + mdelay(333); + } netif_wake_queue(dev); + return; } static void cops_load (struct net_device *dev) diff --git a/trunk/drivers/net/bonding/bond_main.c b/trunk/drivers/net/bonding/bond_main.c index 50a40e433154..6425603bc379 100644 --- a/trunk/drivers/net/bonding/bond_main.c +++ b/trunk/drivers/net/bonding/bond_main.c @@ -1425,13 +1425,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) res = netdev_set_master(slave_dev, bond_dev); if (res) { dprintk("Error %d calling netdev_set_master\n", res); - goto err_restore_mac; + goto err_close; } /* open the slave since the application closed it */ res = dev_open(slave_dev); if (res) { dprintk("Openning slave %s failed\n", slave_dev->name); - goto err_unset_master; + goto err_restore_mac; } new_slave->dev = slave_dev; @@ -1444,7 +1444,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) */ res = bond_alb_init_slave(bond, new_slave); if (res) { - goto err_close; + goto err_unset_master; } } @@ -1619,7 +1619,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) res = bond_create_slave_symlinks(bond_dev, slave_dev); if (res) 
- goto err_close; + goto err_unset_master; printk(KERN_INFO DRV_NAME ": %s: enslaving %s as a%s interface with a%s link.\n", @@ -1631,12 +1631,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) return 0; /* Undo stages on error */ -err_close: - dev_close(slave_dev); - err_unset_master: netdev_set_master(slave_dev, NULL); +err_close: + dev_close(slave_dev); + err_restore_mac: if (!bond->params.fail_over_mac) { memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN); @@ -4936,9 +4936,7 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond if (res < 0) { rtnl_lock(); down_write(&bonding_rwsem); - bond_deinit(bond_dev); - unregister_netdevice(bond_dev); - goto out_rtnl; + goto out_bond; } return 0; @@ -4992,10 +4990,9 @@ static int __init bonding_init(void) destroy_workqueue(bond->wq); } - bond_destroy_sysfs(); - rtnl_lock(); bond_free_all(); + bond_destroy_sysfs(); rtnl_unlock(); out: return res; @@ -5007,10 +5004,9 @@ static void __exit bonding_exit(void) unregister_netdevice_notifier(&bond_netdev_notifier); unregister_inetaddr_notifier(&bond_inetaddr_notifier); - bond_destroy_sysfs(); - rtnl_lock(); bond_free_all(); + bond_destroy_sysfs(); rtnl_unlock(); } diff --git a/trunk/drivers/net/bonding/bond_sysfs.c b/trunk/drivers/net/bonding/bond_sysfs.c index 68c41a00d93d..979c2d05ff9c 100644 --- a/trunk/drivers/net/bonding/bond_sysfs.c +++ b/trunk/drivers/net/bonding/bond_sysfs.c @@ -146,29 +146,29 @@ static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t ": Unable remove bond %s due to open references.\n", ifname); res = -EPERM; - goto out_unlock; + goto out; } printk(KERN_INFO DRV_NAME ": %s is being deleted...\n", bond->dev->name); bond_destroy(bond); - goto out_unlock; + up_write(&bonding_rwsem); + rtnl_unlock(); + goto out; } printk(KERN_ERR DRV_NAME ": unable to delete non-existent bond %s\n", ifname); res = -ENODEV; - goto out_unlock; + up_write(&bonding_rwsem); + rtnl_unlock(); + goto out; } err_no_cmd: printk(KERN_ERR DRV_NAME ": no command found in bonding_masters. Use +ifname or -ifname.\n"); - return -EPERM; - -out_unlock: - up_write(&bonding_rwsem); - rtnl_unlock(); + res = -EPERM; /* Always return either count or an error. If you return 0, you'll * get called forever, which is bad. diff --git a/trunk/drivers/net/cxgb3/cxgb3_main.c b/trunk/drivers/net/cxgb3/cxgb3_main.c index ce949d5fae39..05e5f59e87fa 100644 --- a/trunk/drivers/net/cxgb3/cxgb3_main.c +++ b/trunk/drivers/net/cxgb3/cxgb3_main.c @@ -1894,11 +1894,11 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) u8 *fw_data; struct ch_mem_range t; - if (!capable(CAP_SYS_RAWIO)) + if (!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(&t, useraddr, sizeof(t))) return -EFAULT; - /* Check t.len sanity ? 
*/ + fw_data = kmalloc(t.len, GFP_KERNEL); if (!fw_data) return -ENOMEM; diff --git a/trunk/drivers/net/e1000e/defines.h b/trunk/drivers/net/e1000e/defines.h index f823b8ba5785..2a53875cddbf 100644 --- a/trunk/drivers/net/e1000e/defines.h +++ b/trunk/drivers/net/e1000e/defines.h @@ -648,8 +648,6 @@ #define IFE_E_PHY_ID 0x02A80330 #define IFE_PLUS_E_PHY_ID 0x02A80320 #define IFE_C_E_PHY_ID 0x02A80310 -#define BME1000_E_PHY_ID 0x01410CB0 -#define BME1000_E_PHY_ID_R2 0x01410CB1 /* M88E1000 Specific Registers */ #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ @@ -703,14 +701,6 @@ #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 -/* BME1000 PHY Specific Control Register */ -#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */ - - -#define PHY_PAGE_SHIFT 5 -#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ - ((reg) & MAX_PHY_REG_ADDRESS)) - /* * Bits... * 15-5: page diff --git a/trunk/drivers/net/e1000e/e1000.h b/trunk/drivers/net/e1000e/e1000.h index d3bc6f8101fa..38bfd0d261fe 100644 --- a/trunk/drivers/net/e1000e/e1000.h +++ b/trunk/drivers/net/e1000e/e1000.h @@ -127,7 +127,7 @@ struct e1000_buffer { /* arrays of page information for packet split */ struct e1000_ps_page *ps_pages; }; - struct page *page; + }; struct e1000_ring { @@ -304,7 +304,6 @@ struct e1000_info { #define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) #define FLAG_HAS_SWSM_ON_LOAD (1 << 6) #define FLAG_HAS_JUMBO_FRAMES (1 << 7) -#define FLAG_IS_ICH (1 << 9) #define FLAG_HAS_SMART_POWER_DOWN (1 << 11) #define FLAG_IS_QUAD_PORT_A (1 << 12) #define FLAG_IS_QUAD_PORT (1 << 13) @@ -387,7 +386,6 @@ extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, bool state); extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); -extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw); extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw); extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); @@ -445,9 +443,6 @@ extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw); extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id); -extern s32 e1000e_determine_phy_address(struct e1000_hw *hw); -extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data); -extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data); extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); diff --git a/trunk/drivers/net/e1000e/ethtool.c b/trunk/drivers/net/e1000e/ethtool.c index a14561f40db0..ce045acce63e 100644 --- a/trunk/drivers/net/e1000e/ethtool.c +++ b/trunk/drivers/net/e1000e/ethtool.c @@ -494,12 +494,8 @@ static int e1000_get_eeprom(struct net_device *netdev, for (i = 0; i < last_word - first_word + 1; i++) { ret_val = e1000_read_nvm(hw, first_word + i, 1, &eeprom_buff[i]); - if (ret_val) { - /* a read error occurred, throw away the - * result */ - memset(eeprom_buff, 0xff, sizeof(eeprom_buff)); + if (ret_val) break; - } } } @@ -807,7 +803,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) /* restore previous status */ 
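The e1000_reg_test() hunks in this area exercise device registers through REG_PATTERN_TEST / REG_SET_AND_CHECK: write a known pattern, read it back, and compare the writable bits under a mask, restoring the original value afterwards. A minimal self-contained sketch of that idea follows; the fake register and helper names are hypothetical, not the driver's macros.

/* Sketch of a masked register pattern test.  The "register" is just a
 * variable so the example is self-contained and runnable; names and
 * masks are illustrative, not e1000e code. */
#include <stdio.h>
#include <stdint.h>

static uint32_t fake_reg;

static void reg_write(uint32_t val) { fake_reg = val; }
static uint32_t reg_read(void) { return fake_reg; }

static int pattern_test(uint32_t pattern, uint32_t rd_mask, uint32_t wr_mask)
{
	uint32_t before = reg_read();
	uint32_t got;
	int ok;

	reg_write(pattern & wr_mask);		/* write the test pattern */
	got = reg_read() & rd_mask;		/* read back the testable bits */
	ok = (got == (pattern & wr_mask & rd_mask));
	reg_write(before);			/* restore previous contents */
	return ok;
}

int main(void)
{
	static const uint32_t patterns[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
	};
	unsigned int i;

	for (i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++)
		printf("pattern %08X -> %s\n", patterns[i],
		       pattern_test(patterns[i], 0xFFFFFFF0, 0xFFFFFFFF) ?
		       "ok" : "FAIL");
	return 0;
}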
ew32(STATUS, before); - if (!(adapter->flags & FLAG_IS_ICH)) { + if ((mac->type != e1000_ich8lan) && + (mac->type != e1000_ich9lan)) { REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF); @@ -827,13 +824,15 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); - before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE); + before = (((mac->type == e1000_ich8lan) || + (mac->type == e1000_ich9lan)) ? 0x06C3B33E : 0x06DFB3FE); REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB); REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); - if (!(adapter->flags & FLAG_IS_ICH)) + if ((mac->type != e1000_ich8lan) && + (mac->type != e1000_ich9lan)) REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); @@ -912,7 +911,9 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) /* Test each interrupt */ for (i = 0; i < 10; i++) { - if ((adapter->flags & FLAG_IS_ICH) && (i == 8)) + + if (((adapter->hw.mac.type == e1000_ich8lan) || + (adapter->hw.mac.type == e1000_ich9lan)) && i == 8) continue; /* Interrupt to test */ @@ -1183,7 +1184,6 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 ctrl_reg = 0; u32 stat_reg = 0; - u16 phy_reg = 0; hw->mac.autoneg = 0; @@ -1211,28 +1211,6 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) E1000_CTRL_SPD_100 |/* Force Speed to 100 */ E1000_CTRL_FD); /* Force Duplex to FULL */ break; - case e1000_phy_bm: - /* Set Default MAC Interface speed to 1GB */ - e1e_rphy(hw, PHY_REG(2, 21), &phy_reg); - phy_reg &= ~0x0007; - phy_reg |= 0x006; - e1e_wphy(hw, PHY_REG(2, 21), phy_reg); - /* Assert SW reset for above settings to take effect */ - e1000e_commit_phy(hw); - mdelay(1); - /* Force Full Duplex */ - e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); - e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C); - /* Set Link Up (in force link) */ - e1e_rphy(hw, PHY_REG(776, 16), &phy_reg); - e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040); - /* Force Link */ - e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); - e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040); - /* Set Early Link Enable */ - e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); - e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400); - /* fall through */ default: /* force 1000, set loopback */ e1e_wphy(hw, PHY_CONTROL, 0x4140); @@ -1246,7 +1224,8 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ E1000_CTRL_FD); /* Force Duplex to FULL */ - if (adapter->flags & FLAG_IS_ICH) + if ((adapter->hw.mac.type == e1000_ich8lan) || + (adapter->hw.mac.type == e1000_ich9lan)) ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */ } diff --git a/trunk/drivers/net/e1000e/hw.h b/trunk/drivers/net/e1000e/hw.h index 74f263acb172..a930e6d9cf02 100644 --- a/trunk/drivers/net/e1000e/hw.h +++ b/trunk/drivers/net/e1000e/hw.h @@ -216,21 +216,6 @@ enum e1e_registers { #define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ #define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ #define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ -#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ -#define 
IGP_PAGE_SHIFT 5 -#define PHY_REG_MASK 0x1F - -#define BM_WUC_PAGE 800 -#define BM_WUC_ADDRESS_OPCODE 0x11 -#define BM_WUC_DATA_OPCODE 0x12 -#define BM_WUC_ENABLE_PAGE 769 -#define BM_WUC_ENABLE_REG 17 -#define BM_WUC_ENABLE_BIT (1 << 2) -#define BM_WUC_HOST_WU_BIT (1 << 4) - -#define BM_WUC PHY_REG(BM_WUC_PAGE, 1) -#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) -#define BM_WUS PHY_REG(BM_WUC_PAGE, 3) #define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 #define IGP01E1000_PHY_POLARITY_MASK 0x0078 @@ -346,16 +331,10 @@ enum e1e_registers { #define E1000_DEV_ID_ICH8_IFE_G 0x10C5 #define E1000_DEV_ID_ICH8_IGP_M 0x104D #define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD -#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5 -#define E1000_DEV_ID_ICH9_IGP_M 0x10BF -#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB #define E1000_DEV_ID_ICH9_IGP_C 0x294C #define E1000_DEV_ID_ICH9_IFE 0x10C0 #define E1000_DEV_ID_ICH9_IFE_GT 0x10C3 #define E1000_DEV_ID_ICH9_IFE_G 0x10C2 -#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC -#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD -#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE #define E1000_FUNC_1 1 @@ -399,7 +378,6 @@ enum e1000_phy_type { e1000_phy_gg82563, e1000_phy_igp_3, e1000_phy_ife, - e1000_phy_bm, }; enum e1000_bus_width { diff --git a/trunk/drivers/net/e1000e/ich8lan.c b/trunk/drivers/net/e1000e/ich8lan.c index 9e38452a738c..768485dbb2c6 100644 --- a/trunk/drivers/net/e1000e/ich8lan.c +++ b/trunk/drivers/net/e1000e/ich8lan.c @@ -38,12 +38,6 @@ * 82566DM Gigabit Network Connection * 82566MC Gigabit Network Connection * 82566MM Gigabit Network Connection - * 82567LM Gigabit Network Connection - * 82567LF Gigabit Network Connection - * 82567LM-2 Gigabit Network Connection - * 82567LF-2 Gigabit Network Connection - * 82567V-2 Gigabit Network Connection - * 82562GT-3 10/100 Network Connection */ #include @@ -204,19 +198,6 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) phy->addr = 1; phy->reset_delay_us = 100; - /* - * We may need to do this twice - once for IGP and if that fails, - * we'll set BM func pointers and try again - */ - ret_val = e1000e_determine_phy_address(hw); - if (ret_val) { - hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm; - hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm; - ret_val = e1000e_determine_phy_address(hw); - if (ret_val) - return ret_val; - } - phy->id = 0; while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && (i++ < 100)) { @@ -238,13 +219,6 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) phy->type = e1000_phy_ife; phy->autoneg_mask = E1000_ALL_NOT_GIG; break; - case BME1000_E_PHY_ID: - phy->type = e1000_phy_bm; - phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; - hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm; - hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm; - hw->phy.ops.commit_phy = e1000e_phy_sw_reset; - break; default: return -E1000_ERR_PHY; break; @@ -690,7 +664,6 @@ static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw) return e1000_get_phy_info_ife_ich8lan(hw); break; case e1000_phy_igp_3: - case e1000_phy_bm: return e1000e_get_phy_info_igp(hw); break; default: @@ -755,7 +728,7 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) s32 ret_val = 0; u16 data; - if (phy->type == e1000_phy_ife) + if (phy->type != e1000_phy_igp_3) return ret_val; phy_ctrl = er32(PHY_CTRL); @@ -1945,35 +1918,8 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) ret_val = e1000e_copper_link_setup_igp(hw); if (ret_val) return ret_val; - } else if (hw->phy.type == e1000_phy_bm) { - ret_val = 
e1000e_copper_link_setup_m88(hw); - if (ret_val) - return ret_val; } - if (hw->phy.type == e1000_phy_ife) { - ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, ®_data); - if (ret_val) - return ret_val; - - reg_data &= ~IFE_PMC_AUTO_MDIX; - - switch (hw->phy.mdix) { - case 1: - reg_data &= ~IFE_PMC_FORCE_MDIX; - break; - case 2: - reg_data |= IFE_PMC_FORCE_MDIX; - break; - case 0: - default: - reg_data |= IFE_PMC_AUTO_MDIX; - break; - } - ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data); - if (ret_val) - return ret_val; - } return e1000e_setup_copper_link(hw); } @@ -2180,31 +2126,6 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) reg_data); } -/** - * e1000e_disable_gig_wol_ich8lan - disable gig during WoL - * @hw: pointer to the HW structure - * - * During S0 to Sx transition, it is possible the link remains at gig - * instead of negotiating to a lower speed. Before going to Sx, set - * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation - * to a lower speed. - * - * Should only be called for ICH9 devices. - **/ -void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw) -{ - u32 phy_ctrl; - - if (hw->mac.type == e1000_ich9lan) { - phy_ctrl = er32(PHY_CTRL); - phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | - E1000_PHY_CTRL_GBE_DISABLE; - ew32(PHY_CTRL, phy_ctrl); - } - - return; -} - /** * e1000_cleanup_led_ich8lan - Restore the default LED operation * @hw: pointer to the HW structure @@ -2326,7 +2247,6 @@ static struct e1000_nvm_operations ich8_nvm_ops = { struct e1000_info e1000_ich8_info = { .mac = e1000_ich8lan, .flags = FLAG_HAS_WOL - | FLAG_IS_ICH | FLAG_RX_CSUM_ENABLED | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT @@ -2342,7 +2262,6 @@ struct e1000_info e1000_ich8_info = { struct e1000_info e1000_ich9_info = { .mac = e1000_ich9lan, .flags = FLAG_HAS_JUMBO_FRAMES - | FLAG_IS_ICH | FLAG_HAS_WOL | FLAG_RX_CSUM_ENABLED | FLAG_HAS_CTRLEXT_ON_LOAD diff --git a/trunk/drivers/net/e1000e/netdev.c b/trunk/drivers/net/e1000e/netdev.c index 8cbb40f3a506..8991ab8911e2 100644 --- a/trunk/drivers/net/e1000e/netdev.c +++ b/trunk/drivers/net/e1000e/netdev.c @@ -43,11 +43,10 @@ #include #include #include -#include #include "e1000.h" -#define DRV_VERSION "0.3.3.3-k2" +#define DRV_VERSION "0.2.1" char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION; @@ -341,89 +340,6 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, } } -/** - * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers - * @adapter: address of board private structure - * @rx_ring: pointer to receive ring structure - * @cleaned_count: number of buffers to allocate this pass - **/ - -static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, - int cleaned_count) -{ - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - struct e1000_rx_desc *rx_desc; - struct e1000_ring *rx_ring = adapter->rx_ring; - struct e1000_buffer *buffer_info; - struct sk_buff *skb; - unsigned int i; - unsigned int bufsz = 256 - - 16 /* for skb_reserve */ - - NET_IP_ALIGN; - - i = rx_ring->next_to_use; - buffer_info = &rx_ring->buffer_info[i]; - - while (cleaned_count--) { - skb = buffer_info->skb; - if (skb) { - skb_trim(skb, 0); - goto check_page; - } - - skb = netdev_alloc_skb(netdev, bufsz); - if (unlikely(!skb)) { - /* Better luck next round */ - adapter->alloc_rx_buff_failed++; - break; - } - - /* Make buffer alignment 2 beyond a 16 byte boundary - * this will result in a 16 byte aligned IP header after - * the 14 byte MAC header 
is removed - */ - skb_reserve(skb, NET_IP_ALIGN); - - buffer_info->skb = skb; -check_page: - /* allocate a new page if necessary */ - if (!buffer_info->page) { - buffer_info->page = alloc_page(GFP_ATOMIC); - if (unlikely(!buffer_info->page)) { - adapter->alloc_rx_buff_failed++; - break; - } - } - - if (!buffer_info->dma) - buffer_info->dma = pci_map_page(pdev, - buffer_info->page, 0, - PAGE_SIZE, - PCI_DMA_FROMDEVICE); - - rx_desc = E1000_RX_DESC(*rx_ring, i); - rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); - - if (unlikely(++i == rx_ring->count)) - i = 0; - buffer_info = &rx_ring->buffer_info[i]; - } - - if (likely(rx_ring->next_to_use != i)) { - rx_ring->next_to_use = i; - if (unlikely(i-- == 0)) - i = (rx_ring->count - 1); - - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). */ - wmb(); - writel(i, adapter->hw.hw_addr + rx_ring->tail); - } -} - /** * e1000_clean_rx_irq - Send received data up the network stack; legacy * @adapter: board private structure @@ -866,186 +782,6 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, return cleaned; } -/** - * e1000_consume_page - helper function - **/ -static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, - u16 length) -{ - bi->page = NULL; - skb->len += length; - skb->data_len += length; - skb->truesize += length; -} - -/** - * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy - * @adapter: board private structure - * - * the return value indicates whether actual cleaning was done, there - * is no guarantee that everything was cleaned - **/ - -static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, - int *work_done, int work_to_do) -{ - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - struct e1000_ring *rx_ring = adapter->rx_ring; - struct e1000_rx_desc *rx_desc, *next_rxd; - struct e1000_buffer *buffer_info, *next_buffer; - u32 length; - unsigned int i; - int cleaned_count = 0; - bool cleaned = false; - unsigned int total_rx_bytes=0, total_rx_packets=0; - - i = rx_ring->next_to_clean; - rx_desc = E1000_RX_DESC(*rx_ring, i); - buffer_info = &rx_ring->buffer_info[i]; - - while (rx_desc->status & E1000_RXD_STAT_DD) { - struct sk_buff *skb; - u8 status; - - if (*work_done >= work_to_do) - break; - (*work_done)++; - - status = rx_desc->status; - skb = buffer_info->skb; - buffer_info->skb = NULL; - - ++i; - if (i == rx_ring->count) - i = 0; - next_rxd = E1000_RX_DESC(*rx_ring, i); - prefetch(next_rxd); - - next_buffer = &rx_ring->buffer_info[i]; - - cleaned = true; - cleaned_count++; - pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE, - PCI_DMA_FROMDEVICE); - buffer_info->dma = 0; - - length = le16_to_cpu(rx_desc->length); - - /* errors is only valid for DD + EOP descriptors */ - if (unlikely((status & E1000_RXD_STAT_EOP) && - (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { - /* recycle both page and skb */ - buffer_info->skb = skb; - /* an error means any chain goes out the window - * too */ - if (rx_ring->rx_skb_top) - dev_kfree_skb(rx_ring->rx_skb_top); - rx_ring->rx_skb_top = NULL; - goto next_desc; - } - -#define rxtop rx_ring->rx_skb_top - if (!(status & E1000_RXD_STAT_EOP)) { - /* this descriptor is only the beginning (or middle) */ - if (!rxtop) { - /* this is the beginning of a chain */ - rxtop = skb; - skb_fill_page_desc(rxtop, 0, buffer_info->page, - 0, length); - } else { - /* this is the 
middle of a chain */ - skb_fill_page_desc(rxtop, - skb_shinfo(rxtop)->nr_frags, - buffer_info->page, 0, length); - /* re-use the skb, only consumed the page */ - buffer_info->skb = skb; - } - e1000_consume_page(buffer_info, rxtop, length); - goto next_desc; - } else { - if (rxtop) { - /* end of the chain */ - skb_fill_page_desc(rxtop, - skb_shinfo(rxtop)->nr_frags, - buffer_info->page, 0, length); - /* re-use the current skb, we only consumed the - * page */ - buffer_info->skb = skb; - skb = rxtop; - rxtop = NULL; - e1000_consume_page(buffer_info, skb, length); - } else { - /* no chain, got EOP, this buf is the packet - * copybreak to save the put_page/alloc_page */ - if (length <= copybreak && - skb_tailroom(skb) >= length) { - u8 *vaddr; - vaddr = kmap_atomic(buffer_info->page, - KM_SKB_DATA_SOFTIRQ); - memcpy(skb_tail_pointer(skb), vaddr, - length); - kunmap_atomic(vaddr, - KM_SKB_DATA_SOFTIRQ); - /* re-use the page, so don't erase - * buffer_info->page */ - skb_put(skb, length); - } else { - skb_fill_page_desc(skb, 0, - buffer_info->page, 0, - length); - e1000_consume_page(buffer_info, skb, - length); - } - } - } - - /* Receive Checksum Offload XXX recompute due to CRC strip? */ - e1000_rx_checksum(adapter, - (u32)(status) | - ((u32)(rx_desc->errors) << 24), - le16_to_cpu(rx_desc->csum), skb); - - /* probably a little skewed due to removing CRC */ - total_rx_bytes += skb->len; - total_rx_packets++; - - /* eth type trans needs skb->data to point to something */ - if (!pskb_may_pull(skb, ETH_HLEN)) { - ndev_err(netdev, "pskb_may_pull failed.\n"); - dev_kfree_skb(skb); - goto next_desc; - } - - e1000_receive_skb(adapter, netdev, skb, status, - rx_desc->special); - -next_desc: - rx_desc->status = 0; - - /* return some buffers to hardware, one at a time is too slow */ - if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { - adapter->alloc_rx_buf(adapter, cleaned_count); - cleaned_count = 0; - } - - /* use prefetched values */ - rx_desc = next_rxd; - buffer_info = next_buffer; - } - rx_ring->next_to_clean = i; - - cleaned_count = e1000_desc_unused(rx_ring); - if (cleaned_count) - adapter->alloc_rx_buf(adapter, cleaned_count); - - adapter->total_rx_bytes += total_rx_bytes; - adapter->total_rx_packets += total_rx_packets; - adapter->net_stats.rx_bytes += total_rx_bytes; - adapter->net_stats.rx_packets += total_rx_packets; - return cleaned; -} - /** * e1000_clean_rx_ring - Free Rx Buffers per Queue * @adapter: board private structure @@ -1066,10 +802,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter) pci_unmap_single(pdev, buffer_info->dma, adapter->rx_buffer_len, PCI_DMA_FROMDEVICE); - else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) - pci_unmap_page(pdev, buffer_info->dma, - PAGE_SIZE, - PCI_DMA_FROMDEVICE); else if (adapter->clean_rx == e1000_clean_rx_irq_ps) pci_unmap_single(pdev, buffer_info->dma, adapter->rx_ps_bsize0, @@ -1077,11 +809,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter) buffer_info->dma = 0; } - if (buffer_info->page) { - put_page(buffer_info->page); - buffer_info->page = NULL; - } - if (buffer_info->skb) { dev_kfree_skb(buffer_info->skb); buffer_info->skb = NULL; @@ -2028,12 +1755,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) * a lot of memory, since we allocate 3 pages at all times * per packet. 
*/ + adapter->rx_ps_pages = 0; pages = PAGE_USE_COUNT(adapter->netdev->mtu); - if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) && - (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) + if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) adapter->rx_ps_pages = pages; - else - adapter->rx_ps_pages = 0; if (adapter->rx_ps_pages) { /* Configure extra packet-split registers */ @@ -2094,12 +1819,9 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) sizeof(union e1000_rx_desc_packet_split); adapter->clean_rx = e1000_clean_rx_irq_ps; adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; - } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { - rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); - adapter->clean_rx = e1000_clean_jumbo_rx_irq; - adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; } else { - rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); + rdlen = rx_ring->count * + sizeof(struct e1000_rx_desc); adapter->clean_rx = e1000_clean_rx_irq; adapter->alloc_rx_buf = e1000_alloc_rx_buffers; } @@ -2163,21 +1885,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) * units), e.g. using jumbo frames when setting to E1000_ERT_2048 */ if ((adapter->flags & FLAG_HAS_ERT) && - (adapter->netdev->mtu > ETH_DATA_LEN)) { - u32 rxdctl = er32(RXDCTL(0)); - ew32(RXDCTL(0), rxdctl | 0x3); - ew32(ERT, E1000_ERT_2048 | (1 << 13)); - /* - * With jumbo frames and early-receive enabled, excessive - * C4->C2 latencies result in dropped transactions. - */ - pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, - e1000e_driver_name, 55); - } else { - pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, - e1000e_driver_name, - PM_QOS_DEFAULT_VALUE); - } + (adapter->netdev->mtu > ETH_DATA_LEN)) + ew32(ERT, E1000_ERT_2048); /* Enable Receives */ ew32(RCTL, rctl); @@ -2446,14 +2155,6 @@ void e1000e_reset(struct e1000_adapter *adapter) /* Allow time for pending master requests to run */ mac->ops.reset_hw(hw); - - /* - * For parts with AMT enabled, let the firmware know - * that the network interface is in control - */ - if ((adapter->flags & FLAG_HAS_AMT) && e1000e_check_mng_mode(hw)) - e1000_get_hw_control(adapter); - ew32(WUC, 0); if (mac->ops.init_hw(hw)) @@ -3768,8 +3469,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) * means we reserve 2 more, this pushes us to allocate from the next * larger slab size. * i.e. 
RXBUFFER_2048 --> size-4096 slab - * However with the new *_jumbo_rx* routines, jumbo receives will use - * fragmented skbs */ if (max_frame <= 256) @@ -3927,9 +3626,6 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) ew32(CTRL_EXT, ctrl_ext); } - if (adapter->flags & FLAG_IS_ICH) - e1000e_disable_gig_wol_ich8lan(&adapter->hw); - /* Allow time for pending master requests to run */ e1000e_disable_pcie_master(&adapter->hw); @@ -4596,13 +4292,6 @@ static struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, - - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, { } /* terminate list */ }; @@ -4637,9 +4326,7 @@ static int __init e1000_init_module(void) printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n", e1000e_driver_name); ret = pci_register_driver(&e1000_driver); - pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name, - PM_QOS_DEFAULT_VALUE); - + return ret; } module_init(e1000_init_module); @@ -4653,7 +4340,6 @@ module_init(e1000_init_module); static void __exit e1000_exit_module(void) { pci_unregister_driver(&e1000_driver); - pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name); } module_exit(e1000_exit_module); diff --git a/trunk/drivers/net/e1000e/phy.c b/trunk/drivers/net/e1000e/phy.c index b133dcf0e950..e102332a6bee 100644 --- a/trunk/drivers/net/e1000e/phy.c +++ b/trunk/drivers/net/e1000e/phy.c @@ -34,9 +34,6 @@ static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); static s32 e1000_wait_autoneg(struct e1000_hw *hw); -static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg); -static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, - u16 *data, bool read); /* Cable length tables */ static const u16 e1000_m88_cable_length_table[] = @@ -468,10 +465,6 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) if (phy->disable_polarity_correction == 1) phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; - /* Enable downshift on BM (disabled by default) */ - if (phy->type == e1000_phy_bm) - phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; - ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; @@ -1783,10 +1776,6 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) case IFE_C_E_PHY_ID: phy_type = e1000_phy_ife; break; - case BME1000_E_PHY_ID: - case BME1000_E_PHY_ID_R2: - phy_type = e1000_phy_bm; - break; default: phy_type = e1000_phy_unknown; break; @@ -1794,273 +1783,6 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) return phy_type; } -/** - * e1000e_determine_phy_address - Determines PHY address. - * @hw: pointer to the HW structure - * - * This uses a trial and error method to loop through possible PHY - * addresses. It tests each by reading the PHY ID registers and - * checking for a match. 
- **/ -s32 e1000e_determine_phy_address(struct e1000_hw *hw) -{ - s32 ret_val = -E1000_ERR_PHY_TYPE; - u32 phy_addr= 0; - u32 i = 0; - enum e1000_phy_type phy_type = e1000_phy_unknown; - - do { - for (phy_addr = 0; phy_addr < 4; phy_addr++) { - hw->phy.addr = phy_addr; - e1000e_get_phy_id(hw); - phy_type = e1000e_get_phy_type_from_id(hw->phy.id); - - /* - * If phy_type is valid, break - we found our - * PHY address - */ - if (phy_type != e1000_phy_unknown) { - ret_val = 0; - break; - } - } - i++; - } while ((ret_val != 0) && (i < 100)); - - return ret_val; -} - -/** - * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address - * @page: page to access - * - * Returns the phy address for the page requested. - **/ -static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg) -{ - u32 phy_addr = 2; - - if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31)) - phy_addr = 1; - - return phy_addr; -} - -/** - * e1000e_write_phy_reg_bm - Write BM PHY register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset - * - * Acquires semaphore, if necessary, then writes the data to PHY register - * at the offset. Release any acquired semaphores before exiting. - **/ -s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) -{ - s32 ret_val; - u32 page_select = 0; - u32 page = offset >> IGP_PAGE_SHIFT; - u32 page_shift = 0; - - /* Page 800 works differently than the rest so it has its own func */ - if (page == BM_WUC_PAGE) { - ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, - false); - goto out; - } - - ret_val = hw->phy.ops.acquire_phy(hw); - if (ret_val) - goto out; - - hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); - - if (offset > MAX_PHY_MULTI_PAGE_REG) { - /* - * Page select is register 31 for phy address 1 and 22 for - * phy address 2 and 3. Page select is shifted only for - * phy address 1. - */ - if (hw->phy.addr == 1) { - page_shift = IGP_PAGE_SHIFT; - page_select = IGP01E1000_PHY_PAGE_SELECT; - } else { - page_shift = 0; - page_select = BM_PHY_PAGE_SELECT; - } - - /* Page is shifted left, PHY expects (page x 32) */ - ret_val = e1000e_write_phy_reg_mdic(hw, page_select, - (page << page_shift)); - if (ret_val) { - hw->phy.ops.release_phy(hw); - goto out; - } - } - - ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); - - hw->phy.ops.release_phy(hw); - -out: - return ret_val; -} - -/** - * e1000e_read_phy_reg_bm - Read BM PHY register - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data - * - * Acquires semaphore, if necessary, then reads the PHY register at offset - * and storing the retrieved information in data. Release any acquired - * semaphores before exiting. - **/ -s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) -{ - s32 ret_val; - u32 page_select = 0; - u32 page = offset >> IGP_PAGE_SHIFT; - u32 page_shift = 0; - - /* Page 800 works differently than the rest so it has its own func */ - if (page == BM_WUC_PAGE) { - ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, - true); - goto out; - } - - ret_val = hw->phy.ops.acquire_phy(hw); - if (ret_val) - goto out; - - hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); - - if (offset > MAX_PHY_MULTI_PAGE_REG) { - /* - * Page select is register 31 for phy address 1 and 22 for - * phy address 2 and 3. Page select is shifted only for - * phy address 1. 
- */ - if (hw->phy.addr == 1) { - page_shift = IGP_PAGE_SHIFT; - page_select = IGP01E1000_PHY_PAGE_SELECT; - } else { - page_shift = 0; - page_select = BM_PHY_PAGE_SELECT; - } - - /* Page is shifted left, PHY expects (page x 32) */ - ret_val = e1000e_write_phy_reg_mdic(hw, page_select, - (page << page_shift)); - if (ret_val) { - hw->phy.ops.release_phy(hw); - goto out; - } - } - - ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); - hw->phy.ops.release_phy(hw); - -out: - return ret_val; -} - -/** - * e1000_access_phy_wakeup_reg_bm - Read BM PHY wakeup register - * @hw: pointer to the HW structure - * @offset: register offset to be read or written - * @data: pointer to the data to read or write - * @read: determines if operation is read or write - * - * Acquires semaphore, if necessary, then reads the PHY register at offset - * and storing the retrieved information in data. Release any acquired - * semaphores before exiting. Note that procedure to read the wakeup - * registers are different. It works as such: - * 1) Set page 769, register 17, bit 2 = 1 - * 2) Set page to 800 for host (801 if we were manageability) - * 3) Write the address using the address opcode (0x11) - * 4) Read or write the data using the data opcode (0x12) - * 5) Restore 769_17.2 to its original value - **/ -static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, - u16 *data, bool read) -{ - s32 ret_val; - u16 reg = ((u16)offset) & PHY_REG_MASK; - u16 phy_reg = 0; - u8 phy_acquired = 1; - - - ret_val = hw->phy.ops.acquire_phy(hw); - if (ret_val) { - phy_acquired = 0; - goto out; - } - - /* All operations in this function are phy address 1 */ - hw->phy.addr = 1; - - /* Set page 769 */ - e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, - (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); - - ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg); - if (ret_val) - goto out; - - /* First clear bit 4 to avoid a power state change */ - phy_reg &= ~(BM_WUC_HOST_WU_BIT); - ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); - if (ret_val) - goto out; - - /* Write bit 2 = 1, and clear bit 4 to 769_17 */ - ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, - phy_reg | BM_WUC_ENABLE_BIT); - if (ret_val) - goto out; - - /* Select page 800 */ - ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, - (BM_WUC_PAGE << IGP_PAGE_SHIFT)); - - /* Write the page 800 offset value using opcode 0x11 */ - ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg); - if (ret_val) - goto out; - - if (read) { - /* Read the page 800 value using opcode 0x12 */ - ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, - data); - } else { - /* Read the page 800 value using opcode 0x12 */ - ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, - *data); - } - - if (ret_val) - goto out; - - /* - * Restore 769_17.2 to its original value - * Set page 769 - */ - e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, - (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); - - /* Clear 769_17.2 */ - ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); - -out: - if (phy_acquired == 1) - hw->phy.ops.release_phy(hw); - return ret_val; -} - /** * e1000e_commit_phy - Soft PHY reset * @hw: pointer to the HW structure diff --git a/trunk/drivers/net/eexpress.c b/trunk/drivers/net/eexpress.c index 795c594a4b7c..2eb82aba4a8b 100644 --- a/trunk/drivers/net/eexpress.c +++ b/trunk/drivers/net/eexpress.c @@ -202,7 +202,7 @@ static unsigned short 
start_code[] = { 0x0000,Cmd_MCast, 0x0076, /* link to next command */ #define CONF_NR_MULTICAST 0x44 - 0x0000, /* number of bytes in multicast address(es) */ + 0x0000, /* number of multicast addresses */ #define CONF_MULTICAST 0x46 0x0000, 0x0000, 0x0000, /* some addresses */ 0x0000, 0x0000, 0x0000, @@ -1569,7 +1569,7 @@ static void eexp_hw_init586(struct net_device *dev) static void eexp_setup_filter(struct net_device *dev) { - struct dev_mc_list *dmi; + struct dev_mc_list *dmi = dev->mc_list; unsigned short ioaddr = dev->base_addr; int count = dev->mc_count; int i; @@ -1580,9 +1580,9 @@ static void eexp_setup_filter(struct net_device *dev) } outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR); - outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST)); - for (i = 0, dmi = dev->mc_list; i < count; i++, dmi = dmi->next) { - unsigned short *data; + outw(count, ioaddr+SHADOW(CONF_NR_MULTICAST)); + for (i = 0; i < count; i++) { + unsigned short *data = (unsigned short *)dmi->dmi_addr; if (!dmi) { printk(KERN_INFO "%s: too few multicast addresses\n", dev->name); break; @@ -1591,7 +1591,6 @@ static void eexp_setup_filter(struct net_device *dev) printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name); continue; } - data = (unsigned short *)dmi->dmi_addr; outw((CONF_MULTICAST+(6*i)) & ~31, ioaddr+SM_PTR); outw(data[0], ioaddr+SHADOW(CONF_MULTICAST+(6*i))); outw((CONF_MULTICAST+(6*i)+2) & ~31, ioaddr+SM_PTR); diff --git a/trunk/drivers/net/fs_enet/mii-fec.c b/trunk/drivers/net/fs_enet/mii-fec.c index f0014cfbb275..ba75efc9f5b5 100644 --- a/trunk/drivers/net/fs_enet/mii-fec.c +++ b/trunk/drivers/net/fs_enet/mii-fec.c @@ -194,7 +194,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev, ret = of_address_to_resource(ofdev->node, 0, &res); if (ret) - goto out_res; + return ret; snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start); @@ -236,7 +236,6 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev, kfree(new_bus->irq); out_unmap_regs: iounmap(fec->fecp); -out_res: out_fec: kfree(fec); out_mii: diff --git a/trunk/drivers/net/gianfar.c b/trunk/drivers/net/gianfar.c index 6f22f068d6ee..587afe7be689 100644 --- a/trunk/drivers/net/gianfar.c +++ b/trunk/drivers/net/gianfar.c @@ -138,7 +138,6 @@ static int gfar_poll(struct napi_struct *napi, int budget); static void gfar_netpoll(struct net_device *dev); #endif int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); -static int gfar_clean_tx_ring(struct net_device *dev); static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length); static void gfar_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp); @@ -1142,7 +1141,7 @@ static int gfar_close(struct net_device *dev) } /* Changes the mac address if the controller is not running. 
*/ -static int gfar_set_mac_address(struct net_device *dev) +int gfar_set_mac_address(struct net_device *dev) { gfar_set_mac_for_addr(dev, 0, dev->dev_addr); @@ -1261,7 +1260,7 @@ static void gfar_timeout(struct net_device *dev) } /* Interrupt Handler for Transmit complete */ -static int gfar_clean_tx_ring(struct net_device *dev) +int gfar_clean_tx_ring(struct net_device *dev) { struct txbd8 *bdp; struct gfar_private *priv = netdev_priv(dev); diff --git a/trunk/drivers/net/gianfar.h b/trunk/drivers/net/gianfar.h index 27f37c81e52c..fd487be3993e 100644 --- a/trunk/drivers/net/gianfar.h +++ b/trunk/drivers/net/gianfar.h @@ -782,8 +782,5 @@ extern void gfar_halt(struct net_device *dev); extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable, u32 regnum, u32 read); void gfar_init_sysfs(struct net_device *dev); -int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id, - int regnum, u16 value); -int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum); #endif /* __GIANFAR_H */ diff --git a/trunk/drivers/net/gianfar_sysfs.c b/trunk/drivers/net/gianfar_sysfs.c index 5116f68e01b9..230878b94190 100644 --- a/trunk/drivers/net/gianfar_sysfs.c +++ b/trunk/drivers/net/gianfar_sysfs.c @@ -103,10 +103,10 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev, spin_lock_irqsave(&priv->rxlock, flags); if (length > priv->rx_buffer_size) - goto out; + return count; if (length == priv->rx_stash_size) - goto out; + return count; priv->rx_stash_size = length; @@ -125,7 +125,6 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev, gfar_write(&priv->regs->attr, temp); -out: spin_unlock_irqrestore(&priv->rxlock, flags); return count; @@ -155,10 +154,10 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev, spin_lock_irqsave(&priv->rxlock, flags); if (index > priv->rx_stash_size) - goto out; + return count; if (index == priv->rx_stash_index) - goto out; + return count; priv->rx_stash_index = index; @@ -167,7 +166,6 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev, temp |= ATTRELI_EI(index); gfar_write(&priv->regs->attreli, flags); -out: spin_unlock_irqrestore(&priv->rxlock, flags); return count; diff --git a/trunk/drivers/net/irda/nsc-ircc.c b/trunk/drivers/net/irda/nsc-ircc.c index a7714da7c283..a873d2b315ca 100644 --- a/trunk/drivers/net/irda/nsc-ircc.c +++ b/trunk/drivers/net/irda/nsc-ircc.c @@ -100,9 +100,7 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info); static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info); static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info); static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info); -#ifdef CONFIG_PNP static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id); -#endif /* These are the known NSC chips */ static nsc_chip_t chips[] = { @@ -158,11 +156,9 @@ static const struct pnp_device_id nsc_ircc_pnp_table[] = { MODULE_DEVICE_TABLE(pnp, nsc_ircc_pnp_table); static struct pnp_driver nsc_ircc_pnp_driver = { -#ifdef CONFIG_PNP .name = "nsc-ircc", .id_table = nsc_ircc_pnp_table, .probe = nsc_ircc_pnp_probe, -#endif }; /* Some prototypes */ @@ -920,7 +916,6 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info) return 0; } -#ifdef CONFIG_PNP /* PNP probing */ static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id) { @@ -957,7 +952,6 @@ static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *i return 0; } -#endif /* * Function nsc_ircc_setup (info) diff --git 
a/trunk/drivers/net/irda/smsc-ircc2.c b/trunk/drivers/net/irda/smsc-ircc2.c
index cfe0194fef71..1f26da761e9f 100644
--- a/trunk/drivers/net/irda/smsc-ircc2.c
+++ b/trunk/drivers/net/irda/smsc-ircc2.c
@@ -376,7 +376,6 @@ MODULE_DEVICE_TABLE(pnp, smsc_ircc_pnp_table);
 
 static int pnp_driver_registered;
 
-#ifdef CONFIG_PNP
 static int __init smsc_ircc_pnp_probe(struct pnp_dev *dev,
 				      const struct pnp_device_id *dev_id)
 {
@@ -403,9 +402,7 @@ static struct pnp_driver smsc_ircc_pnp_driver = {
 	.id_table	= smsc_ircc_pnp_table,
 	.probe		= smsc_ircc_pnp_probe,
 };
-#else /* CONFIG_PNP */
-static struct pnp_driver smsc_ircc_pnp_driver;
-#endif
+
 
 /*******************************************************************************
 *
diff --git a/trunk/drivers/net/macvlan.c b/trunk/drivers/net/macvlan.c
index c36a03ae9bfb..2056cfc624dc 100644
--- a/trunk/drivers/net/macvlan.c
+++ b/trunk/drivers/net/macvlan.c
@@ -450,7 +450,7 @@ static void macvlan_dellink(struct net_device *dev)
 	unregister_netdevice(dev);
 
 	if (list_empty(&port->vlans))
-		macvlan_port_destroy(port->dev);
+		macvlan_port_destroy(dev);
 }
 
 static struct rtnl_link_ops macvlan_link_ops __read_mostly = {
diff --git a/trunk/drivers/net/mlx4/mr.c b/trunk/drivers/net/mlx4/mr.c
index 03a9abcce524..cb46446b2691 100644
--- a/trunk/drivers/net/mlx4/mr.c
+++ b/trunk/drivers/net/mlx4/mr.c
@@ -551,7 +551,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
 	u64 mtt_seg;
 	int err = -ENOMEM;
 
-	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
+	if (page_shift < 12 || page_shift >= 32)
 		return -EINVAL;
 
 	/* All MTTs must fit in the same page */
diff --git a/trunk/drivers/net/mv643xx_eth.c b/trunk/drivers/net/mv643xx_eth.c
index b7915cdcc6a5..381b36e5f64c 100644
--- a/trunk/drivers/net/mv643xx_eth.c
+++ b/trunk/drivers/net/mv643xx_eth.c
@@ -91,11 +91,6 @@
  */
 #define PHY_ADDR_REG		0x0000
 #define SMI_REG			0x0004
-#define WINDOW_BASE(i)		(0x0200 + ((i) << 3))
-#define WINDOW_SIZE(i)		(0x0204 + ((i) << 3))
-#define WINDOW_REMAP_HIGH(i)	(0x0280 + ((i) << 2))
-#define WINDOW_BAR_ENABLE	0x0290
-#define WINDOW_PROTECT(i)	(0x0294 + ((i) << 4))
 
 /*
  * Per-port registers.
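The mv643xx_eth.c hunks in this part of the patch replace the per-chip shared state (the WINDOW_* register macros removed above, plus a shared SMI spinlock and t_clk structure removed below) with a single ioremap()'d register block accessed through small rdl()/wrl() helpers and one global spinlock serializing SMI_REG. The following is only a rough, hypothetical sketch of that accessor idiom; regs_base, smi_lock and smi_reg_write are made-up names for illustration, not the driver's own symbols:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define SMI_REG		0x0004		/* same offset as the define kept above */

static void __iomem *regs_base;		/* mapped once with ioremap() at probe time */
static DEFINE_SPINLOCK(smi_lock);	/* SMI_REG is shared by every port */

static inline u32 rdl(int offset)
{
	return readl(regs_base + offset);
}

static inline void wrl(int offset, u32 data)
{
	writel(data, regs_base + offset);
}

static void smi_reg_write(u32 val)
{
	unsigned long flags;

	/* serialize access to the shared SMI register across ports */
	spin_lock_irqsave(&smi_lock, flags);
	wrl(SMI_REG, val);
	spin_unlock_irqrestore(&smi_lock, flags);
}

In the patch itself the same idiom appears as mv643xx_eth_base with rdl(mp, offset)/wrl(mp, offset, data) and mv643xx_eth_phy_lock around the SMI accesses; this sketch leaves out the ETH_SMI_BUSY polling that the real eth_port_read_smi_reg()/eth_port_write_smi_reg() perform before each SMI operation.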
@@ -512,23 +507,9 @@ struct mv643xx_mib_counters { u32 late_collision; }; -struct mv643xx_shared_private { - void __iomem *eth_base; - - /* used to protect SMI_REG, which is shared across ports */ - spinlock_t phy_lock; - - u32 win_protect; - - unsigned int t_clk; -}; - struct mv643xx_private { - struct mv643xx_shared_private *shared; int port_num; /* User Ethernet port number */ - struct mv643xx_shared_private *shared_smi; - u32 rx_sram_addr; /* Base address of rx sram area */ u32 rx_sram_size; /* Size of rx sram area */ u32 tx_sram_addr; /* Base address of tx sram area */ @@ -633,14 +614,19 @@ static const struct ethtool_ops mv643xx_ethtool_ops; static char mv643xx_driver_name[] = "mv643xx_eth"; static char mv643xx_driver_version[] = "1.0"; +static void __iomem *mv643xx_eth_base; + +/* used to protect SMI_REG, which is shared across ports */ +static DEFINE_SPINLOCK(mv643xx_eth_phy_lock); + static inline u32 rdl(struct mv643xx_private *mp, int offset) { - return readl(mp->shared->eth_base + offset); + return readl(mv643xx_eth_base + offset); } static inline void wrl(struct mv643xx_private *mp, int offset, u32 data) { - writel(data, mp->shared->eth_base + offset); + writel(data, mv643xx_eth_base + offset); } /* @@ -1133,6 +1119,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id) * * INPUT: * struct mv643xx_private *mp Ethernet port + * unsigned int t_clk t_clk of the MV-643xx chip in HZ units * unsigned int delay Delay in usec * * OUTPUT: @@ -1143,10 +1130,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id) * */ static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp, - unsigned int delay) + unsigned int t_clk, unsigned int delay) { unsigned int port_num = mp->port_num; - unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64; + unsigned int coal = ((t_clk / 1000000) * delay) / 64; /* Set RX Coalescing mechanism */ wrl(mp, SDMA_CONFIG_REG(port_num), @@ -1171,6 +1158,7 @@ static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp, * * INPUT: * struct mv643xx_private *mp Ethernet port + * unsigned int t_clk t_clk of the MV-643xx chip in HZ units * unsigned int delay Delay in uSeconds * * OUTPUT: @@ -1181,9 +1169,9 @@ static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp, * */ static unsigned int eth_port_set_tx_coal(struct mv643xx_private *mp, - unsigned int delay) + unsigned int t_clk, unsigned int delay) { - unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64; + unsigned int coal = ((t_clk / 1000000) * delay) / 64; /* Set TX Coalescing mechanism */ wrl(mp, TX_FIFO_URGENT_THRESHOLD_REG(mp->port_num), coal << 4); @@ -1425,11 +1413,11 @@ static int mv643xx_eth_open(struct net_device *dev) #ifdef MV643XX_COAL mp->rx_int_coal = - eth_port_set_rx_coal(mp, MV643XX_RX_COAL); + eth_port_set_rx_coal(mp, 133000000, MV643XX_RX_COAL); #endif mp->tx_int_coal = - eth_port_set_tx_coal(mp, MV643XX_TX_COAL); + eth_port_set_tx_coal(mp, 133000000, MV643XX_TX_COAL); /* Unmask phy and link status changes interrupts */ wrl(mp, INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT); @@ -1839,11 +1827,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev) return -ENODEV; } - if (pd->shared == NULL) { - printk(KERN_ERR "No mv643xx_eth_platform_data->shared\n"); - return -ENODEV; - } - dev = alloc_etherdev(sizeof(struct mv643xx_private)); if (!dev) return -ENOMEM; @@ -1894,16 +1877,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev) spin_lock_init(&mp->lock); - mp->shared = 
platform_get_drvdata(pd->shared); port_num = mp->port_num = pd->port_number; - if (mp->shared->win_protect) - wrl(mp, WINDOW_PROTECT(port_num), mp->shared->win_protect); - - mp->shared_smi = mp->shared; - if (pd->shared_smi != NULL) - mp->shared_smi = platform_get_drvdata(pd->shared_smi); - /* set default config values */ eth_port_uc_addr_get(mp, dev->dev_addr); mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE; @@ -2008,91 +1983,30 @@ static int mv643xx_eth_remove(struct platform_device *pdev) return 0; } -static void mv643xx_eth_conf_mbus_windows(struct mv643xx_shared_private *msp, - struct mbus_dram_target_info *dram) -{ - void __iomem *base = msp->eth_base; - u32 win_enable; - u32 win_protect; - int i; - - for (i = 0; i < 6; i++) { - writel(0, base + WINDOW_BASE(i)); - writel(0, base + WINDOW_SIZE(i)); - if (i < 4) - writel(0, base + WINDOW_REMAP_HIGH(i)); - } - - win_enable = 0x3f; - win_protect = 0; - - for (i = 0; i < dram->num_cs; i++) { - struct mbus_dram_window *cs = dram->cs + i; - - writel((cs->base & 0xffff0000) | - (cs->mbus_attr << 8) | - dram->mbus_dram_target_id, base + WINDOW_BASE(i)); - writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); - - win_enable &= ~(1 << i); - win_protect |= 3 << (2 * i); - } - - writel(win_enable, base + WINDOW_BAR_ENABLE); - msp->win_protect = win_protect; -} - static int mv643xx_eth_shared_probe(struct platform_device *pdev) { static int mv643xx_version_printed = 0; - struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; - struct mv643xx_shared_private *msp; struct resource *res; - int ret; if (!mv643xx_version_printed++) printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n"); - ret = -EINVAL; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) - goto out; - - ret = -ENOMEM; - msp = kmalloc(sizeof(*msp), GFP_KERNEL); - if (msp == NULL) - goto out; - memset(msp, 0, sizeof(*msp)); - - msp->eth_base = ioremap(res->start, res->end - res->start + 1); - if (msp->eth_base == NULL) - goto out_free; - - spin_lock_init(&msp->phy_lock); - msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000; - - platform_set_drvdata(pdev, msp); + return -ENODEV; - /* - * (Re-)program MBUS remapping windows if we are asked to. 
- */ - if (pd != NULL && pd->dram != NULL) - mv643xx_eth_conf_mbus_windows(msp, pd->dram); + mv643xx_eth_base = ioremap(res->start, res->end - res->start + 1); + if (mv643xx_eth_base == NULL) + return -ENOMEM; return 0; -out_free: - kfree(msp); -out: - return ret; } static int mv643xx_eth_shared_remove(struct platform_device *pdev) { - struct mv643xx_shared_private *msp = platform_get_drvdata(pdev); - - iounmap(msp->eth_base); - kfree(msp); + iounmap(mv643xx_eth_base); + mv643xx_eth_base = NULL; return 0; } @@ -2992,16 +2906,15 @@ static void eth_port_reset(struct mv643xx_private *mp) static void eth_port_read_smi_reg(struct mv643xx_private *mp, unsigned int phy_reg, unsigned int *value) { - void __iomem *smi_reg = mp->shared_smi->eth_base + SMI_REG; int phy_addr = ethernet_phy_get(mp); unsigned long flags; int i; /* the SMI register is a shared resource */ - spin_lock_irqsave(&mp->shared_smi->phy_lock, flags); + spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); /* wait for the SMI register to become available */ - for (i = 0; readl(smi_reg) & ETH_SMI_BUSY; i++) { + for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) { if (i == PHY_WAIT_ITERATIONS) { printk("%s: PHY busy timeout\n", mp->dev->name); goto out; @@ -3009,11 +2922,11 @@ static void eth_port_read_smi_reg(struct mv643xx_private *mp, udelay(PHY_WAIT_MICRO_SECONDS); } - writel((phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ, - smi_reg); + wrl(mp, SMI_REG, + (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ); /* now wait for the data to be valid */ - for (i = 0; !(readl(smi_reg) & ETH_SMI_READ_VALID); i++) { + for (i = 0; !(rdl(mp, SMI_REG) & ETH_SMI_READ_VALID); i++) { if (i == PHY_WAIT_ITERATIONS) { printk("%s: PHY read timeout\n", mp->dev->name); goto out; @@ -3021,9 +2934,9 @@ static void eth_port_read_smi_reg(struct mv643xx_private *mp, udelay(PHY_WAIT_MICRO_SECONDS); } - *value = readl(smi_reg) & 0xffff; + *value = rdl(mp, SMI_REG) & 0xffff; out: - spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags); + spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); } /* @@ -3049,16 +2962,17 @@ static void eth_port_read_smi_reg(struct mv643xx_private *mp, static void eth_port_write_smi_reg(struct mv643xx_private *mp, unsigned int phy_reg, unsigned int value) { - void __iomem *smi_reg = mp->shared_smi->eth_base + SMI_REG; - int phy_addr = ethernet_phy_get(mp); - unsigned long flags; + int phy_addr; int i; + unsigned long flags; + + phy_addr = ethernet_phy_get(mp); /* the SMI register is a shared resource */ - spin_lock_irqsave(&mp->shared_smi->phy_lock, flags); + spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); /* wait for the SMI register to become available */ - for (i = 0; readl(smi_reg) & ETH_SMI_BUSY; i++) { + for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) { if (i == PHY_WAIT_ITERATIONS) { printk("%s: PHY busy timeout\n", mp->dev->name); goto out; @@ -3066,10 +2980,10 @@ static void eth_port_write_smi_reg(struct mv643xx_private *mp, udelay(PHY_WAIT_MICRO_SECONDS); } - writel((phy_addr << 16) | (phy_reg << 21) | - ETH_SMI_OPCODE_WRITE | (value & 0xffff), smi_reg); + wrl(mp, SMI_REG, (phy_addr << 16) | (phy_reg << 21) | + ETH_SMI_OPCODE_WRITE | (value & 0xffff)); out: - spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags); + spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); } /* diff --git a/trunk/drivers/net/niu.c b/trunk/drivers/net/niu.c index 57cfd72ffdf7..4009c4ce96b4 100644 --- a/trunk/drivers/net/niu.c +++ b/trunk/drivers/net/niu.c @@ -1,6 +1,6 @@ /* niu.c: Neptune ethernet driver. 
* - * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net) + * Copyright (C) 2007 David S. Miller (davem@davemloft.net) */ #include @@ -33,8 +33,8 @@ #define DRV_MODULE_NAME "niu" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "0.9" -#define DRV_MODULE_RELDATE "May 4, 2008" +#define DRV_MODULE_VERSION "0.8" +#define DRV_MODULE_RELDATE "April 24, 2008" static char version[] __devinitdata = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; @@ -7264,11 +7264,8 @@ static int __devinit niu_get_and_validate_port(struct niu *np) parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL; - /* All of the current probing methods fail on - * Maramba on-board parts. - */ if (!parent->num_ports) - parent->num_ports = 4; + return -ENODEV; } } } diff --git a/trunk/drivers/net/pcnet32.c b/trunk/drivers/net/pcnet32.c index a1c454dbc164..4eb322e5273d 100644 --- a/trunk/drivers/net/pcnet32.c +++ b/trunk/drivers/net/pcnet32.c @@ -22,8 +22,12 @@ *************************************************************************/ #define DRV_NAME "pcnet32" -#define DRV_VERSION "1.35" -#define DRV_RELDATE "21.Apr.2008" +#ifdef CONFIG_PCNET32_NAPI +#define DRV_VERSION "1.34-NAPI" +#else +#define DRV_VERSION "1.34" +#endif +#define DRV_RELDATE "14.Aug.2007" #define PFX DRV_NAME ": " static const char *const version = @@ -441,24 +445,30 @@ static struct pcnet32_access pcnet32_dwio = { static void pcnet32_netif_stop(struct net_device *dev) { +#ifdef CONFIG_PCNET32_NAPI struct pcnet32_private *lp = netdev_priv(dev); - +#endif dev->trans_start = jiffies; +#ifdef CONFIG_PCNET32_NAPI napi_disable(&lp->napi); +#endif netif_tx_disable(dev); } static void pcnet32_netif_start(struct net_device *dev) { +#ifdef CONFIG_PCNET32_NAPI struct pcnet32_private *lp = netdev_priv(dev); ulong ioaddr = dev->base_addr; u16 val; - +#endif netif_wake_queue(dev); +#ifdef CONFIG_PCNET32_NAPI val = lp->a.read_csr(ioaddr, CSR3); val &= 0x00ff; lp->a.write_csr(ioaddr, CSR3, val); napi_enable(&lp->napi); +#endif } /* @@ -901,7 +911,11 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) rc = 1; /* default to fail */ if (netif_running(dev)) +#ifdef CONFIG_PCNET32_NAPI pcnet32_netif_stop(dev); +#else + pcnet32_close(dev); +#endif spin_lock_irqsave(&lp->lock, flags); lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ @@ -1032,6 +1046,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) x = a->read_bcr(ioaddr, 32); /* reset internal loopback */ a->write_bcr(ioaddr, 32, (x & ~0x0002)); +#ifdef CONFIG_PCNET32_NAPI if (netif_running(dev)) { pcnet32_netif_start(dev); pcnet32_restart(dev, CSR0_NORMAL); @@ -1040,6 +1055,16 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ } spin_unlock_irqrestore(&lp->lock, flags); +#else + if (netif_running(dev)) { + spin_unlock_irqrestore(&lp->lock, flags); + pcnet32_open(dev); + } else { + pcnet32_purge_rx_ring(dev); + lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ + spin_unlock_irqrestore(&lp->lock, flags); + } +#endif return (rc); } /* end pcnet32_loopback_test */ @@ -1245,7 +1270,11 @@ static void pcnet32_rx_entry(struct net_device *dev, } dev->stats.rx_bytes += skb->len; skb->protocol = eth_type_trans(skb, dev); +#ifdef CONFIG_PCNET32_NAPI netif_receive_skb(skb); +#else + netif_rx(skb); +#endif dev->last_rx = jiffies; dev->stats.rx_packets++; return; @@ -1374,6 +1403,7 @@ static int pcnet32_tx(struct 
net_device *dev) return must_restart; } +#ifdef CONFIG_PCNET32_NAPI static int pcnet32_poll(struct napi_struct *napi, int budget) { struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi); @@ -1412,6 +1442,7 @@ static int pcnet32_poll(struct napi_struct *napi, int budget) } return work_done; } +#endif #define PCNET32_REGS_PER_PHY 32 #define PCNET32_MAX_PHYS 32 @@ -1833,7 +1864,9 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) /* napi.weight is used in both the napi and non-napi cases */ lp->napi.weight = lp->rx_ring_size / 2; +#ifdef CONFIG_PCNET32_NAPI netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2); +#endif if (fdx && !(lp->options & PCNET32_PORT_ASEL) && ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) @@ -2264,7 +2297,9 @@ static int pcnet32_open(struct net_device *dev) goto err_free_ring; } +#ifdef CONFIG_PCNET32_NAPI napi_enable(&lp->napi); +#endif /* Re-initialize the PCNET32, and start it when done. */ lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); @@ -2588,6 +2623,7 @@ pcnet32_interrupt(int irq, void *dev_id) dev->name, csr0); /* unlike for the lance, there is no restart needed */ } +#ifdef CONFIG_PCNET32_NAPI if (netif_rx_schedule_prep(dev, &lp->napi)) { u16 val; /* set interrupt masks */ @@ -2598,9 +2634,24 @@ pcnet32_interrupt(int irq, void *dev_id) __netif_rx_schedule(dev, &lp->napi); break; } +#else + pcnet32_rx(dev, lp->napi.weight); + if (pcnet32_tx(dev)) { + /* reset the chip to clear the error condition, then restart */ + lp->a.reset(ioaddr); + lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ + pcnet32_restart(dev, CSR0_START); + netif_wake_queue(dev); + } +#endif csr0 = lp->a.read_csr(ioaddr, CSR0); } +#ifndef CONFIG_PCNET32_NAPI + /* Set interrupt enable. */ + lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN); +#endif + if (netif_msg_intr(lp)) printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n", dev->name, lp->a.read_csr(ioaddr, CSR0)); @@ -2619,7 +2670,9 @@ static int pcnet32_close(struct net_device *dev) del_timer_sync(&lp->watchdog_timer); netif_stop_queue(dev); +#ifdef CONFIG_PCNET32_NAPI napi_disable(&lp->napi); +#endif spin_lock_irqsave(&lp->lock, flags); diff --git a/trunk/drivers/net/phy/phy.c b/trunk/drivers/net/phy/phy.c index 45cc2914d347..3c18bb594957 100644 --- a/trunk/drivers/net/phy/phy.c +++ b/trunk/drivers/net/phy/phy.c @@ -547,7 +547,7 @@ static void phy_force_reduction(struct phy_device *phydev) * Must not be called from interrupt context, or while the * phydev->lock is held. 
*/ -static void phy_error(struct phy_device *phydev) +void phy_error(struct phy_device *phydev) { mutex_lock(&phydev->lock); phydev->state = PHY_HALTED; diff --git a/trunk/drivers/net/tulip/uli526x.c b/trunk/drivers/net/tulip/uli526x.c index 2511ca7a12aa..a59c1f224aa8 100644 --- a/trunk/drivers/net/tulip/uli526x.c +++ b/trunk/drivers/net/tulip/uli526x.c @@ -434,6 +434,10 @@ static int uli526x_open(struct net_device *dev) ULI526X_DBUG(0, "uli526x_open", 0); + ret = request_irq(dev->irq, &uli526x_interrupt, IRQF_SHARED, dev->name, dev); + if (ret) + return ret; + /* system variable init */ db->cr6_data = CR6_DEFAULT | uli526x_cr6_user_set; db->tx_packet_cnt = 0; @@ -452,10 +456,6 @@ static int uli526x_open(struct net_device *dev) /* Initialize ULI526X board */ uli526x_init(dev); - ret = request_irq(dev->irq, &uli526x_interrupt, IRQF_SHARED, dev->name, dev); - if (ret) - return ret; - /* Active System Interface */ netif_wake_queue(dev); @@ -1368,12 +1368,6 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr) * This setup frame initialize ULI526X address filter mode */ -#ifdef __BIG_ENDIAN -#define FLT_SHIFT 16 -#else -#define FLT_SHIFT 0 -#endif - static void send_filter_frame(struct net_device *dev, int mc_cnt) { struct uli526x_board_info *db = netdev_priv(dev); @@ -1390,27 +1384,27 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt) /* Node address */ addrptr = (u16 *) dev->dev_addr; - *suptr++ = addrptr[0] << FLT_SHIFT; - *suptr++ = addrptr[1] << FLT_SHIFT; - *suptr++ = addrptr[2] << FLT_SHIFT; + *suptr++ = addrptr[0]; + *suptr++ = addrptr[1]; + *suptr++ = addrptr[2]; /* broadcast address */ - *suptr++ = 0xffff << FLT_SHIFT; - *suptr++ = 0xffff << FLT_SHIFT; - *suptr++ = 0xffff << FLT_SHIFT; + *suptr++ = 0xffff; + *suptr++ = 0xffff; + *suptr++ = 0xffff; /* fit the multicast address */ for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) { addrptr = (u16 *) mcptr->dmi_addr; - *suptr++ = addrptr[0] << FLT_SHIFT; - *suptr++ = addrptr[1] << FLT_SHIFT; - *suptr++ = addrptr[2] << FLT_SHIFT; + *suptr++ = addrptr[0]; + *suptr++ = addrptr[1]; + *suptr++ = addrptr[2]; } for (; i<14; i++) { - *suptr++ = 0xffff << FLT_SHIFT; - *suptr++ = 0xffff << FLT_SHIFT; - *suptr++ = 0xffff << FLT_SHIFT; + *suptr++ = 0xffff; + *suptr++ = 0xffff; + *suptr++ = 0xffff; } /* prepare the setup frame */ diff --git a/trunk/drivers/net/ucc_geth.c b/trunk/drivers/net/ucc_geth.c index ca0bdac07a78..281ce3d39532 100644 --- a/trunk/drivers/net/ucc_geth.c +++ b/trunk/drivers/net/ucc_geth.c @@ -62,6 +62,7 @@ #endif /* UGETH_VERBOSE_DEBUG */ #define UGETH_MSG_DEFAULT (NETIF_MSG_IFUP << 1 ) - 1 +void uec_set_ethtool_ops(struct net_device *netdev); static DEFINE_SPINLOCK(ugeth_lock); @@ -215,8 +216,7 @@ static struct list_head *dequeue(struct list_head *lh) } } -static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, - u8 __iomem *bd) +static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, u8 *bd) { struct sk_buff *skb = NULL; @@ -236,22 +236,21 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, skb->dev = ugeth->dev; - out_be32(&((struct qe_bd __iomem *)bd)->buf, + out_be32(&((struct qe_bd *)bd)->buf, dma_map_single(NULL, skb->data, ugeth->ug_info->uf_info.max_rx_buf_length + UCC_GETH_RX_DATA_BUF_ALIGNMENT, DMA_FROM_DEVICE)); - out_be32((u32 __iomem *)bd, - (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W))); + out_be32((u32 *)bd, (R_E | R_I | (in_be32((u32 *)bd) & R_W))); return skb; } static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, 
u8 rxQ) { - u8 __iomem *bd; + u8 *bd; u32 bd_status; struct sk_buff *skb; int i; @@ -260,7 +259,7 @@ static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ) i = 0; do { - bd_status = in_be32((u32 __iomem *)bd); + bd_status = in_be32((u32*)bd); skb = get_new_skb(ugeth, bd); if (!skb) /* If can not allocate data buffer, @@ -278,7 +277,7 @@ static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ) } static int fill_init_enet_entries(struct ucc_geth_private *ugeth, - u32 *p_start, + volatile u32 *p_start, u8 num_entries, u32 thread_size, u32 thread_alignment, @@ -317,7 +316,7 @@ static int fill_init_enet_entries(struct ucc_geth_private *ugeth, } static int return_init_enet_entries(struct ucc_geth_private *ugeth, - u32 *p_start, + volatile u32 *p_start, u8 num_entries, enum qe_risc_allocation risc, int skip_page_for_first_entry) @@ -327,22 +326,21 @@ static int return_init_enet_entries(struct ucc_geth_private *ugeth, int snum; for (i = 0; i < num_entries; i++) { - u32 val = *p_start; - /* Check that this entry was actually valid -- needed in case failed in allocations */ - if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { + if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) { snum = - (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >> + (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >> ENET_INIT_PARAM_SNUM_SHIFT; qe_put_snum((u8) snum); if (!((i == 0) && skip_page_for_first_entry)) { /* First entry of Rx does not have page */ init_enet_offset = - (val & ENET_INIT_PARAM_PTR_MASK); + (in_be32(p_start) & + ENET_INIT_PARAM_PTR_MASK); qe_muram_free(init_enet_offset); } - *p_start++ = 0; + *(p_start++) = 0; /* Just for cosmetics */ } } @@ -351,7 +349,7 @@ static int return_init_enet_entries(struct ucc_geth_private *ugeth, #ifdef DEBUG static int dump_init_enet_entries(struct ucc_geth_private *ugeth, - u32 __iomem *p_start, + volatile u32 *p_start, u8 num_entries, u32 thread_size, enum qe_risc_allocation risc, @@ -362,13 +360,11 @@ static int dump_init_enet_entries(struct ucc_geth_private *ugeth, int snum; for (i = 0; i < num_entries; i++) { - u32 val = in_be32(p_start); - /* Check that this entry was actually valid -- needed in case failed in allocations */ - if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { + if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) { snum = - (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >> + (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >> ENET_INIT_PARAM_SNUM_SHIFT; qe_put_snum((u8) snum); if (!((i == 0) && skip_page_for_first_entry)) { @@ -444,7 +440,7 @@ static int hw_add_addr_in_paddr(struct ucc_geth_private *ugeth, static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) { - struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; + struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; if (!(paddr_num < NUM_OF_PADDRS)) { ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__); @@ -452,7 +448,7 @@ static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) } p_82xx_addr_filt = - (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram-> + (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> addressfiltering; /* Writing address ff.ff.ff.ff.ff.ff disables address @@ -467,11 +463,11 @@ static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth, u8 *p_enet_addr) { - struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; + struct ucc_geth_82xx_address_filtering_pram 
*p_82xx_addr_filt; u32 cecr_subblock; p_82xx_addr_filt = - (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram-> + (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> addressfiltering; cecr_subblock = @@ -491,7 +487,7 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth, static void magic_packet_detection_enable(struct ucc_geth_private *ugeth) { struct ucc_fast_private *uccf; - struct ucc_geth __iomem *ug_regs; + struct ucc_geth *ug_regs; u32 maccfg2, uccm; uccf = ugeth->uccf; @@ -511,7 +507,7 @@ static void magic_packet_detection_enable(struct ucc_geth_private *ugeth) static void magic_packet_detection_disable(struct ucc_geth_private *ugeth) { struct ucc_fast_private *uccf; - struct ucc_geth __iomem *ug_regs; + struct ucc_geth *ug_regs; u32 maccfg2, uccm; uccf = ugeth->uccf; @@ -542,13 +538,13 @@ static void get_statistics(struct ucc_geth_private *ugeth, rx_firmware_statistics, struct ucc_geth_hardware_statistics *hardware_statistics) { - struct ucc_fast __iomem *uf_regs; - struct ucc_geth __iomem *ug_regs; + struct ucc_fast *uf_regs; + struct ucc_geth *ug_regs; struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram; struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram; ug_regs = ugeth->ug_regs; - uf_regs = (struct ucc_fast __iomem *) ug_regs; + uf_regs = (struct ucc_fast *) ug_regs; p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram; p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram; @@ -1136,9 +1132,9 @@ static void dump_regs(struct ucc_geth_private *ugeth) } #endif /* DEBUG */ -static void init_default_reg_vals(u32 __iomem *upsmr_register, - u32 __iomem *maccfg1_register, - u32 __iomem *maccfg2_register) +static void init_default_reg_vals(volatile u32 *upsmr_register, + volatile u32 *maccfg1_register, + volatile u32 *maccfg2_register) { out_be32(upsmr_register, UCC_GETH_UPSMR_INIT); out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT); @@ -1152,7 +1148,7 @@ static int init_half_duplex_params(int alt_beb, u8 alt_beb_truncation, u8 max_retransmissions, u8 collision_window, - u32 __iomem *hafdup_register) + volatile u32 *hafdup_register) { u32 value = 0; @@ -1184,7 +1180,7 @@ static int init_inter_frame_gap_params(u8 non_btb_cs_ipg, u8 non_btb_ipg, u8 min_ifg, u8 btb_ipg, - u32 __iomem *ipgifg_register) + volatile u32 *ipgifg_register) { u32 value = 0; @@ -1219,9 +1215,9 @@ int init_flow_control_params(u32 automatic_flow_control_mode, int tx_flow_control_enable, u16 pause_period, u16 extension_field, - u32 __iomem *upsmr_register, - u32 __iomem *uempr_register, - u32 __iomem *maccfg1_register) + volatile u32 *upsmr_register, + volatile u32 *uempr_register, + volatile u32 *maccfg1_register) { u32 value = 0; @@ -1247,8 +1243,8 @@ int init_flow_control_params(u32 automatic_flow_control_mode, static int init_hw_statistics_gathering_mode(int enable_hardware_statistics, int auto_zero_hardware_statistics, - u32 __iomem *upsmr_register, - u16 __iomem *uescr_register) + volatile u32 *upsmr_register, + volatile u16 *uescr_register) { u32 upsmr_value = 0; u16 uescr_value = 0; @@ -1274,12 +1270,12 @@ static int init_hw_statistics_gathering_mode(int enable_hardware_statistics, static int init_firmware_statistics_gathering_mode(int enable_tx_firmware_statistics, int enable_rx_firmware_statistics, - u32 __iomem *tx_rmon_base_ptr, + volatile u32 *tx_rmon_base_ptr, u32 tx_firmware_statistics_structure_address, - u32 __iomem *rx_rmon_base_ptr, + volatile u32 *rx_rmon_base_ptr, u32 
rx_firmware_statistics_structure_address, - u16 __iomem *temoder_register, - u32 __iomem *remoder_register) + volatile u16 *temoder_register, + volatile u32 *remoder_register) { /* Note: this function does not check if */ /* the parameters it receives are NULL */ @@ -1311,8 +1307,8 @@ static int init_mac_station_addr_regs(u8 address_byte_0, u8 address_byte_3, u8 address_byte_4, u8 address_byte_5, - u32 __iomem *macstnaddr1_register, - u32 __iomem *macstnaddr2_register) + volatile u32 *macstnaddr1_register, + volatile u32 *macstnaddr2_register) { u32 value = 0; @@ -1348,7 +1344,7 @@ static int init_mac_station_addr_regs(u8 address_byte_0, } static int init_check_frame_length_mode(int length_check, - u32 __iomem *maccfg2_register) + volatile u32 *maccfg2_register) { u32 value = 0; @@ -1364,7 +1360,7 @@ static int init_check_frame_length_mode(int length_check, } static int init_preamble_length(u8 preamble_length, - u32 __iomem *maccfg2_register) + volatile u32 *maccfg2_register) { u32 value = 0; @@ -1380,7 +1376,7 @@ static int init_preamble_length(u8 preamble_length, static int init_rx_parameters(int reject_broadcast, int receive_short_frames, - int promiscuous, u32 __iomem *upsmr_register) + int promiscuous, volatile u32 *upsmr_register) { u32 value = 0; @@ -1407,7 +1403,7 @@ static int init_rx_parameters(int reject_broadcast, } static int init_max_rx_buff_len(u16 max_rx_buf_len, - u16 __iomem *mrblr_register) + volatile u16 *mrblr_register) { /* max_rx_buf_len value must be a multiple of 128 */ if ((max_rx_buf_len == 0) @@ -1419,8 +1415,8 @@ static int init_max_rx_buff_len(u16 max_rx_buf_len, } static int init_min_frame_len(u16 min_frame_length, - u16 __iomem *minflr_register, - u16 __iomem *mrblr_register) + volatile u16 *minflr_register, + volatile u16 *mrblr_register) { u16 mrblr_value = 0; @@ -1435,8 +1431,8 @@ static int init_min_frame_len(u16 min_frame_length, static int adjust_enet_interface(struct ucc_geth_private *ugeth) { struct ucc_geth_info *ug_info; - struct ucc_geth __iomem *ug_regs; - struct ucc_fast __iomem *uf_regs; + struct ucc_geth *ug_regs; + struct ucc_fast *uf_regs; int ret_val; u32 upsmr, maccfg2, tbiBaseAddress; u16 value; @@ -1521,8 +1517,8 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth) static void adjust_link(struct net_device *dev) { struct ucc_geth_private *ugeth = netdev_priv(dev); - struct ucc_geth __iomem *ug_regs; - struct ucc_fast __iomem *uf_regs; + struct ucc_geth *ug_regs; + struct ucc_fast *uf_regs; struct phy_device *phydev = ugeth->phydev; unsigned long flags; int new_state = 0; @@ -1682,9 +1678,9 @@ static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth) uccf = ugeth->uccf; /* Clear acknowledge bit */ - temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); + temp = ugeth->p_rx_glbl_pram->rxgstpack; temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX; - out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp); + ugeth->p_rx_glbl_pram->rxgstpack = temp; /* Keep issuing command and checking acknowledge bit until it is asserted, according to spec */ @@ -1696,7 +1692,7 @@ static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth) qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0); - temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); + temp = ugeth->p_rx_glbl_pram->rxgstpack; } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX)); uccf->stopped_rx = 1; @@ -1995,20 +1991,19 @@ static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private * enum enet_addr_type enet_addr_type) { - struct 
ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; + struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; struct ucc_fast_private *uccf; enum comm_dir comm_dir; struct list_head *p_lh; u16 i, num; - u32 __iomem *addr_h; - u32 __iomem *addr_l; + u32 *addr_h, *addr_l; u8 *p_counter; uccf = ugeth->uccf; p_82xx_addr_filt = - (struct ucc_geth_82xx_address_filtering_pram __iomem *) - ugeth->p_rx_glbl_pram->addressfiltering; + (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> + addressfiltering; if (enet_addr_type == ENET_ADDR_TYPE_GROUP) { addr_h = &(p_82xx_addr_filt->gaddr_h); @@ -2084,7 +2079,7 @@ static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *uge static void ucc_geth_memclean(struct ucc_geth_private *ugeth) { u16 i, j; - u8 __iomem *bd; + u8 *bd; if (!ugeth) return; @@ -2159,8 +2154,8 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { if (ugeth->tx_skbuff[i][j]) { dma_unmap_single(NULL, - in_be32(&((struct qe_bd __iomem *)bd)->buf), - (in_be32((u32 __iomem *)bd) & + ((struct qe_bd *)bd)->buf, + (in_be32((u32 *)bd) & BD_LENGTH_MASK), DMA_TO_DEVICE); dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); @@ -2187,7 +2182,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { if (ugeth->rx_skbuff[i][j]) { dma_unmap_single(NULL, - in_be32(&((struct qe_bd __iomem *)bd)->buf), + ((struct qe_bd *)bd)->buf, ugeth->ug_info-> uf_info.max_rx_buf_length + UCC_GETH_RX_DATA_BUF_ALIGNMENT, @@ -2223,8 +2218,8 @@ static void ucc_geth_set_multi(struct net_device *dev) { struct ucc_geth_private *ugeth; struct dev_mc_list *dmi; - struct ucc_fast __iomem *uf_regs; - struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; + struct ucc_fast *uf_regs; + struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; int i; ugeth = netdev_priv(dev); @@ -2233,14 +2228,14 @@ static void ucc_geth_set_multi(struct net_device *dev) if (dev->flags & IFF_PROMISC) { - out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr) | UPSMR_PRO); + uf_regs->upsmr |= UPSMR_PRO; } else { - out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr)&~UPSMR_PRO); + uf_regs->upsmr &= ~UPSMR_PRO; p_82xx_addr_filt = - (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> + (struct ucc_geth_82xx_address_filtering_pram *) ugeth-> p_rx_glbl_pram->addressfiltering; if (dev->flags & IFF_ALLMULTI) { @@ -2275,7 +2270,7 @@ static void ucc_geth_set_multi(struct net_device *dev) static void ucc_geth_stop(struct ucc_geth_private *ugeth) { - struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; + struct ucc_geth *ug_regs = ugeth->ug_regs; struct phy_device *phydev = ugeth->phydev; u32 tempval; @@ -2424,20 +2419,20 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) return -ENOMEM; } - ugeth->ug_regs = (struct ucc_geth __iomem *) ioremap(uf_info->regs, sizeof(struct ucc_geth)); + ugeth->ug_regs = (struct ucc_geth *) ioremap(uf_info->regs, sizeof(struct ucc_geth)); return 0; } static int ucc_geth_startup(struct ucc_geth_private *ugeth) { - struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; - struct ucc_geth_init_pram __iomem *p_init_enet_pram; + struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; + struct ucc_geth_init_pram *p_init_enet_pram; struct ucc_fast_private *uccf; struct ucc_geth_info *ug_info; struct ucc_fast_info *uf_info; - struct ucc_fast __iomem *uf_regs; - struct ucc_geth __iomem *ug_regs; + struct 
ucc_fast *uf_regs; + struct ucc_geth *ug_regs; int ret_val = -EINVAL; u32 remoder = UCC_GETH_REMODER_INIT; u32 init_enet_pram_offset, cecr_subblock, command, maccfg1; @@ -2445,8 +2440,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) u16 temoder = UCC_GETH_TEMODER_INIT; u16 test; u8 function_code = 0; - u8 __iomem *bd; - u8 __iomem *endOfRing; + u8 *bd, *endOfRing; u8 numThreadsRxNumerical, numThreadsTxNumerical; ugeth_vdbg("%s: IN", __FUNCTION__); @@ -2608,11 +2602,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) align = UCC_GETH_TX_BD_RING_ALIGNMENT; ugeth->tx_bd_ring_offset[j] = - (u32) kmalloc((u32) (length + align), GFP_KERNEL); + kmalloc((u32) (length + align), GFP_KERNEL); if (ugeth->tx_bd_ring_offset[j] != 0) ugeth->p_tx_bd_ring[j] = - (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] + + (void*)((ugeth->tx_bd_ring_offset[j] + align) & ~(align - 1)); } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { ugeth->tx_bd_ring_offset[j] = @@ -2620,7 +2614,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) UCC_GETH_TX_BD_RING_ALIGNMENT); if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j])) ugeth->p_tx_bd_ring[j] = - (u8 __iomem *) qe_muram_addr(ugeth-> + (u8 *) qe_muram_addr(ugeth-> tx_bd_ring_offset[j]); } if (!ugeth->p_tx_bd_ring[j]) { @@ -2632,8 +2626,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) return -ENOMEM; } /* Zero unused end of bd ring, according to spec */ - memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] + - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0, + memset(ugeth->p_tx_bd_ring[j] + + ug_info->bdRingLenTx[j] * sizeof(struct qe_bd), 0, length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)); } @@ -2645,10 +2639,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) align = UCC_GETH_RX_BD_RING_ALIGNMENT; ugeth->rx_bd_ring_offset[j] = - (u32) kmalloc((u32) (length + align), GFP_KERNEL); + kmalloc((u32) (length + align), GFP_KERNEL); if (ugeth->rx_bd_ring_offset[j] != 0) ugeth->p_rx_bd_ring[j] = - (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] + + (void*)((ugeth->rx_bd_ring_offset[j] + align) & ~(align - 1)); } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { ugeth->rx_bd_ring_offset[j] = @@ -2656,7 +2650,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) UCC_GETH_RX_BD_RING_ALIGNMENT); if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j])) ugeth->p_rx_bd_ring[j] = - (u8 __iomem *) qe_muram_addr(ugeth-> + (u8 *) qe_muram_addr(ugeth-> rx_bd_ring_offset[j]); } if (!ugeth->p_rx_bd_ring[j]) { @@ -2691,14 +2685,14 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { /* clear bd buffer */ - out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); + out_be32(&((struct qe_bd *)bd)->buf, 0); /* set bd status and length */ - out_be32((u32 __iomem *)bd, 0); + out_be32((u32 *)bd, 0); bd += sizeof(struct qe_bd); } bd -= sizeof(struct qe_bd); /* set bd status and length */ - out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */ + out_be32((u32 *)bd, T_W); /* for last BD set Wrap bit */ } /* Init Rx bds */ @@ -2723,14 +2717,14 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { /* set bd status and length */ - out_be32((u32 __iomem *)bd, R_I); + out_be32((u32 *)bd, R_I); /* clear bd buffer */ - 
out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); + out_be32(&((struct qe_bd *)bd)->buf, 0); bd += sizeof(struct qe_bd); } bd -= sizeof(struct qe_bd); /* set bd status and length */ - out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */ + out_be32((u32 *)bd, R_W); /* for last BD set Wrap bit */ } /* @@ -2750,10 +2744,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) return -ENOMEM; } ugeth->p_tx_glbl_pram = - (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth-> + (struct ucc_geth_tx_global_pram *) qe_muram_addr(ugeth-> tx_glbl_pram_offset); /* Zero out p_tx_glbl_pram */ - memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram)); + memset(ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram)); /* Fill global PRAM */ @@ -2774,7 +2768,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } ugeth->p_thread_data_tx = - (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth-> + (struct ucc_geth_thread_data_tx *) qe_muram_addr(ugeth-> thread_dat_tx_offset); out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset); @@ -2785,8 +2779,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) /* iphoffset */ for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++) - out_8(&ugeth->p_tx_glbl_pram->iphoffset[i], - ug_info->iphoffset[i]); + ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i]; /* SQPTR */ /* Size varies with number of Tx queues */ @@ -2804,7 +2797,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } ugeth->p_send_q_mem_reg = - (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth-> + (struct ucc_geth_send_queue_mem_region *) qe_muram_addr(ugeth-> send_q_mem_reg_offset); out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset); @@ -2848,26 +2841,25 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } ugeth->p_scheduler = - (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth-> + (struct ucc_geth_scheduler *) qe_muram_addr(ugeth-> scheduler_offset); out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer, ugeth->scheduler_offset); /* Zero out p_scheduler */ - memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler)); + memset(ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler)); /* Set values in scheduler */ out_be32(&ugeth->p_scheduler->mblinterval, ug_info->mblinterval); out_be16(&ugeth->p_scheduler->nortsrbytetime, ug_info->nortsrbytetime); - out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz); - out_8(&ugeth->p_scheduler->strictpriorityq, - ug_info->strictpriorityq); - out_8(&ugeth->p_scheduler->txasap, ug_info->txasap); - out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw); + ugeth->p_scheduler->fracsiz = ug_info->fracsiz; + ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq; + ugeth->p_scheduler->txasap = ug_info->txasap; + ugeth->p_scheduler->extrabw = ug_info->extrabw; for (i = 0; i < NUM_TX_QUEUES; i++) - out_8(&ugeth->p_scheduler->weightfactor[i], - ug_info->weightfactor[i]); + ugeth->p_scheduler->weightfactor[i] = + ug_info->weightfactor[i]; /* Set pointers to cpucount registers in scheduler */ ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0); @@ -2898,10 +2890,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) return -ENOMEM; } ugeth->p_tx_fw_statistics_pram = - (struct ucc_geth_tx_firmware_statistics_pram __iomem *) + (struct ucc_geth_tx_firmware_statistics_pram *) qe_muram_addr(ugeth->tx_fw_statistics_pram_offset); /* Zero out 
p_tx_fw_statistics_pram */ - memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram, + memset(ugeth->p_tx_fw_statistics_pram, 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram)); } @@ -2938,10 +2930,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) return -ENOMEM; } ugeth->p_rx_glbl_pram = - (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth-> + (struct ucc_geth_rx_global_pram *) qe_muram_addr(ugeth-> rx_glbl_pram_offset); /* Zero out p_rx_glbl_pram */ - memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram)); + memset(ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram)); /* Fill global PRAM */ @@ -2961,7 +2953,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } ugeth->p_thread_data_rx = - (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth-> + (struct ucc_geth_thread_data_rx *) qe_muram_addr(ugeth-> thread_dat_rx_offset); out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset); @@ -2984,10 +2976,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) return -ENOMEM; } ugeth->p_rx_fw_statistics_pram = - (struct ucc_geth_rx_firmware_statistics_pram __iomem *) + (struct ucc_geth_rx_firmware_statistics_pram *) qe_muram_addr(ugeth->rx_fw_statistics_pram_offset); /* Zero out p_rx_fw_statistics_pram */ - memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0, + memset(ugeth->p_rx_fw_statistics_pram, 0, sizeof(struct ucc_geth_rx_firmware_statistics_pram)); } @@ -3008,7 +3000,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } ugeth->p_rx_irq_coalescing_tbl = - (struct ucc_geth_rx_interrupt_coalescing_table __iomem *) + (struct ucc_geth_rx_interrupt_coalescing_table *) qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset); out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr, ugeth->rx_irq_coalescing_tbl_offset); @@ -3077,11 +3069,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } ugeth->p_rx_bd_qs_tbl = - (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth-> + (struct ucc_geth_rx_bd_queues_entry *) qe_muram_addr(ugeth-> rx_bd_qs_tbl_offset); out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset); /* Zero out p_rx_bd_qs_tbl */ - memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl, + memset(ugeth->p_rx_bd_qs_tbl, 0, ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) + sizeof(struct ucc_geth_rx_prefetched_bds))); @@ -3141,7 +3133,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) &ugeth->p_rx_glbl_pram->remoder); /* function code register */ - out_8(&ugeth->p_rx_glbl_pram->rstate, function_code); + ugeth->p_rx_glbl_pram->rstate = function_code; /* initialize extended filtering */ if (ug_info->rxExtendedFiltering) { @@ -3168,7 +3160,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } ugeth->p_exf_glbl_param = - (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth-> + (struct ucc_geth_exf_global_pram *) qe_muram_addr(ugeth-> exf_glbl_param_offset); out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam, ugeth->exf_glbl_param_offset); @@ -3183,7 +3175,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j); p_82xx_addr_filt = - (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> + (struct ucc_geth_82xx_address_filtering_pram *) ugeth-> p_rx_glbl_pram->addressfiltering; ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, @@ -3315,21 +3307,17 @@ static int ucc_geth_startup(struct ucc_geth_private 
*ugeth) return -ENOMEM; } p_init_enet_pram = - (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset); + (struct ucc_geth_init_pram *) qe_muram_addr(init_enet_pram_offset); /* Copy shadow InitEnet command parameter structure into PRAM */ - out_8(&p_init_enet_pram->resinit1, - ugeth->p_init_enet_param_shadow->resinit1); - out_8(&p_init_enet_pram->resinit2, - ugeth->p_init_enet_param_shadow->resinit2); - out_8(&p_init_enet_pram->resinit3, - ugeth->p_init_enet_param_shadow->resinit3); - out_8(&p_init_enet_pram->resinit4, - ugeth->p_init_enet_param_shadow->resinit4); + p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1; + p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2; + p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3; + p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4; out_be16(&p_init_enet_pram->resinit5, ugeth->p_init_enet_param_shadow->resinit5); - out_8(&p_init_enet_pram->largestexternallookupkeysize, - ugeth->p_init_enet_param_shadow->largestexternallookupkeysize); + p_init_enet_pram->largestexternallookupkeysize = + ugeth->p_init_enet_param_shadow->largestexternallookupkeysize; out_be32(&p_init_enet_pram->rgftgfrxglobal, ugeth->p_init_enet_param_shadow->rgftgfrxglobal); for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++) @@ -3383,7 +3371,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) #ifdef CONFIG_UGETH_TX_ON_DEMAND struct ucc_fast_private *uccf; #endif - u8 __iomem *bd; /* BD pointer */ + u8 *bd; /* BD pointer */ u32 bd_status; u8 txQ = 0; @@ -3395,7 +3383,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Start from the next BD that should be filled */ bd = ugeth->txBd[txQ]; - bd_status = in_be32((u32 __iomem *)bd); + bd_status = in_be32((u32 *)bd); /* Save the skb pointer so we can free it later */ ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb; @@ -3405,7 +3393,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); /* set up the buffer descriptor */ - out_be32(&((struct qe_bd __iomem *)bd)->buf, + out_be32(&((struct qe_bd *)bd)->buf, dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE)); /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ @@ -3413,7 +3401,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len; /* set bd status and length */ - out_be32((u32 __iomem *)bd, bd_status); + out_be32((u32 *)bd, bd_status); dev->trans_start = jiffies; @@ -3453,7 +3441,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit) { struct sk_buff *skb; - u8 __iomem *bd; + u8 *bd; u16 length, howmany = 0; u32 bd_status; u8 *bdBuffer; @@ -3466,11 +3454,11 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit /* collect received buffers */ bd = ugeth->rxBd[rxQ]; - bd_status = in_be32((u32 __iomem *)bd); + bd_status = in_be32((u32 *)bd); /* while there are received buffers and BD is full (~R_E) */ while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) { - bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf); + bdBuffer = (u8 *) in_be32(&((struct qe_bd *)bd)->buf); length = (u16) ((bd_status & BD_LENGTH_MASK) - 4); skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]]; @@ -3528,7 +3516,7 @@ static int 
ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit else bd += sizeof(struct qe_bd); - bd_status = in_be32((u32 __iomem *)bd); + bd_status = in_be32((u32 *)bd); } ugeth->rxBd[rxQ] = bd; @@ -3539,11 +3527,11 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) { /* Start from the next BD that should be filled */ struct ucc_geth_private *ugeth = netdev_priv(dev); - u8 __iomem *bd; /* BD pointer */ + u8 *bd; /* BD pointer */ u32 bd_status; bd = ugeth->confBd[txQ]; - bd_status = in_be32((u32 __iomem *)bd); + bd_status = in_be32((u32 *)bd); /* Normal processing. */ while ((bd_status & T_R) == 0) { @@ -3573,7 +3561,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) bd += sizeof(struct qe_bd); else bd = ugeth->p_tx_bd_ring[txQ]; - bd_status = in_be32((u32 __iomem *)bd); + bd_status = in_be32((u32 *)bd); } ugeth->confBd[txQ] = bd; return 0; @@ -3922,7 +3910,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma return -EINVAL; } } else { - prop = of_get_property(np, "tx-clock", NULL); + prop = of_get_property(np, "rx-clock", NULL); if (!prop) { printk(KERN_ERR "ucc_geth: mising tx-clock-name property\n"); diff --git a/trunk/drivers/net/ucc_geth.h b/trunk/drivers/net/ucc_geth.h index abc0e2242634..9f8b7580a3a4 100644 --- a/trunk/drivers/net/ucc_geth.h +++ b/trunk/drivers/net/ucc_geth.h @@ -700,8 +700,8 @@ struct ucc_geth_82xx_address_filtering_pram { u32 iaddr_l; /* individual address filter, low */ u32 gaddr_h; /* group address filter, high */ u32 gaddr_l; /* group address filter, low */ - struct ucc_geth_82xx_enet_address __iomem taddr; - struct ucc_geth_82xx_enet_address __iomem paddr[NUM_OF_PADDRS]; + struct ucc_geth_82xx_enet_address taddr; + struct ucc_geth_82xx_enet_address paddr[NUM_OF_PADDRS]; u8 res0[0x40 - 0x38]; } __attribute__ ((packed)); @@ -1186,40 +1186,40 @@ struct ucc_geth_private { struct ucc_fast_private *uccf; struct net_device *dev; struct napi_struct napi; - struct ucc_geth __iomem *ug_regs; + struct ucc_geth *ug_regs; struct ucc_geth_init_pram *p_init_enet_param_shadow; - struct ucc_geth_exf_global_pram __iomem *p_exf_glbl_param; + struct ucc_geth_exf_global_pram *p_exf_glbl_param; u32 exf_glbl_param_offset; - struct ucc_geth_rx_global_pram __iomem *p_rx_glbl_pram; + struct ucc_geth_rx_global_pram *p_rx_glbl_pram; u32 rx_glbl_pram_offset; - struct ucc_geth_tx_global_pram __iomem *p_tx_glbl_pram; + struct ucc_geth_tx_global_pram *p_tx_glbl_pram; u32 tx_glbl_pram_offset; - struct ucc_geth_send_queue_mem_region __iomem *p_send_q_mem_reg; + struct ucc_geth_send_queue_mem_region *p_send_q_mem_reg; u32 send_q_mem_reg_offset; - struct ucc_geth_thread_data_tx __iomem *p_thread_data_tx; + struct ucc_geth_thread_data_tx *p_thread_data_tx; u32 thread_dat_tx_offset; - struct ucc_geth_thread_data_rx __iomem *p_thread_data_rx; + struct ucc_geth_thread_data_rx *p_thread_data_rx; u32 thread_dat_rx_offset; - struct ucc_geth_scheduler __iomem *p_scheduler; + struct ucc_geth_scheduler *p_scheduler; u32 scheduler_offset; - struct ucc_geth_tx_firmware_statistics_pram __iomem *p_tx_fw_statistics_pram; + struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram; u32 tx_fw_statistics_pram_offset; - struct ucc_geth_rx_firmware_statistics_pram __iomem *p_rx_fw_statistics_pram; + struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram; u32 rx_fw_statistics_pram_offset; - struct ucc_geth_rx_interrupt_coalescing_table __iomem *p_rx_irq_coalescing_tbl; + struct ucc_geth_rx_interrupt_coalescing_table 
*p_rx_irq_coalescing_tbl; u32 rx_irq_coalescing_tbl_offset; - struct ucc_geth_rx_bd_queues_entry __iomem *p_rx_bd_qs_tbl; + struct ucc_geth_rx_bd_queues_entry *p_rx_bd_qs_tbl; u32 rx_bd_qs_tbl_offset; - u8 __iomem *p_tx_bd_ring[NUM_TX_QUEUES]; + u8 *p_tx_bd_ring[NUM_TX_QUEUES]; u32 tx_bd_ring_offset[NUM_TX_QUEUES]; - u8 __iomem *p_rx_bd_ring[NUM_RX_QUEUES]; + u8 *p_rx_bd_ring[NUM_RX_QUEUES]; u32 rx_bd_ring_offset[NUM_RX_QUEUES]; - u8 __iomem *confBd[NUM_TX_QUEUES]; - u8 __iomem *txBd[NUM_TX_QUEUES]; - u8 __iomem *rxBd[NUM_RX_QUEUES]; + u8 *confBd[NUM_TX_QUEUES]; + u8 *txBd[NUM_TX_QUEUES]; + u8 *rxBd[NUM_RX_QUEUES]; int badFrame[NUM_RX_QUEUES]; u16 cpucount[NUM_TX_QUEUES]; - u16 __iomem *p_cpucount[NUM_TX_QUEUES]; + volatile u16 *p_cpucount[NUM_TX_QUEUES]; int indAddrRegUsed[NUM_OF_PADDRS]; u8 paddr[NUM_OF_PADDRS][ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */ u8 numGroupAddrInHash; @@ -1251,12 +1251,4 @@ struct ucc_geth_private { int oldlink; }; -void uec_set_ethtool_ops(struct net_device *netdev); -int init_flow_control_params(u32 automatic_flow_control_mode, - int rx_flow_control_enable, int tx_flow_control_enable, - u16 pause_period, u16 extension_field, - u32 __iomem *upsmr_register, u32 __iomem *uempr_register, - u32 __iomem *maccfg1_register); - - #endif /* __UCC_GETH_H__ */ diff --git a/trunk/drivers/net/ucc_geth_ethtool.c b/trunk/drivers/net/ucc_geth_ethtool.c index 299b7f176950..9a9622c13e2b 100644 --- a/trunk/drivers/net/ucc_geth_ethtool.c +++ b/trunk/drivers/net/ucc_geth_ethtool.c @@ -108,6 +108,12 @@ static char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { #define UEC_TX_FW_STATS_LEN ARRAY_SIZE(tx_fw_stat_gstrings) #define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings) +extern int init_flow_control_params(u32 automatic_flow_control_mode, + int rx_flow_control_enable, + int tx_flow_control_enable, u16 pause_period, + u16 extension_field, volatile u32 *upsmr_register, + volatile u32 *uempr_register, volatile u32 *maccfg1_register); + static int uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { diff --git a/trunk/drivers/net/ucc_geth_mii.c b/trunk/drivers/net/ucc_geth_mii.c index 940474736922..2af490781005 100644 --- a/trunk/drivers/net/ucc_geth_mii.c +++ b/trunk/drivers/net/ucc_geth_mii.c @@ -104,7 +104,7 @@ int uec_mdio_read(struct mii_bus *bus, int mii_id, int regnum) } /* Reset the MIIM registers, and wait for the bus to free */ -static int uec_mdio_reset(struct mii_bus *bus) +int uec_mdio_reset(struct mii_bus *bus) { struct ucc_mii_mng __iomem *regs = (void __iomem *)bus->priv; unsigned int timeout = PHY_INIT_TIMEOUT; @@ -240,7 +240,7 @@ static int uec_mdio_probe(struct of_device *ofdev, const struct of_device_id *ma return err; } -static int uec_mdio_remove(struct of_device *ofdev) +int uec_mdio_remove(struct of_device *ofdev) { struct device *device = &ofdev->dev; struct mii_bus *bus = dev_get_drvdata(device); diff --git a/trunk/drivers/net/usb/asix.c b/trunk/drivers/net/usb/asix.c index dc6f097062df..6f245cfb6624 100644 --- a/trunk/drivers/net/usb/asix.c +++ b/trunk/drivers/net/usb/asix.c @@ -1380,10 +1380,6 @@ static const struct usb_device_id products [] = { // Buffalo LUA-U2-KTX USB_DEVICE (0x0411, 0x003d), .driver_info = (unsigned long) &ax8817x_info, -}, { - // Buffalo LUA-U2-GT 10/100/1000 - USB_DEVICE (0x0411, 0x006e), - .driver_info = (unsigned long) &ax88178_info, }, { // Sitecom LN-029 "USB 2.0 10/100 Ethernet adapter" USB_DEVICE (0x6189, 0x182d), diff --git a/trunk/drivers/net/wan/lapbether.c b/trunk/drivers/net/wan/lapbether.c 
index 24fd613466b7..b5860b97a93e 100644 --- a/trunk/drivers/net/wan/lapbether.c +++ b/trunk/drivers/net/wan/lapbether.c @@ -459,7 +459,6 @@ static void __exit lapbeth_cleanup_driver(void) list_for_each_safe(entry, tmp, &lapbeth_devices) { lapbeth = list_entry(entry, struct lapbethdev, node); - dev_put(lapbeth->ethdev); unregister_netdevice(lapbeth->axdev); } rtnl_unlock(); diff --git a/trunk/drivers/net/wireless/iwlwifi/Kconfig b/trunk/drivers/net/wireless/iwlwifi/Kconfig index 62fb89d82318..d5b7a76fcaad 100644 --- a/trunk/drivers/net/wireless/iwlwifi/Kconfig +++ b/trunk/drivers/net/wireless/iwlwifi/Kconfig @@ -1,5 +1,6 @@ config IWLWIFI - tristate + bool + default n config IWLCORE tristate "Intel Wireless Wifi Core" diff --git a/trunk/drivers/pci/probe.c b/trunk/drivers/pci/probe.c index 3706ce7972dd..4a55bf380957 100644 --- a/trunk/drivers/pci/probe.c +++ b/trunk/drivers/pci/probe.c @@ -842,26 +842,14 @@ static void set_pcie_port_type(struct pci_dev *pdev) * reading the dword at 0x100 which must either be 0 or a valid extended * capability header. */ -int pci_cfg_space_size_ext(struct pci_dev *dev) -{ - u32 status; - - if (pci_read_config_dword(dev, 256, &status) != PCIBIOS_SUCCESSFUL) - goto fail; - if (status == 0xffffffff) - goto fail; - - return PCI_CFG_SPACE_EXP_SIZE; - - fail: - return PCI_CFG_SPACE_SIZE; -} - -int pci_cfg_space_size(struct pci_dev *dev) +int pci_cfg_space_size_ext(struct pci_dev *dev, unsigned check_exp_pcix) { int pos; u32 status; + if (!check_exp_pcix) + goto skip; + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); if (!pos) { pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); @@ -873,12 +861,23 @@ int pci_cfg_space_size(struct pci_dev *dev) goto fail; } - return pci_cfg_space_size_ext(dev); + skip: + if (pci_read_config_dword(dev, 256, &status) != PCIBIOS_SUCCESSFUL) + goto fail; + if (status == 0xffffffff) + goto fail; + + return PCI_CFG_SPACE_EXP_SIZE; fail: return PCI_CFG_SPACE_SIZE; } +int pci_cfg_space_size(struct pci_dev *dev) +{ + return pci_cfg_space_size_ext(dev, 1); +} + static void pci_release_bus_bridge_dev(struct device *dev) { kfree(dev); diff --git a/trunk/drivers/rtc/rtc-ds1511.c b/trunk/drivers/rtc/rtc-ds1511.c index 0f0d27d1c4ca..a83a40b3ebaa 100644 --- a/trunk/drivers/rtc/rtc-ds1511.c +++ b/trunk/drivers/rtc/rtc-ds1511.c @@ -184,7 +184,7 @@ ds1511_wdog_disable(void) static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm) { u8 mon, day, dow, hrs, min, sec, yrs, cen; - unsigned long flags; + unsigned int flags; /* * won't have to change this for a while @@ -247,7 +247,7 @@ static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm) static int ds1511_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm) { unsigned int century; - unsigned long flags; + unsigned int flags; spin_lock_irqsave(&ds1511_lock, flags); rtc_disable_update(); diff --git a/trunk/drivers/rtc/rtc-sh.c b/trunk/drivers/rtc/rtc-sh.c index 1f88e9e914ec..110699bb4787 100644 --- a/trunk/drivers/rtc/rtc-sh.c +++ b/trunk/drivers/rtc/rtc-sh.c @@ -616,7 +616,7 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev) goto err_badres; } - rtc->regbase = ioremap_nocache(rtc->res->start, rtc->regsize); + rtc->regbase = (void __iomem *)rtc->res->start; if (unlikely(!rtc->regbase)) { ret = -EINVAL; goto err_badmap; @@ -626,7 +626,7 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev) &sh_rtc_ops, THIS_MODULE); if (IS_ERR(rtc->rtc_dev)) { ret = PTR_ERR(rtc->rtc_dev); - goto err_unmap; + goto err_badmap; } rtc->capabilities = 
RTC_DEF_CAPABILITIES; @@ -653,7 +653,7 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev) dev_err(&pdev->dev, "request period IRQ failed with %d, IRQ %d\n", ret, rtc->periodic_irq); - goto err_unmap; + goto err_badmap; } ret = request_irq(rtc->carry_irq, sh_rtc_interrupt, IRQF_DISABLED, @@ -663,7 +663,7 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev) "request carry IRQ failed with %d, IRQ %d\n", ret, rtc->carry_irq); free_irq(rtc->periodic_irq, rtc); - goto err_unmap; + goto err_badmap; } ret = request_irq(rtc->alarm_irq, sh_rtc_alarm, IRQF_DISABLED, @@ -674,7 +674,7 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev) rtc->alarm_irq); free_irq(rtc->carry_irq, rtc); free_irq(rtc->periodic_irq, rtc); - goto err_unmap; + goto err_badmap; } tmp = readb(rtc->regbase + RCR1); @@ -684,8 +684,6 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev) return 0; -err_unmap: - iounmap(rtc->regbase); err_badmap: release_resource(rtc->res); err_badres: @@ -710,8 +708,6 @@ static int __devexit sh_rtc_remove(struct platform_device *pdev) release_resource(rtc->res); - iounmap(rtc->regbase); - platform_set_drvdata(pdev, NULL); kfree(rtc); diff --git a/trunk/drivers/s390/char/tty3270.c b/trunk/drivers/s390/char/tty3270.c index 5043150019ac..c1f2adefad41 100644 --- a/trunk/drivers/s390/char/tty3270.c +++ b/trunk/drivers/s390/char/tty3270.c @@ -965,7 +965,8 @@ tty3270_write_room(struct tty_struct *tty) * Insert character into the screen at the current position with the * current color and highlight. This function does NOT do cursor movement. */ -static void tty3270_put_character(struct tty3270 *tp, char ch) +static int +tty3270_put_character(struct tty3270 *tp, char ch) { struct tty3270_line *line; struct tty3270_cell *cell; @@ -985,6 +986,7 @@ static void tty3270_put_character(struct tty3270 *tp, char ch) cell->character = tp->view.ascebc[(unsigned int) ch]; cell->highlight = tp->highlight; cell->f_color = tp->f_color; + return 1; } /* @@ -1610,15 +1612,16 @@ tty3270_write(struct tty_struct * tty, /* * Put single characters to the ttys character buffer */ -static int tty3270_put_char(struct tty_struct *tty, unsigned char ch) +static void +tty3270_put_char(struct tty_struct *tty, unsigned char ch) { struct tty3270 *tp; tp = tty->driver_data; - if (!tp || tp->char_count >= TTY3270_CHAR_BUF_SIZE) - return 0; - tp->char_buf[tp->char_count++] = ch; - return 1; + if (!tp) + return; + if (tp->char_count < TTY3270_CHAR_BUF_SIZE) + tp->char_buf[tp->char_count++] = ch; } /* diff --git a/trunk/drivers/s390/cio/blacklist.c b/trunk/drivers/s390/cio/blacklist.c index 9c21b8f43f9b..40ef948fcb3a 100644 --- a/trunk/drivers/s390/cio/blacklist.c +++ b/trunk/drivers/s390/cio/blacklist.c @@ -19,7 +19,6 @@ #include #include -#include #include "blacklist.h" #include "cio.h" @@ -44,169 +43,164 @@ typedef enum {add, free} range_action; * Function: blacklist_range * (Un-)blacklist the devices from-to */ -static int blacklist_range(range_action action, unsigned int from_ssid, - unsigned int to_ssid, unsigned int from, - unsigned int to, int msgtrigger) +static void +blacklist_range (range_action action, unsigned int from, unsigned int to, + unsigned int ssid) { - if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) { - if (msgtrigger) - printk(KERN_WARNING "cio: Invalid cio_ignore range " - "0.%x.%04x-0.%x.%04x\n", from_ssid, from, - to_ssid, to); - return 1; + if (!to) + to = from; + + if (from > to || to > __MAX_SUBCHANNEL || ssid > __MAX_SSID) { + printk 
(KERN_WARNING "cio: Invalid blacklist range " + "0.%x.%04x to 0.%x.%04x, skipping\n", + ssid, from, ssid, to); + return; } - - while ((from_ssid < to_ssid) || ((from_ssid == to_ssid) && - (from <= to))) { + for (; from <= to; from++) { if (action == add) - set_bit(from, bl_dev[from_ssid]); + set_bit (from, bl_dev[ssid]); else - clear_bit(from, bl_dev[from_ssid]); - from++; - if (from > __MAX_SUBCHANNEL) { - from_ssid++; - from = 0; - } + clear_bit (from, bl_dev[ssid]); } - - return 0; } -static int pure_hex(char **cp, unsigned int *val, int min_digit, - int max_digit, int max_val) +/* + * Function: blacklist_busid + * Get devno/busid from given string. + * Shamelessly grabbed from dasd_devmap.c. + */ +static int +blacklist_busid(char **str, int *id0, int *ssid, int *devno) { - int diff; - unsigned int value; + int val, old_style; + char *sav; - diff = 0; - *val = 0; + sav = *str; - while (isxdigit(**cp) && (diff <= max_digit)) { - - if (isdigit(**cp)) - value = **cp - '0'; - else - value = tolower(**cp) - 'a' + 10; - *val = *val * 16 + value; - (*cp)++; - diff++; + /* check for leading '0x' */ + old_style = 0; + if ((*str)[0] == '0' && (*str)[1] == 'x') { + *str += 2; + old_style = 1; } - - if ((diff < min_digit) || (diff > max_digit) || (*val > max_val)) - return 1; - + if (!isxdigit((*str)[0])) /* We require at least one hex digit */ + goto confused; + val = simple_strtoul(*str, str, 16); + if (old_style || (*str)[0] != '.') { + *id0 = *ssid = 0; + if (val < 0 || val > 0xffff) + goto confused; + *devno = val; + if ((*str)[0] != ',' && (*str)[0] != '-' && + (*str)[0] != '\n' && (*str)[0] != '\0') + goto confused; + return 0; + } + /* New style x.y.z busid */ + if (val < 0 || val > 0xff) + goto confused; + *id0 = val; + (*str)++; + if (!isxdigit((*str)[0])) /* We require at least one hex digit */ + goto confused; + val = simple_strtoul(*str, str, 16); + if (val < 0 || val > 0xff || (*str)++[0] != '.') + goto confused; + *ssid = val; + if (!isxdigit((*str)[0])) /* We require at least one hex digit */ + goto confused; + val = simple_strtoul(*str, str, 16); + if (val < 0 || val > 0xffff) + goto confused; + *devno = val; + if ((*str)[0] != ',' && (*str)[0] != '-' && + (*str)[0] != '\n' && (*str)[0] != '\0') + goto confused; return 0; +confused: + strsep(str, ",\n"); + printk(KERN_WARNING "cio: Invalid cio_ignore parameter '%s'\n", sav); + return 1; } -static int parse_busid(char *str, int *cssid, int *ssid, int *devno, - int msgtrigger) +static int +blacklist_parse_parameters (char *str, range_action action) { - char *str_work; - int val, rc, ret; - - rc = 1; - - if (*str == '\0') - goto out; - - /* old style */ - str_work = str; - val = simple_strtoul(str, &str_work, 16); - - if (*str_work == '\0') { - if (val <= __MAX_SUBCHANNEL) { - *devno = val; - *ssid = 0; - *cssid = 0; - rc = 0; + int from, to, from_id0, to_id0, from_ssid, to_ssid; + + while (*str != 0 && *str != '\n') { + range_action ra = action; + while(*str == ',') + str++; + if (*str == '!') { + ra = !action; + ++str; } - goto out; - } - /* new style */ - str_work = str; - ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID); - if (ret || (str_work[0] != '.')) - goto out; - str_work++; - ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID); - if (ret || (str_work[0] != '.')) - goto out; - str_work++; - ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL); - if (ret || (str_work[0] != '\0')) - goto out; - - rc = 0; -out: - if (rc && msgtrigger) - printk(KERN_WARNING "cio: Invalid cio_ignore device '%s'\n", - str); - - return rc; -} 
- -static int blacklist_parse_parameters(char *str, range_action action, - int msgtrigger) -{ - int from_cssid, to_cssid, from_ssid, to_ssid, from, to; - int rc, totalrc; - char *parm; - range_action ra; - - totalrc = 0; - - while ((parm = strsep(&str, ","))) { - rc = 0; - ra = action; - if (*parm == '!') { - if (ra == add) - ra = free; - else - ra = add; - parm++; - } - if (strcmp(parm, "all") == 0) { - from_cssid = 0; - from_ssid = 0; - from = 0; - to_cssid = __MAX_CSSID; - to_ssid = __MAX_SSID; - to = __MAX_SUBCHANNEL; + /* + * Since we have to parse the proc commands and the + * kernel arguments we have to check four cases + */ + if (strncmp(str,"all,",4) == 0 || strcmp(str,"all") == 0 || + strncmp(str,"all\n",4) == 0 || strncmp(str,"all ",4) == 0) { + int j; + + str += 3; + for (j=0; j <= __MAX_SSID; j++) + blacklist_range(ra, 0, __MAX_SUBCHANNEL, j); } else { - rc = parse_busid(strsep(&parm, "-"), &from_cssid, - &from_ssid, &from, msgtrigger); - if (!rc) { - if (parm != NULL) - rc = parse_busid(parm, &to_cssid, - &to_ssid, &to, - msgtrigger); - else { - to_cssid = from_cssid; - to_ssid = from_ssid; - to = from; - } + int rc; + + rc = blacklist_busid(&str, &from_id0, + &from_ssid, &from); + if (rc) + continue; + to = from; + to_id0 = from_id0; + to_ssid = from_ssid; + if (*str == '-') { + str++; + rc = blacklist_busid(&str, &to_id0, + &to_ssid, &to); + if (rc) + continue; } + if (*str == '-') { + printk(KERN_WARNING "cio: invalid cio_ignore " + "parameter '%s'\n", + strsep(&str, ",\n")); + continue; + } + if ((from_id0 != to_id0) || + (from_ssid != to_ssid)) { + printk(KERN_WARNING "cio: invalid cio_ignore " + "range %x.%x.%04x-%x.%x.%04x\n", + from_id0, from_ssid, from, + to_id0, to_ssid, to); + continue; + } + blacklist_range (ra, from, to, to_ssid); } - if (!rc) { - rc = blacklist_range(ra, from_ssid, to_ssid, from, to, - msgtrigger); - if (rc) - totalrc = 1; - } else - totalrc = 1; } - - return totalrc; + return 1; } +/* Parsing the commandline for blacklist parameters, e.g. to blacklist + * bus ids 0.0.1234, 0.0.1235 and 0.0.1236, you could use any of: + * - cio_ignore=1234-1236 + * - cio_ignore=0x1234-0x1235,1236 + * - cio_ignore=0x1234,1235-1236 + * - cio_ignore=1236 cio_ignore=1234-0x1236 + * - cio_ignore=1234 cio_ignore=1236 cio_ignore=0x1235 + * - cio_ignore=0.0.1234-0.0.1236 + * - cio_ignore=0.0.1234,0x1235,1236 + * - ... + */ static int __init blacklist_setup (char *str) { CIO_MSG_EVENT(6, "Reading blacklist parameters\n"); - if (blacklist_parse_parameters(str, add, 1)) - return 0; - return 1; + return blacklist_parse_parameters (str, add); } __setup ("cio_ignore=", blacklist_setup); @@ -230,23 +224,27 @@ is_blacklisted (int ssid, int devno) * Function: blacklist_parse_proc_parameters * parse the stuff which is piped to /proc/cio_ignore */ -static int blacklist_parse_proc_parameters(char *buf) +static void +blacklist_parse_proc_parameters (char *buf) { - int rc; - char *parm; - - parm = strsep(&buf, " "); - - if (strcmp("free", parm) == 0) - rc = blacklist_parse_parameters(buf, free, 0); - else if (strcmp("add", parm) == 0) - rc = blacklist_parse_parameters(buf, add, 0); - else - return 1; + if (strncmp (buf, "free ", 5) == 0) { + blacklist_parse_parameters (buf + 5, free); + } else if (strncmp (buf, "add ", 4) == 0) { + /* + * We don't need to check for known devices since + * css_probe_device will handle this correctly. 
+ */ + blacklist_parse_parameters (buf + 4, add); + } else { + printk (KERN_WARNING "cio: cio_ignore: Parse error; \n" + KERN_WARNING "try using 'free all|," + ",...'\n" + KERN_WARNING "or 'add ," + ",...'\n"); + return; + } css_schedule_reprobe(); - - return rc; } /* Iterator struct for all devices. */ @@ -330,8 +328,6 @@ cio_ignore_write(struct file *file, const char __user *user_buf, size_t user_len, loff_t *offset) { char *buf; - size_t i; - ssize_t rc, ret; if (*offset) return -EINVAL; @@ -340,27 +336,16 @@ cio_ignore_write(struct file *file, const char __user *user_buf, buf = vmalloc (user_len + 1); /* maybe better use the stack? */ if (buf == NULL) return -ENOMEM; - memset(buf, 0, user_len + 1); - if (strncpy_from_user (buf, user_buf, user_len) < 0) { - rc = -EFAULT; - goto out_free; + vfree (buf); + return -EFAULT; } + buf[user_len] = '\0'; - i = user_len - 1; - while ((i >= 0) && (isspace(buf[i]) || (buf[i] == 0))) { - buf[i] = '\0'; - i--; - } - ret = blacklist_parse_proc_parameters(buf); - if (ret) - rc = -EINVAL; - else - rc = user_len; + blacklist_parse_proc_parameters (buf); -out_free: vfree (buf); - return rc; + return user_len; } static const struct seq_operations cio_ignore_proc_seq_ops = { diff --git a/trunk/drivers/s390/cio/cio.c b/trunk/drivers/s390/cio/cio.c index 82c6a2d45128..08a578161306 100644 --- a/trunk/drivers/s390/cio/cio.c +++ b/trunk/drivers/s390/cio/cio.c @@ -39,6 +39,23 @@ debug_info_t *cio_debug_msg_id; debug_info_t *cio_debug_trace_id; debug_info_t *cio_debug_crw_id; +int cio_show_msg; + +static int __init +cio_setup (char *parm) +{ + if (!strcmp (parm, "yes")) + cio_show_msg = 1; + else if (!strcmp (parm, "no")) + cio_show_msg = 0; + else + printk(KERN_ERR "cio: cio_setup: " + "invalid cio_msg parameter '%s'", parm); + return 1; +} + +__setup ("cio_msg=", cio_setup); + /* * Function: cio_debug_init * Initializes three debug logs for common I/O: @@ -149,7 +166,7 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) stsch (sch->schid, &sch->schib); - CIO_MSG_EVENT(2, "cio_start: 'not oper' status for " + CIO_MSG_EVENT(0, "cio_start: 'not oper' status for " "subchannel 0.%x.%04x!\n", sch->schid.ssid, sch->schid.sch_no); sprintf(dbf_text, "no%s", sch->dev.bus_id); @@ -550,9 +567,10 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) * ... just being curious we check for non I/O subchannels */ if (sch->st != 0) { - CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports " - "non-I/O subchannel type %04X\n", - sch->schid.ssid, sch->schid.sch_no, sch->st); + CIO_DEBUG(KERN_INFO, 0, + "Subchannel 0.%x.%04x reports " + "non-I/O subchannel type %04X\n", + sch->schid.ssid, sch->schid.sch_no, sch->st); /* We stop here for non-io subchannels. */ err = sch->st; goto out; @@ -570,7 +588,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) * This device must not be known to Linux. So we simply * say that there is no device and return ENODEV. 
*/ - CIO_MSG_EVENT(6, "Blacklisted device detected " + CIO_MSG_EVENT(4, "Blacklisted device detected " "at devno %04X, subchannel set %x\n", sch->schib.pmcw.dev, sch->schid.ssid); err = -ENODEV; @@ -583,11 +601,12 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) sch->lpm = sch->schib.pmcw.pam & sch->opm; sch->isc = 3; - CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X " - "- PIM = %02X, PAM = %02X, POM = %02X\n", - sch->schib.pmcw.dev, sch->schid.ssid, - sch->schid.sch_no, sch->schib.pmcw.pim, - sch->schib.pmcw.pam, sch->schib.pmcw.pom); + CIO_DEBUG(KERN_INFO, 0, + "Detected device %04x on subchannel 0.%x.%04X" + " - PIM = %02X, PAM = %02X, POM = %02X\n", + sch->schib.pmcw.dev, sch->schid.ssid, + sch->schid.sch_no, sch->schib.pmcw.pim, + sch->schib.pmcw.pam, sch->schib.pmcw.pom); /* * We now have to initially ... diff --git a/trunk/drivers/s390/cio/cio.h b/trunk/drivers/s390/cio/cio.h index 6e933aebe013..3c75412904dc 100644 --- a/trunk/drivers/s390/cio/cio.h +++ b/trunk/drivers/s390/cio/cio.h @@ -118,4 +118,6 @@ extern void *cio_get_console_priv(void); #define cio_get_console_priv() NULL #endif +extern int cio_show_msg; + #endif diff --git a/trunk/drivers/s390/cio/cio_debug.h b/trunk/drivers/s390/cio/cio_debug.h index e64e8278c42e..d7429ef6c666 100644 --- a/trunk/drivers/s390/cio/cio_debug.h +++ b/trunk/drivers/s390/cio/cio_debug.h @@ -31,4 +31,10 @@ static inline void CIO_HEX_EVENT(int level, void *data, int length) } } +#define CIO_DEBUG(printk_level, event_level, msg...) do { \ + if (cio_show_msg) \ + printk(printk_level "cio: " msg); \ + CIO_MSG_EVENT(event_level, msg); \ + } while (0) + #endif diff --git a/trunk/drivers/s390/cio/css.c b/trunk/drivers/s390/cio/css.c index a76956512b2d..595e327d2f76 100644 --- a/trunk/drivers/s390/cio/css.c +++ b/trunk/drivers/s390/cio/css.c @@ -570,7 +570,7 @@ static void reprobe_all(struct work_struct *unused) { int ret; - CIO_MSG_EVENT(4, "reprobe start\n"); + CIO_MSG_EVENT(2, "reprobe start\n"); need_reprobe = 0; /* Make sure initial subchannel scan is done. 
*/ @@ -578,7 +578,7 @@ static void reprobe_all(struct work_struct *unused) atomic_read(&ccw_device_init_count) == 0); ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL); - CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret, + CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret, need_reprobe); } diff --git a/trunk/drivers/s390/cio/device.c b/trunk/drivers/s390/cio/device.c index e22813db74a2..abfd601d237a 100644 --- a/trunk/drivers/s390/cio/device.c +++ b/trunk/drivers/s390/cio/device.c @@ -341,7 +341,7 @@ ccw_device_remove_disconnected(struct ccw_device *cdev) rc = device_schedule_callback(&cdev->dev, ccw_device_remove_orphan_cb); if (rc) - CIO_MSG_EVENT(0, "Couldn't unregister orphan " + CIO_MSG_EVENT(2, "Couldn't unregister orphan " "0.%x.%04x\n", cdev->private->dev_id.ssid, cdev->private->dev_id.devno); @@ -351,7 +351,7 @@ ccw_device_remove_disconnected(struct ccw_device *cdev) rc = device_schedule_callback(cdev->dev.parent, ccw_device_remove_sch_cb); if (rc) - CIO_MSG_EVENT(0, "Couldn't unregister disconnected device " + CIO_MSG_EVENT(2, "Couldn't unregister disconnected device " "0.%x.%04x\n", cdev->private->dev_id.ssid, cdev->private->dev_id.devno); @@ -397,7 +397,7 @@ int ccw_device_set_offline(struct ccw_device *cdev) if (ret == 0) wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); else { - CIO_MSG_EVENT(0, "ccw_device_offline returned %d, " + CIO_MSG_EVENT(2, "ccw_device_offline returned %d, " "device 0.%x.%04x\n", ret, cdev->private->dev_id.ssid, cdev->private->dev_id.devno); @@ -433,7 +433,7 @@ int ccw_device_set_online(struct ccw_device *cdev) if (ret == 0) wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); else { - CIO_MSG_EVENT(0, "ccw_device_online returned %d, " + CIO_MSG_EVENT(2, "ccw_device_online returned %d, " "device 0.%x.%04x\n", ret, cdev->private->dev_id.ssid, cdev->private->dev_id.devno); @@ -451,7 +451,7 @@ int ccw_device_set_online(struct ccw_device *cdev) if (ret == 0) wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); else - CIO_MSG_EVENT(0, "ccw_device_offline returned %d, " + CIO_MSG_EVENT(2, "ccw_device_offline returned %d, " "device 0.%x.%04x\n", ret, cdev->private->dev_id.ssid, cdev->private->dev_id.devno); @@ -803,7 +803,7 @@ static void sch_attach_disconnected_device(struct subchannel *sch, other_sch = to_subchannel(get_device(cdev->dev.parent)); ret = device_move(&cdev->dev, &sch->dev); if (ret) { - CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed " + CIO_MSG_EVENT(2, "Moving disconnected device 0.%x.%04x failed " "(ret=%d)!\n", cdev->private->dev_id.ssid, cdev->private->dev_id.devno, ret); put_device(&other_sch->dev); @@ -933,7 +933,7 @@ io_subchannel_register(struct work_struct *work) ret = device_reprobe(&cdev->dev); if (ret) /* We can't do much here. 
*/ - CIO_MSG_EVENT(0, "device_reprobe() returned" + CIO_MSG_EVENT(2, "device_reprobe() returned" " %d for 0.%x.%04x\n", ret, cdev->private->dev_id.ssid, cdev->private->dev_id.devno); @@ -1086,7 +1086,7 @@ static void ccw_device_move_to_sch(struct work_struct *work) rc = device_move(&cdev->dev, &sch->dev); mutex_unlock(&sch->reg_mutex); if (rc) { - CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to subchannel " + CIO_MSG_EVENT(2, "Moving device 0.%x.%04x to subchannel " "0.%x.%04x failed (ret=%d)!\n", cdev->private->dev_id.ssid, cdev->private->dev_id.devno, sch->schid.ssid, @@ -1446,7 +1446,8 @@ ccw_device_remove (struct device *dev) wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); else - CIO_MSG_EVENT(0, "ccw_device_offline returned %d, " + //FIXME: we can't fail! + CIO_MSG_EVENT(2, "ccw_device_offline returned %d, " "device 0.%x.%04x\n", ret, cdev->private->dev_id.ssid, cdev->private->dev_id.devno); @@ -1523,7 +1524,7 @@ static int recovery_check(struct device *dev, void *data) spin_lock_irq(cdev->ccwlock); switch (cdev->private->state) { case DEV_STATE_DISCONNECTED: - CIO_MSG_EVENT(4, "recovery: trigger 0.%x.%04x\n", + CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n", cdev->private->dev_id.ssid, cdev->private->dev_id.devno); dev_fsm_event(cdev, DEV_EVENT_VERIFY); @@ -1553,7 +1554,7 @@ static void recovery_work_func(struct work_struct *unused) } spin_unlock_irq(&recovery_lock); } else - CIO_MSG_EVENT(4, "recovery: end\n"); + CIO_MSG_EVENT(2, "recovery: end\n"); } static DECLARE_WORK(recovery_work, recovery_work_func); @@ -1571,7 +1572,7 @@ void ccw_device_schedule_recovery(void) { unsigned long flags; - CIO_MSG_EVENT(4, "recovery: schedule\n"); + CIO_MSG_EVENT(2, "recovery: schedule\n"); spin_lock_irqsave(&recovery_lock, flags); if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) { recovery_phase = 0; diff --git a/trunk/drivers/s390/cio/device_fsm.c b/trunk/drivers/s390/cio/device_fsm.c index e268d5a77c12..99403b0a97a7 100644 --- a/trunk/drivers/s390/cio/device_fsm.c +++ b/trunk/drivers/s390/cio/device_fsm.c @@ -322,10 +322,10 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) same_dev = 0; /* Keep the compiler quiet... */ switch (state) { case DEV_STATE_NOT_OPER: - CIO_MSG_EVENT(2, "SenseID : unknown device %04x on " - "subchannel 0.%x.%04x\n", - cdev->private->dev_id.devno, - sch->schid.ssid, sch->schid.sch_no); + CIO_DEBUG(KERN_WARNING, 2, + "SenseID : unknown device %04x on subchannel " + "0.%x.%04x\n", cdev->private->dev_id.devno, + sch->schid.ssid, sch->schid.sch_no); break; case DEV_STATE_OFFLINE: if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) { @@ -348,19 +348,20 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) return; } /* Issue device info message. 
*/ - CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: " - "CU Type/Mod = %04X/%02X, Dev Type/Mod = " - "%04X/%02X\n", - cdev->private->dev_id.ssid, - cdev->private->dev_id.devno, - cdev->id.cu_type, cdev->id.cu_model, - cdev->id.dev_type, cdev->id.dev_model); + CIO_DEBUG(KERN_INFO, 2, + "SenseID : device 0.%x.%04x reports: " + "CU Type/Mod = %04X/%02X, Dev Type/Mod = " + "%04X/%02X\n", + cdev->private->dev_id.ssid, + cdev->private->dev_id.devno, + cdev->id.cu_type, cdev->id.cu_model, + cdev->id.dev_type, cdev->id.dev_model); break; case DEV_STATE_BOXED: - CIO_MSG_EVENT(0, "SenseID : boxed device %04x on " - " subchannel 0.%x.%04x\n", - cdev->private->dev_id.devno, - sch->schid.ssid, sch->schid.sch_no); + CIO_DEBUG(KERN_WARNING, 2, + "SenseID : boxed device %04x on subchannel " + "0.%x.%04x\n", cdev->private->dev_id.devno, + sch->schid.ssid, sch->schid.sch_no); break; } cdev->private->state = state; @@ -442,8 +443,9 @@ ccw_device_done(struct ccw_device *cdev, int state) if (state == DEV_STATE_BOXED) - CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n", - cdev->private->dev_id.devno, sch->schid.sch_no); + CIO_DEBUG(KERN_WARNING, 2, + "Boxed device %04x on subchannel %04x\n", + cdev->private->dev_id.devno, sch->schid.sch_no); if (cdev->private->flags.donotify) { cdev->private->flags.donotify = 0; @@ -898,7 +900,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) /* Basic sense hasn't started. Try again. */ ccw_device_do_sense(cdev, irb); else { - CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited " + CIO_MSG_EVENT(2, "Huh? 0.%x.%04x: unsolicited " "interrupt during w4sense...\n", cdev->private->dev_id.ssid, cdev->private->dev_id.devno); @@ -1167,10 +1169,8 @@ ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event) static void ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event) { - CIO_MSG_EVENT(0, "Internal state [%i][%i] not handled for device " - "0.%x.%04x\n", cdev->private->state, dev_event, - cdev->private->dev_id.ssid, - cdev->private->dev_id.devno); + CIO_MSG_EVENT(0, "dev_jumptable[%i][%i] == NULL\n", + cdev->private->state, dev_event); BUG(); } diff --git a/trunk/drivers/s390/cio/device_id.c b/trunk/drivers/s390/cio/device_id.c index cba7020517ed..dc4d87f77f6c 100644 --- a/trunk/drivers/s390/cio/device_id.c +++ b/trunk/drivers/s390/cio/device_id.c @@ -214,7 +214,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev) * sense id information. So, for intervention required, * we use the "whack it until it talks" strategy... 
*/ - CIO_MSG_EVENT(0, "SenseID : device %04x on Subchannel " + CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel " "0.%x.%04x reports cmd reject\n", cdev->private->dev_id.devno, sch->schid.ssid, sch->schid.sch_no); @@ -239,7 +239,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev) lpm = to_io_private(sch)->orb.lpm; if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) - CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x " + CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x " "on subchannel 0.%x.%04x is " "'not operational'\n", lpm, cdev->private->dev_id.devno, diff --git a/trunk/drivers/s390/cio/device_pgid.c b/trunk/drivers/s390/cio/device_pgid.c index ba559053402e..c52449a1f9fc 100644 --- a/trunk/drivers/s390/cio/device_pgid.c +++ b/trunk/drivers/s390/cio/device_pgid.c @@ -79,7 +79,7 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev) /* ret is 0, -EBUSY, -EACCES or -ENODEV */ if (ret != -EACCES) return ret; - CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel " + CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel " "0.%x.%04x, lpm %02X, became 'not " "operational'\n", cdev->private->dev_id.devno, @@ -159,7 +159,7 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev) u8 lpm; lpm = to_io_private(sch)->orb.lpm; - CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x," + CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x," " lpm %02X, became 'not operational'\n", cdev->private->dev_id.devno, sch->schid.ssid, sch->schid.sch_no, lpm); @@ -275,7 +275,7 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func) return ret; } /* PGID command failed on this path. */ - CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel " + CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " "0.%x.%04x, lpm %02X, became 'not operational'\n", cdev->private->dev_id.devno, sch->schid.ssid, sch->schid.sch_no, cdev->private->imask); @@ -317,7 +317,7 @@ static int __ccw_device_do_nop(struct ccw_device *cdev) return ret; } /* nop command failed on this path. 
*/ - CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel " + CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel " "0.%x.%04x, lpm %02X, became 'not operational'\n", cdev->private->dev_id.devno, sch->schid.ssid, sch->schid.sch_no, cdev->private->imask); @@ -362,7 +362,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev) return -EAGAIN; } if (irb->scsw.cc == 3) { - CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x," + CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel 0.%x.%04x," " lpm %02X, became 'not operational'\n", cdev->private->dev_id.devno, sch->schid.ssid, sch->schid.sch_no, cdev->private->imask); @@ -391,7 +391,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev) return -ETIME; } if (irb->scsw.cc == 3) { - CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x," + CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel 0.%x.%04x," " lpm %02X, became 'not operational'\n", cdev->private->dev_id.devno, sch->schid.ssid, sch->schid.sch_no, cdev->private->imask); diff --git a/trunk/drivers/s390/s390mach.c b/trunk/drivers/s390/s390mach.c index 5080f343ad74..4d4b54277c43 100644 --- a/trunk/drivers/s390/s390mach.c +++ b/trunk/drivers/s390/s390mach.c @@ -48,11 +48,10 @@ s390_collect_crw_info(void *param) int ccode; struct semaphore *sem; unsigned int chain; - int ignore; sem = (struct semaphore *)param; repeat: - ignore = down_interruptible(sem); + down_interruptible(sem); chain = 0; while (1) { if (unlikely(chain > 1)) { diff --git a/trunk/drivers/scsi/Kconfig b/trunk/drivers/scsi/Kconfig index 81ccbd7f9e34..46d7e400c8be 100644 --- a/trunk/drivers/scsi/Kconfig +++ b/trunk/drivers/scsi/Kconfig @@ -1679,7 +1679,6 @@ config MAC_SCSI config SCSI_MAC_ESP tristate "Macintosh NCR53c9[46] SCSI" depends on MAC && SCSI - select SCSI_SPI_ATTRS help This is the NCR 53c9x SCSI controller found on most of the 68040 based Macintoshes. diff --git a/trunk/drivers/scsi/dpt_i2o.c b/trunk/drivers/scsi/dpt_i2o.c index 8508816f303d..0fb5bf4c43ac 100644 --- a/trunk/drivers/scsi/dpt_i2o.c +++ b/trunk/drivers/scsi/dpt_i2o.c @@ -1967,6 +1967,45 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg) return rcode; } + +/* + * This routine returns information about the system. This does not effect + * any logic and if the info is wrong - it doesn't matter. 
+ */ + +/* Get all the info we can not get from kernel services */ +static int adpt_system_info(void __user *buffer) +{ + sysInfo_S si; + + memset(&si, 0, sizeof(si)); + + si.osType = OS_LINUX; + si.osMajorVersion = 0; + si.osMinorVersion = 0; + si.osRevision = 0; + si.busType = SI_PCI_BUS; + si.processorFamily = DPTI_sig.dsProcessorFamily; + +#if defined __i386__ + adpt_i386_info(&si); +#elif defined (__ia64__) + adpt_ia64_info(&si); +#elif defined(__sparc__) + adpt_sparc_info(&si); +#elif defined (__alpha__) + adpt_alpha_info(&si); +#else + si.processorType = 0xff ; +#endif + if(copy_to_user(buffer, &si, sizeof(si))){ + printk(KERN_WARNING"dpti: Could not copy buffer TO user\n"); + return -EFAULT; + } + + return 0; +} + #if defined __ia64__ static void adpt_ia64_info(sysInfo_S* si) { @@ -1977,6 +2016,7 @@ static void adpt_ia64_info(sysInfo_S* si) } #endif + #if defined __sparc__ static void adpt_sparc_info(sysInfo_S* si) { @@ -1986,6 +2026,7 @@ static void adpt_sparc_info(sysInfo_S* si) si->processorType = PROC_ULTRASPARC; } #endif + #if defined __alpha__ static void adpt_alpha_info(sysInfo_S* si) { @@ -1997,6 +2038,7 @@ static void adpt_alpha_info(sysInfo_S* si) #endif #if defined __i386__ + static void adpt_i386_info(sysInfo_S* si) { // This is all the info we need for now @@ -2017,45 +2059,9 @@ static void adpt_i386_info(sysInfo_S* si) break; } } -#endif - -/* - * This routine returns information about the system. This does not effect - * any logic and if the info is wrong - it doesn't matter. - */ -/* Get all the info we can not get from kernel services */ -static int adpt_system_info(void __user *buffer) -{ - sysInfo_S si; - - memset(&si, 0, sizeof(si)); - - si.osType = OS_LINUX; - si.osMajorVersion = 0; - si.osMinorVersion = 0; - si.osRevision = 0; - si.busType = SI_PCI_BUS; - si.processorFamily = DPTI_sig.dsProcessorFamily; - -#if defined __i386__ - adpt_i386_info(&si); -#elif defined (__ia64__) - adpt_ia64_info(&si); -#elif defined(__sparc__) - adpt_sparc_info(&si); -#elif defined (__alpha__) - adpt_alpha_info(&si); -#else - si.processorType = 0xff ; #endif - if (copy_to_user(buffer, &si, sizeof(si))){ - printk(KERN_WARNING"dpti: Could not copy buffer TO user\n"); - return -EFAULT; - } - return 0; -} static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) diff --git a/trunk/drivers/scsi/dpti.h b/trunk/drivers/scsi/dpti.h index 337746d46043..924cd5a51676 100644 --- a/trunk/drivers/scsi/dpti.h +++ b/trunk/drivers/scsi/dpti.h @@ -316,6 +316,19 @@ static int adpt_close(struct inode *inode, struct file *file); static void adpt_delay(int millisec); #endif +#if defined __ia64__ +static void adpt_ia64_info(sysInfo_S* si); +#endif +#if defined __sparc__ +static void adpt_sparc_info(sysInfo_S* si); +#endif +#if defined __alpha__ +static void adpt_sparc_info(sysInfo_S* si); +#endif +#if defined __i386__ +static void adpt_i386_info(sysInfo_S* si); +#endif + #define PRINT_BUFFER_SIZE 512 #define HBA_FLAGS_DBG_FLAGS_MASK 0xffff0000 // Mask for debug flags diff --git a/trunk/drivers/serial/bfin_5xx.c b/trunk/drivers/serial/bfin_5xx.c index d6b4ead693b7..8a2f6a1baa74 100644 --- a/trunk/drivers/serial/bfin_5xx.c +++ b/trunk/drivers/serial/bfin_5xx.c @@ -65,6 +65,9 @@ static void bfin_serial_stop_tx(struct uart_port *port) { struct bfin_serial_port *uart = (struct bfin_serial_port *)port; struct circ_buf *xmit = &uart->port.info->xmit; +#if !defined(CONFIG_BF54x) && !defined(CONFIG_SERIAL_BFIN_DMA) + unsigned short ier; +#endif while (!(UART_GET_LSR(uart) & TEMT)) 
cpu_relax(); @@ -79,8 +82,12 @@ static void bfin_serial_stop_tx(struct uart_port *port) #ifdef CONFIG_BF54x /* Clear TFI bit */ UART_PUT_LSR(uart, TFI); -#endif UART_CLEAR_IER(uart, ETBEI); +#else + ier = UART_GET_IER(uart); + ier &= ~ETBEI; + UART_PUT_IER(uart, ier); +#endif #endif } @@ -95,7 +102,14 @@ static void bfin_serial_start_tx(struct uart_port *port) if (uart->tx_done) bfin_serial_dma_tx_chars(uart); #else +#ifdef CONFIG_BF54x UART_SET_IER(uart, ETBEI); +#else + unsigned short ier; + ier = UART_GET_IER(uart); + ier |= ETBEI; + UART_PUT_IER(uart, ier); +#endif bfin_serial_tx_chars(uart); #endif } @@ -106,10 +120,21 @@ static void bfin_serial_start_tx(struct uart_port *port) static void bfin_serial_stop_rx(struct uart_port *port) { struct bfin_serial_port *uart = (struct bfin_serial_port *)port; -#ifdef CONFIG_KGDB_UART - if (uart->port.line != CONFIG_KGDB_UART_PORT) +#ifdef CONFIG_KGDB_UART + if (uart->port.line != CONFIG_KGDB_UART_PORT) { #endif +#ifdef CONFIG_BF54x UART_CLEAR_IER(uart, ERBFI); +#else + unsigned short ier; + + ier = UART_GET_IER(uart); + ier &= ~ERBFI; + UART_PUT_IER(uart, ier); +#endif +#ifdef CONFIG_KGDB_UART + } +#endif } /* @@ -136,7 +161,10 @@ void kgdb_put_debug_char(int chr) SSYNC(); } - UART_CLEAR_DLAB(uart); +#ifndef CONFIG_BF54x + UART_PUT_LCR(uart, UART_GET_LCR(uart)&(~DLAB)); + SSYNC(); +#endif UART_PUT_CHAR(uart, (unsigned char)chr); SSYNC(); } @@ -155,7 +183,10 @@ int kgdb_get_debug_char(void) while(!(UART_GET_LSR(uart) & DR)) { SSYNC(); } - UART_CLEAR_DLAB(uart); +#ifndef CONFIG_BF54x + UART_PUT_LCR(uart, UART_GET_LCR(uart)&(~DLAB)); + SSYNC(); +#endif chr = UART_GET_CHAR(uart); SSYNC(); @@ -177,6 +208,9 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart) struct tty_struct *tty = uart->port.info->tty; unsigned int status, ch, flg; static struct timeval anomaly_start = { .tv_sec = 0 }; +#ifdef CONFIG_KGDB_UART + struct pt_regs *regs = get_irq_regs(); +#endif status = UART_GET_LSR(uart); UART_CLEAR_LSR(uart); @@ -186,7 +220,6 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart) #ifdef CONFIG_KGDB_UART if (uart->port.line == CONFIG_KGDB_UART_PORT) { - struct pt_regs *regs = get_irq_regs(); if (uart->port.cons->index == CONFIG_KGDB_UART_PORT && ch == 0x1) { /* Ctrl + A */ kgdb_breakkey_pressed(regs); return; @@ -358,6 +391,7 @@ static void bfin_serial_do_work(struct work_struct *work) static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) { struct circ_buf *xmit = &uart->port.info->xmit; + unsigned short ier; uart->tx_done = 0; @@ -395,7 +429,13 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) set_dma_x_modify(uart->tx_dma_channel, 1); enable_dma(uart->tx_dma_channel); +#ifdef CONFIG_BF54x UART_SET_IER(uart, ETBEI); +#else + ier = UART_GET_IER(uart); + ier |= ETBEI; + UART_PUT_IER(uart, ier); +#endif } static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) @@ -473,12 +513,19 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id) { struct bfin_serial_port *uart = dev_id; struct circ_buf *xmit = &uart->port.info->xmit; + unsigned short ier; spin_lock(&uart->port.lock); if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) { disable_dma(uart->tx_dma_channel); clear_dma_irqstat(uart->tx_dma_channel); +#ifdef CONFIG_BF54x UART_CLEAR_IER(uart, ETBEI); +#else + ier = UART_GET_IER(uart); + ier &= ~ETBEI; + UART_PUT_IER(uart, ier); +#endif xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1); uart->port.icount.tx += uart->tx_count; @@ -654,6 +701,7 @@ 
static int bfin_serial_startup(struct uart_port *port) # endif } + if (request_irq (uart->port.irq+1, bfin_serial_tx_int, IRQF_DISABLED, "BFIN_UART_TX", uart)) { @@ -662,7 +710,11 @@ static int bfin_serial_startup(struct uart_port *port) return -EBUSY; } #endif +#ifdef CONFIG_BF54x UART_SET_IER(uart, ERBFI); +#else + UART_PUT_IER(uart, UART_GET_IER(uart) | ERBFI); +#endif return 0; } @@ -758,15 +810,26 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios, UART_PUT_IER(uart, 0); #endif +#ifndef CONFIG_BF54x /* Set DLAB in LCR to Access DLL and DLH */ - UART_SET_DLAB(uart); + val = UART_GET_LCR(uart); + val |= DLAB; + UART_PUT_LCR(uart, val); + SSYNC(); +#endif UART_PUT_DLL(uart, quot & 0xFF); + SSYNC(); UART_PUT_DLH(uart, (quot >> 8) & 0xFF); SSYNC(); +#ifndef CONFIG_BF54x /* Clear DLAB in LCR to Access THR RBR IER */ - UART_CLEAR_DLAB(uart); + val = UART_GET_LCR(uart); + val &= ~DLAB; + UART_PUT_LCR(uart, val); + SSYNC(); +#endif UART_PUT_LCR(uart, lcr); @@ -929,7 +992,8 @@ bfin_serial_console_get_options(struct bfin_serial_port *uart, int *baud, status = UART_GET_IER(uart) & (ERBFI | ETBEI); if (status == (ERBFI | ETBEI)) { /* ok, the port was enabled */ - u16 lcr, dlh, dll; + unsigned short lcr, val; + unsigned short dlh, dll; lcr = UART_GET_LCR(uart); @@ -946,14 +1010,22 @@ bfin_serial_console_get_options(struct bfin_serial_port *uart, int *baud, case 2: *bits = 7; break; case 3: *bits = 8; break; } +#ifndef CONFIG_BF54x /* Set DLAB in LCR to Access DLL and DLH */ - UART_SET_DLAB(uart); + val = UART_GET_LCR(uart); + val |= DLAB; + UART_PUT_LCR(uart, val); +#endif dll = UART_GET_DLL(uart); dlh = UART_GET_DLH(uart); +#ifndef CONFIG_BF54x /* Clear DLAB in LCR to Access THR RBR IER */ - UART_CLEAR_DLAB(uart); + val = UART_GET_LCR(uart); + val &= ~DLAB; + UART_PUT_LCR(uart, val); +#endif *baud = get_sclk() / (16*(dll | dlh << 8)); } @@ -1218,7 +1290,11 @@ static int __init bfin_serial_init(void) request_irq(uart->port.irq, bfin_serial_rx_int, IRQF_DISABLED, "BFIN_UART_RX", uart); pr_info("Request irq for kgdb uart port\n"); +#ifdef CONFIG_BF54x UART_SET_IER(uart, ERBFI); +#else + UART_PUT_IER(uart, UART_GET_IER(uart) | ERBFI); +#endif SSYNC(); t.c_cflag = CS8|B57600; t.c_iflag = 0; diff --git a/trunk/drivers/serial/serial_core.c b/trunk/drivers/serial/serial_core.c index eab032733790..1e2b9d826f69 100644 --- a/trunk/drivers/serial/serial_core.c +++ b/trunk/drivers/serial/serial_core.c @@ -556,7 +556,7 @@ static int uart_chars_in_buffer(struct tty_struct *tty) static void uart_flush_buffer(struct tty_struct *tty) { struct uart_state *state = tty->driver_data; - struct uart_port *port; + struct uart_port *port = state->port; unsigned long flags; /* @@ -568,7 +568,6 @@ static void uart_flush_buffer(struct tty_struct *tty) return; } - port = state->port; pr_debug("uart_flush_buffer(%d) called\n", tty->index); spin_lock_irqsave(&port->lock, flags); diff --git a/trunk/drivers/serial/sh-sci.c b/trunk/drivers/serial/sh-sci.c index 8fdafc27fce8..969106187718 100644 --- a/trunk/drivers/serial/sh-sci.c +++ b/trunk/drivers/serial/sh-sci.c @@ -42,12 +42,14 @@ #include #include #include + +#ifdef CONFIG_CPU_FREQ #include #include -#include -#include +#endif -#ifdef CONFIG_SUPERH +#if defined(CONFIG_SUPERH) && !defined(CONFIG_SUPERH64) +#include #include #include #include @@ -78,7 +80,7 @@ struct sci_port { struct timer_list break_timer; int break_flag; -#ifdef CONFIG_SUPERH +#if defined(CONFIG_SUPERH) && !defined(CONFIG_SUPERH64) /* Port clock */ struct clk *clk; #endif @@ -363,19 
+365,21 @@ static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag) static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag) { unsigned int fcr_val = 0; - unsigned short data; - if (port->mapbase == 0xffe00000) { - data = ctrl_inw(PSCR); - data &= ~0x03cf; - if (cflag & CRTSCTS) - fcr_val |= SCFCR_MCE; - else - data |= 0x0340; + if (cflag & CRTSCTS) { + fcr_val |= SCFCR_MCE; + + ctrl_outw(0x0000, PORT_PSCR); + } else { + unsigned short data; + + data = ctrl_inw(PORT_PSCR); + data &= 0x033f; + data |= 0x0400; + ctrl_outw(data, PORT_PSCR); - ctrl_outw(data, PSCR); + ctrl_outw(ctrl_inw(SCSPTR0) & 0x17, SCSPTR0); } - /* SCIF1 and SCIF2 should be setup by board code */ sci_out(port, SCFCR, fcr_val); } diff --git a/trunk/drivers/serial/sh-sci.h b/trunk/drivers/serial/sh-sci.h index eb84833233fd..fa8700a968fc 100644 --- a/trunk/drivers/serial/sh-sci.h +++ b/trunk/drivers/serial/sh-sci.h @@ -76,13 +76,12 @@ # define SCSCR_INIT(port) 0x32 /* TIE=0,RIE=0,TE=1,RE=1,REIE=0,CKE=1 */ # define SCIF_ONLY #elif defined(CONFIG_CPU_SUBTYPE_SH7722) -# define PADR 0xA4050120 -# define PSDR 0xA405013e -# define PWDR 0xA4050166 -# define PSCR 0xA405011E +# define SCPDR0 0xA405013E /* 16 bit SCIF0 PSDR */ +# define SCSPTR0 SCPDR0 # define SCIF_ORER 0x0001 /* overrun error bit */ # define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ # define SCIF_ONLY +# define PORT_PSCR 0xA405011E #elif defined(CONFIG_CPU_SUBTYPE_SH7366) # define SCPDR0 0xA405013E /* 16 bit SCIF0 PSDR */ # define SCSPTR0 SCPDR0 @@ -321,7 +320,7 @@ unsigned int addr = port->mapbase + (offset); \ if ((size) == 8) { \ ctrl_outb(value, addr); \ - } else if ((size) == 16) { \ + } else { \ ctrl_outw(value, addr); \ } @@ -452,11 +451,7 @@ SCIF_FNS(SCSPTR, 0, 0, 0x24, 16) SCIF_FNS(SCLSR, 0, 0, 0x28, 16) #else SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16) -#if defined(CONFIG_CPU_SUBTYPE_SH7722) -SCIF_FNS(SCSPTR, 0, 0, 0, 0) -#else SCIF_FNS(SCSPTR, 0, 0, 0x20, 16) -#endif SCIF_FNS(SCLSR, 0, 0, 0x24, 16) #endif #endif @@ -598,25 +593,13 @@ static inline int sci_rxd_in(struct uart_port *port) return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ return 1; } -#elif defined(CONFIG_CPU_SUBTYPE_SH7366) +#elif defined(CONFIG_CPU_SUBTYPE_SH7722) || defined(CONFIG_CPU_SUBTYPE_SH7366) static inline int sci_rxd_in(struct uart_port *port) { if (port->mapbase == 0xffe00000) return ctrl_inb(SCPDR0) & 0x0001 ? 1 : 0; /* SCIF0 */ return 1; } -#elif defined(CONFIG_CPU_SUBTYPE_SH7722) -static inline int sci_rxd_in(struct uart_port *port) -{ - if (port->mapbase == 0xffe00000) - return ctrl_inb(PSDR) & 0x02 ? 1 : 0; /* SCIF0 */ - if (port->mapbase == 0xffe10000) - return ctrl_inb(PADR) & 0x40 ? 1 : 0; /* SCIF1 */ - if (port->mapbase == 0xffe20000) - return ctrl_inb(PWDR) & 0x04 ? 
1 : 0; /* SCIF2 */ - - return 1; -} #elif defined(CONFIG_CPU_SUBTYPE_SH7723) static inline int sci_rxd_in(struct uart_port *port) { diff --git a/trunk/drivers/usb/serial/iuu_phoenix.c b/trunk/drivers/usb/serial/iuu_phoenix.c index a01e987c7d32..8a217648b250 100644 --- a/trunk/drivers/usb/serial/iuu_phoenix.c +++ b/trunk/drivers/usb/serial/iuu_phoenix.c @@ -643,7 +643,7 @@ static void read_buf_callback(struct urb *urb) static int iuu_bulk_write(struct usb_serial_port *port) { struct iuu_private *priv = usb_get_serial_port_data(port); - unsigned long flags; + unsigned int flags; int result; int i; char *buf_ptr = port->write_urb->transfer_buffer; @@ -694,7 +694,7 @@ static void iuu_uart_read_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; struct iuu_private *priv = usb_get_serial_port_data(port); - unsigned long flags; + unsigned int flags; int status; int error = 0; int len = 0; @@ -759,7 +759,7 @@ static int iuu_uart_write(struct usb_serial_port *port, const u8 *buf, int count) { struct iuu_private *priv = usb_get_serial_port_data(port); - unsigned long flags; + unsigned int flags; dbg("%s - enter", __func__); if (count > 256) diff --git a/trunk/drivers/video/bw2.c b/trunk/drivers/video/bw2.c index 79f85dc402d6..e721644bad74 100644 --- a/trunk/drivers/video/bw2.c +++ b/trunk/drivers/video/bw2.c @@ -17,11 +17,9 @@ #include #include #include +#include #include -#include -#include -#include #include #include "sbuslib.h" @@ -299,7 +297,7 @@ static int __devinit bw2_probe(struct of_device *op, const struct of_device_id * par->physbase = op->resource[0].start; par->which_io = op->resource[0].flags & IORESOURCE_BITS; - sbusfb_fill_var(&info->var, dp->node, 1); + sbusfb_fill_var(&info->var, dp, 1); linebytes = of_getintprop_default(dp, "linebytes", info->var.xres); diff --git a/trunk/drivers/video/cg14.c b/trunk/drivers/video/cg14.c index 0db0fecba93b..b17e74671779 100644 --- a/trunk/drivers/video/cg14.c +++ b/trunk/drivers/video/cg14.c @@ -17,10 +17,9 @@ #include #include #include +#include #include -#include -#include #include #include "sbuslib.h" @@ -482,7 +481,7 @@ static int __devinit cg14_probe(struct of_device *op, const struct of_device_id spin_lock_init(&par->lock); - sbusfb_fill_var(&info->var, dp->node, 8); + sbusfb_fill_var(&info->var, dp, 8); info->var.red.length = 8; info->var.green.length = 8; info->var.blue.length = 8; diff --git a/trunk/drivers/video/cg3.c b/trunk/drivers/video/cg3.c index e31e26a6bb79..3aa7b6cb0268 100644 --- a/trunk/drivers/video/cg3.c +++ b/trunk/drivers/video/cg3.c @@ -17,11 +17,9 @@ #include #include #include +#include #include -#include -#include -#include #include #include "sbuslib.h" @@ -373,7 +371,7 @@ static int __devinit cg3_probe(struct of_device *op, par->physbase = op->resource[0].start; par->which_io = op->resource[0].flags & IORESOURCE_BITS; - sbusfb_fill_var(&info->var, dp->node, 8); + sbusfb_fill_var(&info->var, dp, 8); info->var.red.length = 8; info->var.green.length = 8; info->var.blue.length = 8; diff --git a/trunk/drivers/video/cg6.c b/trunk/drivers/video/cg6.c index 8000bccecdc6..2f64bb3bd254 100644 --- a/trunk/drivers/video/cg6.c +++ b/trunk/drivers/video/cg6.c @@ -17,9 +17,9 @@ #include #include #include +#include #include -#include #include #include "sbuslib.h" @@ -728,7 +728,7 @@ static int __devinit cg6_probe(struct of_device *op, par->physbase = op->resource[0].start; par->which_io = op->resource[0].flags & IORESOURCE_BITS; - sbusfb_fill_var(&info->var, dp->node, 8); + sbusfb_fill_var(&info->var, dp, 8); 
info->var.red.length = 8; info->var.green.length = 8; info->var.blue.length = 8; diff --git a/trunk/drivers/video/ffb.c b/trunk/drivers/video/ffb.c index 0f42a696d176..7992b13ee68f 100644 --- a/trunk/drivers/video/ffb.c +++ b/trunk/drivers/video/ffb.c @@ -16,11 +16,10 @@ #include #include #include +#include #include #include -#include -#include #include #include "sbuslib.h" @@ -941,7 +940,7 @@ static int __devinit ffb_probe(struct of_device *op, info->screen_base = (char *) par->physbase + FFB_DFB24_POFF; info->pseudo_palette = par->pseudo_palette; - sbusfb_fill_var(&info->var, dp->node, 32); + sbusfb_fill_var(&info->var, dp, 32); par->fbsize = PAGE_ALIGN(info->var.xres * info->var.yres * 4); ffb_fixup_var_rgb(&info->var); diff --git a/trunk/drivers/video/leo.c b/trunk/drivers/video/leo.c index fb129928d5d5..8bc46e930340 100644 --- a/trunk/drivers/video/leo.c +++ b/trunk/drivers/video/leo.c @@ -16,10 +16,9 @@ #include #include #include +#include #include -#include -#include #include #include "sbuslib.h" @@ -562,7 +561,7 @@ static int __devinit leo_probe(struct of_device *op, const struct of_device_id * par->physbase = op->resource[0].start; par->which_io = op->resource[0].flags & IORESOURCE_BITS; - sbusfb_fill_var(&info->var, dp->node, 32); + sbusfb_fill_var(&info->var, dp, 32); leo_fixup_var_rgb(&info->var); linebytes = of_getintprop_default(dp, "linebytes", diff --git a/trunk/drivers/video/p9100.c b/trunk/drivers/video/p9100.c index 676ffb06d1c7..9e903454ffc1 100644 --- a/trunk/drivers/video/p9100.c +++ b/trunk/drivers/video/p9100.c @@ -15,10 +15,9 @@ #include #include #include +#include #include -#include -#include #include #include "sbuslib.h" @@ -275,7 +274,7 @@ static int __devinit p9100_probe(struct of_device *op, const struct of_device_id par->physbase = op->resource[2].start; par->which_io = op->resource[2].flags & IORESOURCE_BITS; - sbusfb_fill_var(&info->var, dp->node, 8); + sbusfb_fill_var(&info->var, dp, 8); info->var.red.length = 8; info->var.green.length = 8; info->var.blue.length = 8; diff --git a/trunk/drivers/video/pxafb.c b/trunk/drivers/video/pxafb.c index 48aea39c35a5..3ab6e3d973a1 100644 --- a/trunk/drivers/video/pxafb.c +++ b/trunk/drivers/video/pxafb.c @@ -1301,8 +1301,8 @@ static void pxafb_decode_mode_info(struct pxafb_info *fbi, } } -static void pxafb_decode_mach_info(struct pxafb_info *fbi, - struct pxafb_mach_info *inf) +static int pxafb_decode_mach_info(struct pxafb_info *fbi, + struct pxafb_mach_info *inf) { unsigned int lcd_conn = inf->lcd_conn; @@ -1333,7 +1333,7 @@ static void pxafb_decode_mach_info(struct pxafb_info *fbi, fbi->lccr0 = inf->lccr0; fbi->lccr3 = inf->lccr3; fbi->lccr4 = inf->lccr4; - goto decode_mode; + return -EINVAL; } if (lcd_conn == LCD_MONO_STN_8BPP) @@ -1343,8 +1343,8 @@ static void pxafb_decode_mach_info(struct pxafb_info *fbi, fbi->lccr3 |= (lcd_conn & LCD_BIAS_ACTIVE_LOW) ? LCCR3_OEP : 0; fbi->lccr3 |= (lcd_conn & LCD_PCLK_EDGE_FALL) ? 
LCCR3_PCP : 0; -decode_mode: pxafb_decode_mode_info(fbi, inf->modes, inf->num_modes); + return 0; } static struct pxafb_info * __init pxafb_init_fbinfo(struct device *dev) diff --git a/trunk/drivers/video/sbuslib.c b/trunk/drivers/video/sbuslib.c index 4deaac05b938..37d764ad56b0 100644 --- a/trunk/drivers/video/sbuslib.c +++ b/trunk/drivers/video/sbuslib.c @@ -10,18 +10,19 @@ #include #include #include +#include -#include #include #include "sbuslib.h" -void sbusfb_fill_var(struct fb_var_screeninfo *var, int prom_node, int bpp) +void sbusfb_fill_var(struct fb_var_screeninfo *var, struct device_node *dp, + int bpp) { memset(var, 0, sizeof(*var)); - var->xres = prom_getintdefault(prom_node, "width", 1152); - var->yres = prom_getintdefault(prom_node, "height", 900); + var->xres = of_getintprop_default(dp, "width", 1152); + var->yres = of_getintprop_default(dp, "height", 900); var->xres_virtual = var->xres; var->yres_virtual = var->yres; var->bits_per_pixel = bpp; diff --git a/trunk/drivers/video/sbuslib.h b/trunk/drivers/video/sbuslib.h index 492828c3fe8f..7ba3250236bd 100644 --- a/trunk/drivers/video/sbuslib.h +++ b/trunk/drivers/video/sbuslib.h @@ -11,7 +11,8 @@ struct sbus_mmap_map { #define SBUS_MMAP_FBSIZE(n) (-n) #define SBUS_MMAP_EMPTY 0x80000000 -extern void sbusfb_fill_var(struct fb_var_screeninfo *var, int prom_node, int bpp); +extern void sbusfb_fill_var(struct fb_var_screeninfo *var, + struct device_node *dp, int bpp); struct vm_area_struct; extern int sbusfb_mmap_helper(struct sbus_mmap_map *map, unsigned long physbase, unsigned long fbsize, @@ -21,6 +22,6 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, struct fb_info *info, int type, int fb_depth, unsigned long fb_size); int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, - unsigned long arg); + unsigned long arg); #endif /* _SBUSLIB_H */ diff --git a/trunk/drivers/video/sunxvr2500.c b/trunk/drivers/video/sunxvr2500.c index c3869a96ab58..b1dde09e7015 100644 --- a/trunk/drivers/video/sunxvr2500.c +++ b/trunk/drivers/video/sunxvr2500.c @@ -9,10 +9,9 @@ #include #include #include +#include #include -#include -#include struct s3d_info { struct fb_info *info; diff --git a/trunk/drivers/video/sunxvr500.c b/trunk/drivers/video/sunxvr500.c index 71bf3f1f00bc..c2ba51b7ea18 100644 --- a/trunk/drivers/video/sunxvr500.c +++ b/trunk/drivers/video/sunxvr500.c @@ -9,10 +9,9 @@ #include #include #include +#include #include -#include -#include /* XXX This device has a 'dev-comm' property which aparently is * XXX a pointer into the openfirmware's address space which is diff --git a/trunk/drivers/video/tcx.c b/trunk/drivers/video/tcx.c index 44e8c27ed0fc..2a03f78bbb0d 100644 --- a/trunk/drivers/video/tcx.c +++ b/trunk/drivers/video/tcx.c @@ -17,10 +17,9 @@ #include #include #include +#include #include -#include -#include #include #include "sbuslib.h" @@ -385,7 +384,7 @@ static int __devinit tcx_probe(struct of_device *op, par->lowdepth = (of_find_property(dp, "tcx-8-bit", NULL) != NULL); - sbusfb_fill_var(&info->var, dp->node, 8); + sbusfb_fill_var(&info->var, dp, 8); info->var.red.length = 8; info->var.green.length = 8; info->var.blue.length = 8; diff --git a/trunk/fs/affs/affs.h b/trunk/fs/affs/affs.h index 223b1917093e..d5bd497ab9cb 100644 --- a/trunk/fs/affs/affs.h +++ b/trunk/fs/affs/affs.h @@ -48,7 +48,7 @@ struct affs_ext_key { * affs fs inode data in memory */ struct affs_inode_info { - atomic_t i_opencnt; + u32 i_opencnt; struct semaphore i_link_lock; /* Protects internal inode access. 
*/ struct semaphore i_ext_lock; /* Protects internal inode access. */ #define i_hash_lock i_ext_lock @@ -170,6 +170,8 @@ extern int affs_rename(struct inode *old_dir, struct dentry *old_dentry, extern unsigned long affs_parent_ino(struct inode *dir); extern struct inode *affs_new_inode(struct inode *dir); extern int affs_notify_change(struct dentry *dentry, struct iattr *attr); +extern void affs_put_inode(struct inode *inode); +extern void affs_drop_inode(struct inode *inode); extern void affs_delete_inode(struct inode *inode); extern void affs_clear_inode(struct inode *inode); extern struct inode *affs_iget(struct super_block *sb, diff --git a/trunk/fs/affs/file.c b/trunk/fs/affs/file.c index 6eac7bdeec94..1a4f092f24ef 100644 --- a/trunk/fs/affs/file.c +++ b/trunk/fs/affs/file.c @@ -48,9 +48,8 @@ affs_file_open(struct inode *inode, struct file *filp) { if (atomic_read(&filp->f_count) != 1) return 0; - pr_debug("AFFS: open(%lu,%d)\n", - inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt)); - atomic_inc(&AFFS_I(inode)->i_opencnt); + pr_debug("AFFS: open(%d)\n", AFFS_I(inode)->i_opencnt); + AFFS_I(inode)->i_opencnt++; return 0; } @@ -59,16 +58,10 @@ affs_file_release(struct inode *inode, struct file *filp) { if (atomic_read(&filp->f_count) != 0) return 0; - pr_debug("AFFS: release(%lu, %d)\n", - inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt)); - - if (atomic_dec_and_test(&AFFS_I(inode)->i_opencnt)) { - mutex_lock(&inode->i_mutex); - if (inode->i_size != AFFS_I(inode)->mmu_private) - affs_truncate(inode); + pr_debug("AFFS: release(%d)\n", AFFS_I(inode)->i_opencnt); + AFFS_I(inode)->i_opencnt--; + if (!AFFS_I(inode)->i_opencnt) affs_free_prealloc(inode); - mutex_unlock(&inode->i_mutex); - } return 0; } @@ -187,7 +180,7 @@ affs_get_extblock(struct inode *inode, u32 ext) /* inline the simplest case: same extended block as last time */ struct buffer_head *bh = AFFS_I(inode)->i_ext_bh; if (ext == AFFS_I(inode)->i_ext_last) - get_bh(bh); + atomic_inc(&bh->b_count); else /* we have to do more (not inlined) */ bh = affs_get_extblock_slow(inode, ext); @@ -313,7 +306,7 @@ affs_get_extblock_slow(struct inode *inode, u32 ext) affs_brelse(AFFS_I(inode)->i_ext_bh); AFFS_I(inode)->i_ext_last = ext; AFFS_I(inode)->i_ext_bh = bh; - get_bh(bh); + atomic_inc(&bh->b_count); return bh; @@ -331,6 +324,7 @@ affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_resul pr_debug("AFFS: get_block(%u, %lu)\n", (u32)inode->i_ino, (unsigned long)block); + BUG_ON(block > (sector_t)0x7fffffffUL); if (block >= AFFS_I(inode)->i_blkcnt) { @@ -833,8 +827,6 @@ affs_truncate(struct inode *inode) res = mapping->a_ops->write_begin(NULL, mapping, size, 0, 0, &page, &fsdata); if (!res) res = mapping->a_ops->write_end(NULL, mapping, size, 0, 0, page, fsdata); - else - inode->i_size = AFFS_I(inode)->mmu_private; mark_inode_dirty(inode); return; } else if (inode->i_size == AFFS_I(inode)->mmu_private) @@ -870,7 +862,6 @@ affs_truncate(struct inode *inode) blk++; } else AFFS_HEAD(ext_bh)->first_data = 0; - AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(i); size = AFFS_SB(sb)->s_hashsize; if (size > blkcnt - blk + i) size = blkcnt - blk + i; diff --git a/trunk/fs/affs/inode.c b/trunk/fs/affs/inode.c index a13b334a3910..27fe6cbe43ae 100644 --- a/trunk/fs/affs/inode.c +++ b/trunk/fs/affs/inode.c @@ -58,7 +58,7 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino) AFFS_I(inode)->i_extcnt = 1; AFFS_I(inode)->i_ext_last = ~1; AFFS_I(inode)->i_protect = prot; - atomic_set(&AFFS_I(inode)->i_opencnt, 0); + 
AFFS_I(inode)->i_opencnt = 0; AFFS_I(inode)->i_blkcnt = 0; AFFS_I(inode)->i_lc = NULL; AFFS_I(inode)->i_lc_size = 0; @@ -108,6 +108,8 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino) inode->i_mode |= S_IFDIR; } else inode->i_mode = S_IRUGO | S_IXUGO | S_IWUSR | S_IFDIR; + if (tail->link_chain) + inode->i_nlink = 2; /* Maybe it should be controlled by mount parameter? */ //inode->i_mode |= S_ISVTX; inode->i_op = &affs_dir_inode_operations; @@ -242,13 +244,32 @@ affs_notify_change(struct dentry *dentry, struct iattr *attr) return error; } +void +affs_put_inode(struct inode *inode) +{ + pr_debug("AFFS: put_inode(ino=%lu, nlink=%u)\n", inode->i_ino, inode->i_nlink); + affs_free_prealloc(inode); +} + +void +affs_drop_inode(struct inode *inode) +{ + mutex_lock(&inode->i_mutex); + if (inode->i_size != AFFS_I(inode)->mmu_private) + affs_truncate(inode); + mutex_unlock(&inode->i_mutex); + + generic_drop_inode(inode); +} + void affs_delete_inode(struct inode *inode) { pr_debug("AFFS: delete_inode(ino=%lu, nlink=%u)\n", inode->i_ino, inode->i_nlink); truncate_inode_pages(&inode->i_data, 0); inode->i_size = 0; - affs_truncate(inode); + if (S_ISREG(inode->i_mode)) + affs_truncate(inode); clear_inode(inode); affs_free_block(inode->i_sb, inode->i_ino); } @@ -256,12 +277,9 @@ affs_delete_inode(struct inode *inode) void affs_clear_inode(struct inode *inode) { - unsigned long cache_page; + unsigned long cache_page = (unsigned long) AFFS_I(inode)->i_lc; pr_debug("AFFS: clear_inode(ino=%lu, nlink=%u)\n", inode->i_ino, inode->i_nlink); - - affs_free_prealloc(inode); - cache_page = (unsigned long)AFFS_I(inode)->i_lc; if (cache_page) { pr_debug("AFFS: freeing ext cache\n"); AFFS_I(inode)->i_lc = NULL; @@ -298,7 +316,7 @@ affs_new_inode(struct inode *dir) inode->i_ino = block; inode->i_nlink = 1; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; - atomic_set(&AFFS_I(inode)->i_opencnt, 0); + AFFS_I(inode)->i_opencnt = 0; AFFS_I(inode)->i_blkcnt = 0; AFFS_I(inode)->i_lc = NULL; AFFS_I(inode)->i_lc_size = 0; @@ -351,12 +369,12 @@ affs_add_entry(struct inode *dir, struct inode *inode, struct dentry *dentry, s3 switch (type) { case ST_LINKFILE: case ST_LINKDIR: + inode_bh = bh; retval = -ENOSPC; block = affs_alloc_block(dir, dir->i_ino); if (!block) goto err; retval = -EIO; - inode_bh = bh; bh = affs_getzeroblk(sb, block); if (!bh) goto err; diff --git a/trunk/fs/affs/namei.c b/trunk/fs/affs/namei.c index cfcf1b6cf82b..2218f1ee71ce 100644 --- a/trunk/fs/affs/namei.c +++ b/trunk/fs/affs/namei.c @@ -234,8 +234,7 @@ affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) int affs_unlink(struct inode *dir, struct dentry *dentry) { - pr_debug("AFFS: unlink(dir=%d, %lu \"%.*s\")\n", (u32)dir->i_ino, - dentry->d_inode->i_ino, + pr_debug("AFFS: unlink(dir=%d, \"%.*s\")\n", (u32)dir->i_ino, (int)dentry->d_name.len, dentry->d_name.name); return affs_remove_header(dentry); @@ -303,8 +302,7 @@ affs_mkdir(struct inode *dir, struct dentry *dentry, int mode) int affs_rmdir(struct inode *dir, struct dentry *dentry) { - pr_debug("AFFS: rmdir(dir=%u, %lu \"%.*s\")\n", (u32)dir->i_ino, - dentry->d_inode->i_ino, + pr_debug("AFFS: rmdir(dir=%u, \"%.*s\")\n", (u32)dir->i_ino, (int)dentry->d_name.len, dentry->d_name.name); return affs_remove_header(dentry); diff --git a/trunk/fs/affs/super.c b/trunk/fs/affs/super.c index d214837d5e42..01d25d532541 100644 --- a/trunk/fs/affs/super.c +++ b/trunk/fs/affs/super.c @@ -71,18 +71,12 @@ static struct kmem_cache * affs_inode_cachep; static 
struct inode *affs_alloc_inode(struct super_block *sb) { - struct affs_inode_info *i; - - i = kmem_cache_alloc(affs_inode_cachep, GFP_KERNEL); - if (!i) + struct affs_inode_info *ei; + ei = (struct affs_inode_info *)kmem_cache_alloc(affs_inode_cachep, GFP_KERNEL); + if (!ei) return NULL; - - i->vfs_inode.i_version = 1; - i->i_lc = NULL; - i->i_ext_bh = NULL; - i->i_pa_cnt = 0; - - return &i->vfs_inode; + ei->vfs_inode.i_version = 1; + return &ei->vfs_inode; } static void affs_destroy_inode(struct inode *inode) @@ -120,6 +114,8 @@ static const struct super_operations affs_sops = { .alloc_inode = affs_alloc_inode, .destroy_inode = affs_destroy_inode, .write_inode = affs_write_inode, + .put_inode = affs_put_inode, + .drop_inode = affs_drop_inode, .delete_inode = affs_delete_inode, .clear_inode = affs_clear_inode, .put_super = affs_put_super, diff --git a/trunk/fs/bio.c b/trunk/fs/bio.c index 78562574cb52..799f86deff24 100644 --- a/trunk/fs/bio.c +++ b/trunk/fs/bio.c @@ -158,7 +158,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) bio_init(bio); if (likely(nr_iovecs)) { - unsigned long uninitialized_var(idx); + unsigned long idx = 0; /* shut up gcc */ bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs); if (unlikely(!bvl)) { @@ -963,7 +963,6 @@ static void bio_copy_kern_endio(struct bio *bio, int err) * @data: pointer to buffer to copy * @len: length in bytes * @gfp_mask: allocation flags for bio and page allocation - * @reading: data direction is READ * * copy the kernel address into a bio suitable for io to a block * device. Returns an error pointer in case of error. diff --git a/trunk/fs/cifs/CHANGES b/trunk/fs/cifs/CHANGES index 8355e918fddf..05c9da6181c3 100644 --- a/trunk/fs/cifs/CHANGES +++ b/trunk/fs/cifs/CHANGES @@ -1,6 +1,3 @@ -Version 1.53 ------------- - Version 1.52 ------------ Fix oops on second mount to server when null auth is used. 
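(Note on the affs changes above.) The affs hunks in affs.h, file.c, inode.c and super.c are pieces of one logical change: the prealloc-release and truncate-to-real-size work moves out of affs_file_release() and back into dedicated ->put_inode and ->drop_inode super_operations hooks. The fragment below only consolidates those hunks into one place as a reading aid; every identifier in it comes from the hunks themselves, and it assumes the in-tree AFFS headers (affs.h) and a kernel of this vintage whose struct super_operations still carries a put_inode hook, so treat it as a sketch rather than standalone buildable code.

    #include <linux/fs.h>
    #include "affs.h"	/* AFFS_I(), affs_free_prealloc(), affs_truncate() */

    /* Run on every iput(): give back any preallocated blocks. */
    void affs_put_inode(struct inode *inode)
    {
    	pr_debug("AFFS: put_inode(ino=%lu, nlink=%u)\n",
    		 inode->i_ino, inode->i_nlink);
    	affs_free_prealloc(inode);
    }

    /* Run when the last reference is dropped: trim the file back to its
     * on-disk size before letting generic_drop_inode() decide its fate. */
    void affs_drop_inode(struct inode *inode)
    {
    	mutex_lock(&inode->i_mutex);
    	if (inode->i_size != AFFS_I(inode)->mmu_private)
    		affs_truncate(inode);
    	mutex_unlock(&inode->i_mutex);

    	generic_drop_inode(inode);
    }

    /* super.c then wires both hooks into affs_sops: */
    static const struct super_operations affs_sops = {
    	/* ... */
    	.put_inode	= affs_put_inode,
    	.drop_inode	= affs_drop_inode,
    	/* ... */
    };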
diff --git a/trunk/fs/cifs/asn1.c b/trunk/fs/cifs/asn1.c index cb52cbbe45ff..bcda2c6b6a04 100644 --- a/trunk/fs/cifs/asn1.c +++ b/trunk/fs/cifs/asn1.c @@ -460,8 +460,8 @@ decode_negTokenInit(unsigned char *security_blob, int length, unsigned char *sequence_end; unsigned long *oid = NULL; unsigned int cls, con, tag, oidlen, rc; - bool use_ntlmssp = false; - bool use_kerberos = false; + int use_ntlmssp = FALSE; + int use_kerberos = FALSE; *secType = NTLM; /* BB eventually make Kerberos or NLTMSSP the default*/ @@ -561,15 +561,15 @@ decode_negTokenInit(unsigned char *security_blob, int length, if (compare_oid(oid, oidlen, MSKRB5_OID, MSKRB5_OID_LEN)) - use_kerberos = true; + use_kerberos = TRUE; else if (compare_oid(oid, oidlen, KRB5_OID, KRB5_OID_LEN)) - use_kerberos = true; + use_kerberos = TRUE; else if (compare_oid(oid, oidlen, NTLMSSP_OID, NTLMSSP_OID_LEN)) - use_ntlmssp = true; + use_ntlmssp = TRUE; kfree(oid); } diff --git a/trunk/fs/cifs/cifs_dfs_ref.c b/trunk/fs/cifs/cifs_dfs_ref.c index f6fdecf6598c..95024c066d89 100644 --- a/trunk/fs/cifs/cifs_dfs_ref.c +++ b/trunk/fs/cifs/cifs_dfs_ref.c @@ -93,11 +93,15 @@ static char *cifs_get_share_name(const char *node_name) /* find sharename end */ pSep++; pSep = memchr(UNC+(pSep-UNC), '\\', len-(pSep-UNC)); - if (pSep) { - /* trim path up to sharename end - * now we have share name in UNC */ - *pSep = 0; + if (!pSep) { + cERROR(1, ("%s:2 cant find share name in node name: %s", + __func__, node_name)); + kfree(UNC); + return NULL; } + /* trim path up to sharename end + * * now we have share name in UNC */ + *pSep = 0; return UNC; } @@ -184,7 +188,7 @@ static char *compose_mount_options(const char *sb_mountdata, tkn_e = strchr(tkn_e+1, '\\'); if (tkn_e) { strcat(mountdata, ",prefixpath="); - strcat(mountdata, tkn_e+1); + strcat(mountdata, tkn_e); } } @@ -240,8 +244,7 @@ static char *build_full_dfs_path_from_dentry(struct dentry *dentry) return NULL; if (cifs_sb->tcon->Flags & SMB_SHARE_IS_IN_DFS) { - int i; - /* we should use full path name for correct working with DFS */ + /* we should use full path name to correct working with DFS */ l_max_len = strnlen(cifs_sb->tcon->treeName, MAX_TREE_SIZE+1) + strnlen(search_path, MAX_PATHCONF) + 1; tmp_path = kmalloc(l_max_len, GFP_KERNEL); @@ -250,14 +253,8 @@ static char *build_full_dfs_path_from_dentry(struct dentry *dentry) return NULL; } strncpy(tmp_path, cifs_sb->tcon->treeName, l_max_len); + strcat(tmp_path, search_path); tmp_path[l_max_len-1] = 0; - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) - for (i = 0; i < l_max_len; i++) { - if (tmp_path[i] == '\\') - tmp_path[i] = '/'; - } - strncat(tmp_path, search_path, l_max_len - strlen(tmp_path)); - full_path = tmp_path; kfree(search_path); } else { diff --git a/trunk/fs/cifs/cifsacl.c b/trunk/fs/cifs/cifsacl.c index 34902cff5400..e99d4faf5f02 100644 --- a/trunk/fs/cifs/cifsacl.c +++ b/trunk/fs/cifs/cifsacl.c @@ -559,7 +559,7 @@ static struct cifs_ntsd *get_cifs_acl(u32 *pacllen, struct inode *inode, const char *path, const __u16 *pfid) { struct cifsFileInfo *open_file = NULL; - bool unlock_file = false; + int unlock_file = FALSE; int xid; int rc = -EIO; __u16 fid; @@ -586,10 +586,10 @@ static struct cifs_ntsd *get_cifs_acl(u32 *pacllen, struct inode *inode, cifs_sb = CIFS_SB(sb); if (open_file) { - unlock_file = true; + unlock_file = TRUE; fid = open_file->netfid; } else if (pfid == NULL) { - int oplock = 0; + int oplock = FALSE; /* open file */ rc = CIFSSMBOpen(xid, cifs_sb->tcon, path, FILE_OPEN, READ_CONTROL, 0, &fid, &oplock, NULL, @@ 
-604,7 +604,7 @@ static struct cifs_ntsd *get_cifs_acl(u32 *pacllen, struct inode *inode, rc = CIFSSMBGetCIFSACL(xid, cifs_sb->tcon, fid, &pntsd, pacllen); cFYI(1, ("GetCIFSACL rc = %d ACL len %d", rc, *pacllen)); - if (unlock_file == true) /* find_readable_file increments ref count */ + if (unlock_file == TRUE) /* find_readable_file increments ref count */ atomic_dec(&open_file->wrtPending); else if (pfid == NULL) /* if opened above we have to close the handle */ CIFSSMBClose(xid, cifs_sb->tcon, fid); @@ -619,7 +619,7 @@ static int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen, struct inode *inode, const char *path) { struct cifsFileInfo *open_file; - bool unlock_file = false; + int unlock_file = FALSE; int xid; int rc = -EIO; __u16 fid; @@ -640,10 +640,10 @@ static int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen, open_file = find_readable_file(CIFS_I(inode)); if (open_file) { - unlock_file = true; + unlock_file = TRUE; fid = open_file->netfid; } else { - int oplock = 0; + int oplock = FALSE; /* open file */ rc = CIFSSMBOpen(xid, cifs_sb->tcon, path, FILE_OPEN, WRITE_DAC, 0, &fid, &oplock, NULL, @@ -658,7 +658,7 @@ static int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen, rc = CIFSSMBSetCIFSACL(xid, cifs_sb->tcon, fid, pnntsd, acllen); cFYI(DBG2, ("SetCIFSACL rc = %d", rc)); - if (unlock_file) + if (unlock_file == TRUE) atomic_dec(&open_file->wrtPending); else CIFSSMBClose(xid, cifs_sb->tcon, fid); diff --git a/trunk/fs/cifs/cifsfs.c b/trunk/fs/cifs/cifsfs.c index 427a7c695896..39c2cbdface7 100644 --- a/trunk/fs/cifs/cifsfs.c +++ b/trunk/fs/cifs/cifsfs.c @@ -222,50 +222,50 @@ static int cifs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; - struct cifs_sb_info *cifs_sb = CIFS_SB(sb); - struct cifsTconInfo *tcon = cifs_sb->tcon; - int rc = -EOPNOTSUPP; int xid; + int rc = -EOPNOTSUPP; + struct cifs_sb_info *cifs_sb; + struct cifsTconInfo *pTcon; xid = GetXid(); + cifs_sb = CIFS_SB(sb); + pTcon = cifs_sb->tcon; + buf->f_type = CIFS_MAGIC_NUMBER; - /* - * PATH_MAX may be too long - it would presumably be total path, - * but note that some servers (includinng Samba 3) have a shorter - * maximum path. - * - * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO. - */ - buf->f_namelen = PATH_MAX; + /* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */ + buf->f_namelen = PATH_MAX; /* PATH_MAX may be too long - it would + presumably be total path, but note + that some servers (includinng Samba 3) + have a shorter maximum path */ buf->f_files = 0; /* undefined */ buf->f_ffree = 0; /* unlimited */ - /* - * We could add a second check for a QFS Unix capability bit - */ - if ((tcon->ses->capabilities & CAP_UNIX) && - (CIFS_POSIX_EXTENSIONS & le64_to_cpu(tcon->fsUnixInfo.Capability))) - rc = CIFSSMBQFSPosixInfo(xid, tcon, buf); - - /* - * Only need to call the old QFSInfo if failed on newer one, - * e.g. by OS/2. 
- **/ - if (rc && (tcon->ses->capabilities & CAP_NT_SMBS)) - rc = CIFSSMBQFSInfo(xid, tcon, buf); - - /* - * Some old Windows servers also do not support level 103, retry with - * older level one if old server failed the previous call or we - * bypassed it because we detected that this was an older LANMAN sess - */ +/* BB we could add a second check for a QFS Unix capability bit */ +/* BB FIXME check CIFS_POSIX_EXTENSIONS Unix cap first FIXME BB */ + if ((pTcon->ses->capabilities & CAP_UNIX) && (CIFS_POSIX_EXTENSIONS & + le64_to_cpu(pTcon->fsUnixInfo.Capability))) + rc = CIFSSMBQFSPosixInfo(xid, pTcon, buf); + + /* Only need to call the old QFSInfo if failed + on newer one */ + if (rc) + if (pTcon->ses->capabilities & CAP_NT_SMBS) + rc = CIFSSMBQFSInfo(xid, pTcon, buf); /* not supported by OS2 */ + + /* Some old Windows servers also do not support level 103, retry with + older level one if old server failed the previous call or we + bypassed it because we detected that this was an older LANMAN sess */ if (rc) - rc = SMBOldQFSInfo(xid, tcon, buf); - + rc = SMBOldQFSInfo(xid, pTcon, buf); + /* int f_type; + __fsid_t f_fsid; + int f_namelen; */ + /* BB get from info in tcon struct at mount time call to QFSAttrInfo */ FreeXid(xid); - return 0; + return 0; /* always return success? what if volume is no + longer available? */ } static int cifs_permission(struct inode *inode, int mask, struct nameidata *nd) @@ -306,8 +306,8 @@ cifs_alloc_inode(struct super_block *sb) /* Until the file is open and we have gotten oplock info back from the server, can not assume caching of file data or metadata */ - cifs_inode->clientCanCacheRead = false; - cifs_inode->clientCanCacheAll = false; + cifs_inode->clientCanCacheRead = FALSE; + cifs_inode->clientCanCacheAll = FALSE; cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */ /* Can not set i_flags here - they get immediately overwritten @@ -940,7 +940,7 @@ static int cifs_oplock_thread(void *dummyarg) rc = CIFSSMBLock(0, pTcon, netfid, 0 /* len */ , 0 /* offset */, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, - false /* wait flag */); + 0 /* wait flag */); cFYI(1, ("Oplock release rc = %d", rc)); } } else diff --git a/trunk/fs/cifs/cifsfs.h b/trunk/fs/cifs/cifsfs.h index cd1301a09b3b..e1dd9f32e1d7 100644 --- a/trunk/fs/cifs/cifsfs.h +++ b/trunk/fs/cifs/cifsfs.h @@ -24,6 +24,14 @@ #define ROOT_I 2 +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef TRUE +#define TRUE 1 +#endif + extern struct file_system_type cifs_fs_type; extern const struct address_space_operations cifs_addr_ops; extern const struct address_space_operations cifs_addr_ops_smallbuf; @@ -102,5 +110,5 @@ extern int cifs_ioctl(struct inode *inode, struct file *filep, extern const struct export_operations cifs_export_ops; #endif /* EXPERIMENTAL */ -#define CIFS_VERSION "1.53" +#define CIFS_VERSION "1.52" #endif /* _CIFSFS_H */ diff --git a/trunk/fs/cifs/cifsglob.h b/trunk/fs/cifs/cifsglob.h index b7d9f698e63e..69a2e1942542 100644 --- a/trunk/fs/cifs/cifsglob.h +++ b/trunk/fs/cifs/cifsglob.h @@ -57,6 +57,14 @@ #include "cifspdu.h" +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef TRUE +#define TRUE 1 +#endif + #ifndef XATTR_DOS_ATTRIB #define XATTR_DOS_ATTRIB "user.DOSATTRIB" #endif @@ -139,7 +147,7 @@ struct TCP_Server_Info { enum protocolEnum protocolType; char versionMajor; char versionMinor; - bool svlocal:1; /* local server or remote */ + unsigned svlocal:1; /* local server or remote */ atomic_t socketUseCount; /* number of open cifs sessions on socket */ atomic_t inFlight; /* number of 
requests on the wire to server */ #ifdef CONFIG_CIFS_STATS2 @@ -278,10 +286,10 @@ struct cifsTconInfo { FILE_SYSTEM_DEVICE_INFO fsDevInfo; FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */ FILE_SYSTEM_UNIX_INFO fsUnixInfo; - bool ipc:1; /* set if connection to IPC$ eg for RPC/PIPES */ - bool retry:1; - bool nocase:1; - bool unix_ext:1; /* if false disable Linux extensions to CIFS protocol + unsigned ipc:1; /* set if connection to IPC$ eg for RPC/PIPES */ + unsigned retry:1; + unsigned nocase:1; + unsigned unix_ext:1; /* if off disable Linux extensions to CIFS protocol for this mount even if server would support */ /* BB add field for back pointer to sb struct(s)? */ }; @@ -309,10 +317,10 @@ struct cifs_search_info { char *srch_entries_start; char *presume_name; unsigned int resume_name_len; - bool endOfSearch:1; - bool emptyDir:1; - bool unicode:1; - bool smallBuf:1; /* so we know which buf_release function to call */ + unsigned endOfSearch:1; + unsigned emptyDir:1; + unsigned unicode:1; + unsigned smallBuf:1; /* so we know which buf_release function to call */ }; struct cifsFileInfo { @@ -327,9 +335,9 @@ struct cifsFileInfo { struct inode *pInode; /* needed for oplock break */ struct mutex lock_mutex; struct list_head llist; /* list of byte range locks we have. */ - bool closePend:1; /* file is marked to close */ - bool invalidHandle:1; /* file closed via session abend */ - bool messageMode:1; /* for pipes: message vs byte mode */ + unsigned closePend:1; /* file is marked to close */ + unsigned invalidHandle:1; /* file closed via session abend */ + unsigned messageMode:1; /* for pipes: message vs byte mode */ atomic_t wrtPending; /* handle in use - defer close */ struct semaphore fh_sem; /* prevents reopen race after dead ses*/ char *search_resume_name; /* BB removeme BB */ @@ -348,9 +356,9 @@ struct cifsInodeInfo { __u32 cifsAttrs; /* e.g. 
DOS archive bit, sparse, compressed, system */ atomic_t inUse; /* num concurrent users (local openers cifs) of file*/ unsigned long time; /* jiffies of last update/check of inode */ - bool clientCanCacheRead:1; /* read oplock */ - bool clientCanCacheAll:1; /* read and writebehind oplock */ - bool oplockPending:1; + unsigned clientCanCacheRead:1; /* read oplock */ + unsigned clientCanCacheAll:1; /* read and writebehind oplock */ + unsigned oplockPending:1; struct inode vfs_inode; }; @@ -418,9 +426,9 @@ struct mid_q_entry { struct smb_hdr *resp_buf; /* response buffer */ int midState; /* wish this were enum but can not pass to wait_event */ __u8 command; /* smb command code */ - bool largeBuf:1; /* if valid response, is pointer to large buf */ - bool multiRsp:1; /* multiple trans2 responses for one request */ - bool multiEnd:1; /* both received */ + unsigned largeBuf:1; /* if valid response, is pointer to large buf */ + unsigned multiRsp:1; /* multiple trans2 responses for one request */ + unsigned multiEnd:1; /* both received */ }; struct oplock_q_entry { diff --git a/trunk/fs/cifs/cifspdu.h b/trunk/fs/cifs/cifspdu.h index a0d26b540d4e..9f49c2f3582c 100644 --- a/trunk/fs/cifs/cifspdu.h +++ b/trunk/fs/cifs/cifspdu.h @@ -2050,7 +2050,7 @@ typedef struct { to 0xFFFF00 */ #define CIFS_UNIX_LARGE_WRITE_CAP 0x00000080 #define CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP 0x00000100 /* can do SPNEGO crypt */ -#define CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP 0x00000200 /* must do */ +#define CIFS_UNIX_TRANPSORT_ENCRYPTION_MANDATORY_CAP 0x00000200 /* must do */ #define CIFS_UNIX_PROXY_CAP 0x00000400 /* Proxy cap: 0xACE ioctl and QFS PROXY call */ #ifdef CONFIG_CIFS_POSIX diff --git a/trunk/fs/cifs/cifsproto.h b/trunk/fs/cifs/cifsproto.h index d481f6c5a2be..50f9fdae19b3 100644 --- a/trunk/fs/cifs/cifsproto.h +++ b/trunk/fs/cifs/cifsproto.h @@ -59,9 +59,8 @@ extern int SendReceiveBlockingLock(const unsigned int xid, struct smb_hdr *out_buf, int *bytes_returned); extern int checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length); -extern bool is_valid_oplock_break(struct smb_hdr *smb, - struct TCP_Server_Info *); -extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof); +extern int is_valid_oplock_break(struct smb_hdr *smb, struct TCP_Server_Info *); +extern int is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof); extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *); #ifdef CONFIG_CIFS_EXPERIMENTAL extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *); @@ -70,7 +69,7 @@ extern unsigned int smbCalcSize(struct smb_hdr *ptr); extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr); extern int decode_negTokenInit(unsigned char *security_blob, int length, enum securityEnum *secType); -extern int cifs_inet_pton(const int, const char *source, void *dst); +extern int cifs_inet_pton(int, char *source, void *dst); extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr); extern void header_assemble(struct smb_hdr *, char /* command */ , const struct cifsTconInfo *, int /* length of @@ -188,12 +187,12 @@ extern int CIFSSMBSetAttrLegacy(int xid, struct cifsTconInfo *tcon, #endif /* possibly unneeded function */ extern int CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon, const char *fileName, __u64 size, - bool setAllocationSizeFlag, + int setAllocationSizeFlag, const struct nls_table *nls_codepage, int remap_special_chars); extern int CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size, __u16 fileHandle, __u32 opener_pid, - 
bool AllocSizeFlag); + int AllocSizeFlag); extern int CIFSSMBUnixSetPerms(const int xid, struct cifsTconInfo *pTcon, char *full_path, __u64 mode, __u64 uid, __u64 gid, dev_t dev, @@ -292,11 +291,11 @@ extern int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, const __u16 netfid, const __u64 len, const __u64 offset, const __u32 numUnlock, const __u32 numLock, const __u8 lockType, - const bool waitFlag); + const int waitFlag); extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, const __u16 smb_file_id, const int get_flag, const __u64 len, struct file_lock *, - const __u16 lock_type, const bool waitFlag); + const __u16 lock_type, const int waitFlag); extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon); extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses); diff --git a/trunk/fs/cifs/cifssmb.c b/trunk/fs/cifs/cifssmb.c index cfd9750852b3..4728fa982a4e 100644 --- a/trunk/fs/cifs/cifssmb.c +++ b/trunk/fs/cifs/cifssmb.c @@ -95,7 +95,7 @@ static void mark_open_files_invalid(struct cifsTconInfo *pTcon) list_for_each_safe(tmp, tmp1, &pTcon->openFileList) { open_file = list_entry(tmp, struct cifsFileInfo, tlist); if (open_file) - open_file->invalidHandle = true; + open_file->invalidHandle = TRUE; } write_unlock(&GlobalSMBSeslock); /* BB Add call to invalidate_inodes(sb) for all superblocks mounted @@ -141,7 +141,7 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, if (tcon->ses->server->tcpStatus == CifsNeedReconnect) { /* on "soft" mounts we wait once */ - if (!tcon->retry || + if ((tcon->retry == FALSE) || (tcon->ses->status == CifsExiting)) { cFYI(1, ("gave up waiting on " "reconnect in smb_init")); @@ -289,7 +289,7 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, if (tcon->ses->server->tcpStatus == CifsNeedReconnect) { /* on "soft" mounts we wait once */ - if (!tcon->retry || + if ((tcon->retry == FALSE) || (tcon->ses->status == CifsExiting)) { cFYI(1, ("gave up waiting on " "reconnect in smb_init")); @@ -1686,7 +1686,7 @@ int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, const __u16 smb_file_id, const __u64 len, const __u64 offset, const __u32 numUnlock, - const __u32 numLock, const __u8 lockType, const bool waitFlag) + const __u32 numLock, const __u8 lockType, const int waitFlag) { int rc = 0; LOCK_REQ *pSMB = NULL; @@ -1695,7 +1695,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, int timeout = 0; __u16 count; - cFYI(1, ("CIFSSMBLock timeout %d numLock %d", (int)waitFlag, numLock)); + cFYI(1, ("CIFSSMBLock timeout %d numLock %d", waitFlag, numLock)); rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB); if (rc) @@ -1706,7 +1706,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, if (lockType == LOCKING_ANDX_OPLOCK_RELEASE) { timeout = CIFS_ASYNC_OP; /* no response expected */ pSMB->Timeout = 0; - } else if (waitFlag) { + } else if (waitFlag == TRUE) { timeout = CIFS_BLOCKING_OP; /* blocking operation, no timeout */ pSMB->Timeout = cpu_to_le32(-1);/* blocking - do not time out */ } else { @@ -1756,7 +1756,7 @@ int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, const __u16 smb_file_id, const int get_flag, const __u64 len, struct file_lock *pLockData, const __u16 lock_type, - const bool waitFlag) + const int waitFlag) { struct smb_com_transaction2_sfi_req *pSMB = NULL; struct smb_com_transaction2_sfi_rsp *pSMBr = NULL; @@ -3581,9 +3581,9 @@ CIFSFindFirst(const int xid, struct cifsTconInfo *tcon, rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc == 0) { if 
(pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) - psrch_inf->unicode = true; + psrch_inf->unicode = TRUE; else - psrch_inf->unicode = false; + psrch_inf->unicode = FALSE; psrch_inf->ntwrk_buf_start = (char *)pSMBr; psrch_inf->smallBuf = 0; @@ -3594,9 +3594,9 @@ CIFSFindFirst(const int xid, struct cifsTconInfo *tcon, le16_to_cpu(pSMBr->t2.ParameterOffset)); if (parms->EndofSearch) - psrch_inf->endOfSearch = true; + psrch_inf->endOfSearch = TRUE; else - psrch_inf->endOfSearch = false; + psrch_inf->endOfSearch = FALSE; psrch_inf->entries_in_buffer = le16_to_cpu(parms->SearchCount); @@ -3624,7 +3624,7 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon, cFYI(1, ("In FindNext")); - if (psrch_inf->endOfSearch) + if (psrch_inf->endOfSearch == TRUE) return -ENOENT; rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, @@ -3682,7 +3682,7 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon, cifs_stats_inc(&tcon->num_fnext); if (rc) { if (rc == -EBADF) { - psrch_inf->endOfSearch = true; + psrch_inf->endOfSearch = TRUE; rc = 0; /* search probably was closed at end of search*/ } else cFYI(1, ("FindNext returned = %d", rc)); @@ -3692,9 +3692,9 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon, if (rc == 0) { /* BB fixme add lock for file (srch_info) struct here */ if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) - psrch_inf->unicode = true; + psrch_inf->unicode = TRUE; else - psrch_inf->unicode = false; + psrch_inf->unicode = FALSE; response_data = (char *) &pSMBr->hdr.Protocol + le16_to_cpu(pSMBr->t2.ParameterOffset); parms = (T2_FNEXT_RSP_PARMS *)response_data; @@ -3709,9 +3709,9 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon, psrch_inf->ntwrk_buf_start = (char *)pSMB; psrch_inf->smallBuf = 0; if (parms->EndofSearch) - psrch_inf->endOfSearch = true; + psrch_inf->endOfSearch = TRUE; else - psrch_inf->endOfSearch = false; + psrch_inf->endOfSearch = FALSE; psrch_inf->entries_in_buffer = le16_to_cpu(parms->SearchCount); psrch_inf->index_of_last_entry += @@ -4586,7 +4586,7 @@ CIFSSMBQFSPosixInfo(const int xid, struct cifsTconInfo *tcon, int CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon, const char *fileName, - __u64 size, bool SetAllocation, + __u64 size, int SetAllocation, const struct nls_table *nls_codepage, int remap) { struct smb_com_transaction2_spi_req *pSMB = NULL; @@ -4675,7 +4675,7 @@ CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon, const char *fileName, int CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size, - __u16 fid, __u32 pid_of_opener, bool SetAllocation) + __u16 fid, __u32 pid_of_opener, int SetAllocation) { struct smb_com_transaction2_sfi_req *pSMB = NULL; char *data_offset; diff --git a/trunk/fs/cifs/connect.c b/trunk/fs/cifs/connect.c index 791ca5c1a116..e17106730168 100644 --- a/trunk/fs/cifs/connect.c +++ b/trunk/fs/cifs/connect.c @@ -49,6 +49,8 @@ #define CIFS_PORT 445 #define RFC1001_PORT 139 +static DECLARE_COMPLETION(cifsd_complete); + extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24); @@ -69,23 +71,23 @@ struct smb_vol { mode_t file_mode; mode_t dir_mode; unsigned secFlg; - bool rw:1; - bool retry:1; - bool intr:1; - bool setuids:1; - bool override_uid:1; - bool override_gid:1; - bool noperm:1; - bool no_psx_acl:1; /* set if posix acl support should be disabled */ - bool cifs_acl:1; - bool no_xattr:1; /* set if xattr (EA) support should be disabled*/ - bool server_ino:1; /* use inode numbers from server ie UniqueId */ - bool direct_io:1; - bool remap:1; /* set to remap 
seven reserved chars in filenames */ - bool posix_paths:1; /* unset to not ask for posix pathnames. */ - bool no_linux_ext:1; - bool sfu_emul:1; - bool nullauth:1; /* attempt to authenticate with null user */ + unsigned rw:1; + unsigned retry:1; + unsigned intr:1; + unsigned setuids:1; + unsigned override_uid:1; + unsigned override_gid:1; + unsigned noperm:1; + unsigned no_psx_acl:1; /* set if posix acl support should be disabled */ + unsigned cifs_acl:1; + unsigned no_xattr:1; /* set if xattr (EA) support should be disabled*/ + unsigned server_ino:1; /* use inode numbers from server ie UniqueId */ + unsigned direct_io:1; + unsigned remap:1; /* set to remap seven reserved chars in filenames */ + unsigned posix_paths:1; /* unset to not ask for posix pathnames. */ + unsigned no_linux_ext:1; + unsigned sfu_emul:1; + unsigned nullauth:1; /* attempt to authenticate with null user */ unsigned nocase; /* request case insensitive filenames */ unsigned nobrl; /* disable sending byte range locks to srv */ unsigned int rsize; @@ -343,8 +345,8 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) struct task_struct *task_to_wake = NULL; struct mid_q_entry *mid_entry; char temp; - bool isLargeBuf = false; - bool isMultiRsp; + int isLargeBuf = FALSE; + int isMultiRsp; int reconnect; current->flags |= PF_MEMALLOC; @@ -354,6 +356,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) atomic_inc(&tcpSesAllocCount); length = tcpSesAllocCount.counter; write_unlock(&GlobalSMBSeslock); + complete(&cifsd_complete); if (length > 1) mempool_resize(cifs_req_poolp, length + cifs_min_rcv, GFP_KERNEL); @@ -387,8 +390,8 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) } else /* if existing small buf clear beginning */ memset(smallbuf, 0, sizeof(struct smb_hdr)); - isLargeBuf = false; - isMultiRsp = false; + isLargeBuf = FALSE; + isMultiRsp = FALSE; smb_buffer = smallbuf; iov.iov_base = smb_buffer; iov.iov_len = 4; @@ -514,7 +517,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) reconnect = 0; if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) { - isLargeBuf = true; + isLargeBuf = TRUE; memcpy(bigbuf, smallbuf, 4); smb_buffer = bigbuf; } @@ -579,18 +582,16 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) (mid_entry->command == smb_buffer->Command)) { if (check2ndT2(smb_buffer,server->maxBuf) > 0) { /* We have a multipart transact2 resp */ - isMultiRsp = true; + isMultiRsp = TRUE; if (mid_entry->resp_buf) { /* merge response - fix up 1st*/ if (coalesce_t2(smb_buffer, mid_entry->resp_buf)) { - mid_entry->multiRsp = - true; + mid_entry->multiRsp = 1; break; } else { /* all parts received */ - mid_entry->multiEnd = - true; + mid_entry->multiEnd = 1; goto multi_t2_fnd; } } else { @@ -602,15 +603,17 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) /* Have first buffer */ mid_entry->resp_buf = smb_buffer; - mid_entry->largeBuf = - true; + mid_entry->largeBuf = 1; bigbuf = NULL; } } break; } mid_entry->resp_buf = smb_buffer; - mid_entry->largeBuf = isLargeBuf; + if (isLargeBuf) + mid_entry->largeBuf = 1; + else + mid_entry->largeBuf = 0; multi_t2_fnd: task_to_wake = mid_entry->tsk; mid_entry->midState = MID_RESPONSE_RECEIVED; @@ -635,8 +638,8 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) smallbuf = NULL; } wake_up_process(task_to_wake); - } else if (!is_valid_oplock_break(smb_buffer, server) && - !isMultiRsp) { + } else if ((is_valid_oplock_break(smb_buffer, server) == FALSE) + && (isMultiRsp == FALSE)) { cERROR(1, ("No task to wake, unknown frame received! 
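Note: the connect.c hunks above switch the one-bit flags in struct smb_vol from C99 bool bitfields back to unsigned bitfields, and the surrounding cifs code from true/false to the driver's TRUE/FALSE macros. A minimal, self-contained sketch of the resulting style; the flag names are borrowed from the hunk, but the struct itself is only an illustration:

    #ifndef TRUE
    #define TRUE 1
    #endif
    #ifndef FALSE
    #define FALSE 0
    #endif

    /* one-bit flags packed into a single unsigned word, no <stdbool.h> required */
    struct mount_flags {
        unsigned retry:1;
        unsigned intr:1;
        unsigned direct_io:1;
    };

    int main(void)
    {
        struct mount_flags f = { 0 };

        f.retry = TRUE;                 /* stored as the single bit 1 */
        if (f.retry == TRUE)            /* compares like the int-based code above */
            f.direct_io = FALSE;
        return 0;
    }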
" "NumMids %d", midCount.counter)); cifs_dump_mem("Received Data is: ", (char *)smb_buffer, @@ -822,7 +825,7 @@ cifs_parse_mount_options(char *options, const char *devname, vol->file_mode = (S_IRWXUGO | S_ISGID) & (~S_IXGRP); /* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */ - vol->rw = true; + vol->rw = TRUE; /* default is always to request posix paths. */ vol->posix_paths = 1; @@ -1178,7 +1181,7 @@ cifs_parse_mount_options(char *options, const char *devname, } else if (strnicmp(data, "guest", 5) == 0) { /* ignore */ } else if (strnicmp(data, "rw", 2) == 0) { - vol->rw = true; + vol->rw = TRUE; } else if ((strnicmp(data, "suid", 4) == 0) || (strnicmp(data, "nosuid", 6) == 0) || (strnicmp(data, "exec", 4) == 0) || @@ -1194,7 +1197,7 @@ cifs_parse_mount_options(char *options, const char *devname, is ok to just ignore them */ continue; } else if (strnicmp(data, "ro", 2) == 0) { - vol->rw = false; + vol->rw = FALSE; } else if (strnicmp(data, "hard", 4) == 0) { vol->retry = 1; } else if (strnicmp(data, "soft", 4) == 0) { @@ -1302,9 +1305,6 @@ cifs_parse_mount_options(char *options, const char *devname, "begin with // or \\\\ \n"); return 1; } - value = strpbrk(vol->UNC+2, "/\\"); - if (value) - *value = '\\'; } else { printk(KERN_WARNING "CIFS: UNC name too long\n"); return 1; @@ -1362,43 +1362,45 @@ find_unc(__be32 new_target_ip_addr, char *uncName, char *userName) { struct list_head *tmp; struct cifsTconInfo *tcon; - __be32 old_ip; read_lock(&GlobalSMBSeslock); - list_for_each(tmp, &GlobalTreeConnectionList) { cFYI(1, ("Next tcon")); tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList); - if (!tcon->ses || !tcon->ses->server) - continue; - - old_ip = tcon->ses->server->addr.sockAddr.sin_addr.s_addr; - cFYI(1, ("old ip addr: %x == new ip %x ?", - old_ip, new_target_ip_addr)); - - if (old_ip != new_target_ip_addr) - continue; - - /* BB lock tcon, server, tcp session and increment use count? */ - /* found a match on the TCP session */ - /* BB check if reconnection needed */ - cFYI(1, ("IP match, old UNC: %s new: %s", - tcon->treeName, uncName)); - - if (strncmp(tcon->treeName, uncName, MAX_TREE_SIZE)) - continue; - - cFYI(1, ("and old usr: %s new: %s", - tcon->treeName, uncName)); - - if (strncmp(tcon->ses->userName, userName, MAX_USERNAME_SIZE)) - continue; - - /* matched smb session (user name) */ - read_unlock(&GlobalSMBSeslock); - return tcon; + if (tcon->ses) { + if (tcon->ses->server) { + cFYI(1, + ("old ip addr: %x == new ip %x ?", + tcon->ses->server->addr.sockAddr.sin_addr. + s_addr, new_target_ip_addr)); + if (tcon->ses->server->addr.sockAddr.sin_addr. + s_addr == new_target_ip_addr) { + /* BB lock tcon, server and tcp session and increment use count here? 
*/ + /* found a match on the TCP session */ + /* BB check if reconnection needed */ + cFYI(1, + ("IP match, old UNC: %s new: %s", + tcon->treeName, uncName)); + if (strncmp + (tcon->treeName, uncName, + MAX_TREE_SIZE) == 0) { + cFYI(1, + ("and old usr: %s new: %s", + tcon->treeName, uncName)); + if (strncmp + (tcon->ses->userName, + userName, + MAX_USERNAME_SIZE) == 0) { + read_unlock(&GlobalSMBSeslock); + /* matched smb session + (user name */ + return tcon; + } + } + } + } + } } - read_unlock(&GlobalSMBSeslock); return NULL; } @@ -1980,6 +1982,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, kfree(srvTcp->hostname); goto out; } + wait_for_completion(&cifsd_complete); rc = 0; memcpy(srvTcp->workstation_RFC1001_name, volume_info.source_rfc1001_name, 16); @@ -2599,7 +2602,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses, static int CIFSNTLMSSPNegotiateSessSetup(unsigned int xid, - struct cifsSesInfo *ses, bool *pNTLMv2_flag, + struct cifsSesInfo *ses, int *pNTLMv2_flag, const struct nls_table *nls_codepage) { struct smb_hdr *smb_buffer; @@ -2622,7 +2625,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid, if (ses == NULL) return -EINVAL; domain = ses->domainName; - *pNTLMv2_flag = false; + *pNTLMv2_flag = FALSE; smb_buffer = cifs_buf_get(); if (smb_buffer == NULL) { return -ENOMEM; @@ -2775,7 +2778,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid, CIFS_CRYPTO_KEY_SIZE); if (SecurityBlob2->NegotiateFlags & cpu_to_le32(NTLMSSP_NEGOTIATE_NTLMV2)) - *pNTLMv2_flag = true; + *pNTLMv2_flag = TRUE; if ((SecurityBlob2->NegotiateFlags & cpu_to_le32(NTLMSSP_NEGOTIATE_ALWAYS_SIGN)) @@ -2936,7 +2939,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid, } static int CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses, - char *ntlm_session_key, bool ntlmv2_flag, + char *ntlm_session_key, int ntlmv2_flag, const struct nls_table *nls_codepage) { struct smb_hdr *smb_buffer; @@ -3552,6 +3555,8 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb) cifs_sb->prepathlen = 0; cifs_sb->prepath = NULL; kfree(tmp); + if (ses) + schedule_timeout_interruptible(msecs_to_jiffies(500)); if (ses) sesInfoFree(ses); @@ -3564,7 +3569,7 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo, { int rc = 0; char ntlm_session_key[CIFS_SESS_KEY_SIZE]; - bool ntlmv2_flag = false; + int ntlmv2_flag = FALSE; int first_time = 0; /* what if server changes its buffer size after dropping the session? 
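Note: the same connect.c changes reintroduce the cifsd_complete completion: DECLARE_COMPLETION() at file scope, complete() in the demultiplex thread once its early accounting is done, and wait_for_completion() in cifs_mount() before setup continues. A hedged sketch of that handshake pattern with the kernel completion API; the thread and function names below are placeholders, not the cifs ones:

    #include <linux/completion.h>
    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static DECLARE_COMPLETION(worker_ready);

    static int worker_fn(void *unused)
    {
        /* early bookkeeping the creator must not race past */
        complete(&worker_ready);                /* creator may continue now */

        while (!kthread_should_stop())
            schedule_timeout_interruptible(HZ);
        return 0;
    }

    static int start_worker(void)
    {
        struct task_struct *tsk = kthread_run(worker_fn, NULL, "demo-workerd");

        if (IS_ERR(tsk))
            return PTR_ERR(tsk);
        wait_for_completion(&worker_ready);     /* block until the thread signals */
        return 0;
    }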
*/ diff --git a/trunk/fs/cifs/dir.c b/trunk/fs/cifs/dir.c index 6ed775986be9..0f5c62ba4038 100644 --- a/trunk/fs/cifs/dir.c +++ b/trunk/fs/cifs/dir.c @@ -130,7 +130,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, struct cifsFileInfo *pCifsFile = NULL; struct cifsInodeInfo *pCifsInode; int disposition = FILE_OVERWRITE_IF; - bool write_only = false; + int write_only = FALSE; xid = GetXid(); @@ -152,7 +152,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, if (oflags & FMODE_WRITE) { desiredAccess |= GENERIC_WRITE; if (!(oflags & FMODE_READ)) - write_only = true; + write_only = TRUE; } if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) @@ -254,7 +254,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, d_instantiate(direntry, newinode); } if ((nd == NULL /* nfsd case - nfs srv does not set nd */) || - (!(nd->flags & LOOKUP_OPEN))) { + ((nd->flags & LOOKUP_OPEN) == FALSE)) { /* mknod case - do not leave file open */ CIFSSMBClose(xid, pTcon, fileHandle); } else if (newinode) { @@ -266,8 +266,8 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, pCifsFile->netfid = fileHandle; pCifsFile->pid = current->tgid; pCifsFile->pInode = newinode; - pCifsFile->invalidHandle = false; - pCifsFile->closePend = false; + pCifsFile->invalidHandle = FALSE; + pCifsFile->closePend = FALSE; init_MUTEX(&pCifsFile->fh_sem); mutex_init(&pCifsFile->lock_mutex); INIT_LIST_HEAD(&pCifsFile->llist); @@ -280,7 +280,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, pCifsInode = CIFS_I(newinode); if (pCifsInode) { /* if readable file instance put first in list*/ - if (write_only) { + if (write_only == TRUE) { list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList); } else { @@ -288,12 +288,12 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, &pCifsInode->openFileList); } if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) { - pCifsInode->clientCanCacheAll = true; - pCifsInode->clientCanCacheRead = true; + pCifsInode->clientCanCacheAll = TRUE; + pCifsInode->clientCanCacheRead = TRUE; cFYI(1, ("Exclusive Oplock inode %p", newinode)); } else if ((oplock & 0xF) == OPLOCK_READ) - pCifsInode->clientCanCacheRead = true; + pCifsInode->clientCanCacheRead = TRUE; } write_unlock(&GlobalSMBSeslock); } diff --git a/trunk/fs/cifs/dns_resolve.c b/trunk/fs/cifs/dns_resolve.c index 939e256f8497..7cc86c418182 100644 --- a/trunk/fs/cifs/dns_resolve.c +++ b/trunk/fs/cifs/dns_resolve.c @@ -55,32 +55,6 @@ struct key_type key_type_dns_resolver = { .match = user_match, }; -/* Checks if supplied name is IP address - * returns: - * 1 - name is IP - * 0 - name is not IP - */ -static int is_ip(const char *name) -{ - int rc; - struct sockaddr_in sin_server; - struct sockaddr_in6 sin_server6; - - rc = cifs_inet_pton(AF_INET, name, - &sin_server.sin_addr.s_addr); - - if (rc <= 0) { - /* not ipv4 address, try ipv6 */ - rc = cifs_inet_pton(AF_INET6, name, - &sin_server6.sin6_addr.in6_u); - if (rc > 0) - return 1; - } else { - return 1; - } - /* we failed translating address */ - return 0; -} /* Resolves server name to ip address. 
* input: @@ -93,9 +67,8 @@ int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) { int rc = -EAGAIN; - struct key *rkey = ERR_PTR(-EAGAIN); + struct key *rkey; char *name; - char *data = NULL; int len; if (!ip_addr || !unc) @@ -124,41 +97,26 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) memcpy(name, unc+2, len); name[len] = 0; - if (is_ip(name)) { - cFYI(1, ("%s: it is IP, skipping dns upcall: %s", - __func__, name)); - data = name; - goto skip_upcall; - } - rkey = request_key(&key_type_dns_resolver, name, ""); if (!IS_ERR(rkey)) { - data = rkey->payload.data; - cFYI(1, ("%s: resolved: %s to %s", __func__, - rkey->description, - *ip_addr - )); - } else { - cERROR(1, ("%s: unable to resolve: %s", __func__, name)); - goto out; - } - -skip_upcall: - if (data) { - len = strlen(data); + len = strlen(rkey->payload.data); *ip_addr = kmalloc(len+1, GFP_KERNEL); if (*ip_addr) { - memcpy(*ip_addr, data, len); + memcpy(*ip_addr, rkey->payload.data, len); (*ip_addr)[len] = '\0'; + cFYI(1, ("%s: resolved: %s to %s", __func__, + rkey->description, + *ip_addr + )); rc = 0; } else { rc = -ENOMEM; } - if (!IS_ERR(rkey)) - key_put(rkey); + key_put(rkey); + } else { + cERROR(1, ("%s: unable to resolve: %s", __func__, name)); } -out: kfree(name); return rc; } diff --git a/trunk/fs/cifs/fcntl.c b/trunk/fs/cifs/fcntl.c index 5a57581eb4b2..7d1d5aa4c430 100644 --- a/trunk/fs/cifs/fcntl.c +++ b/trunk/fs/cifs/fcntl.c @@ -68,7 +68,7 @@ int cifs_dir_notify(struct file *file, unsigned long arg) { int xid; int rc = -EINVAL; - int oplock = 0; + int oplock = FALSE; struct cifs_sb_info *cifs_sb; struct cifsTconInfo *pTcon; char *full_path = NULL; diff --git a/trunk/fs/cifs/file.c b/trunk/fs/cifs/file.c index 31a0a33b9d95..40b690073fc1 100644 --- a/trunk/fs/cifs/file.c +++ b/trunk/fs/cifs/file.c @@ -51,8 +51,8 @@ static inline struct cifsFileInfo *cifs_init_private( INIT_LIST_HEAD(&private_data->llist); private_data->pfile = file; /* needed for writepage */ private_data->pInode = inode; - private_data->invalidHandle = false; - private_data->closePend = false; + private_data->invalidHandle = FALSE; + private_data->closePend = FALSE; /* we have to track num writers to the inode, since writepages does not tell us which handle the write is for so there can be a close (overlapping with write) of the filehandle that @@ -148,12 +148,12 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file, full_path, buf, inode->i_sb, xid, NULL); if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) { - pCifsInode->clientCanCacheAll = true; - pCifsInode->clientCanCacheRead = true; + pCifsInode->clientCanCacheAll = TRUE; + pCifsInode->clientCanCacheRead = TRUE; cFYI(1, ("Exclusive Oplock granted on inode %p", file->f_path.dentry->d_inode)); } else if ((*oplock & 0xF) == OPLOCK_READ) - pCifsInode->clientCanCacheRead = true; + pCifsInode->clientCanCacheRead = TRUE; return rc; } @@ -247,7 +247,7 @@ int cifs_open(struct inode *inode, struct file *file) if (oplockEnabled) oplock = REQ_OPLOCK; else - oplock = 0; + oplock = FALSE; /* BB pass O_SYNC flag through on file attributes .. 
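Note: the dns_resolve.c hunk above removes the helper that skipped the keyserver upcall when the node name was already an IP literal, leaving request_key() and a copy of rkey->payload.data as the only path. For reference, the removed check amounts to an address-parsing probe; a user-space equivalent with inet_pton(), illustrative only and not the kernel helper:

    #include <arpa/inet.h>
    #include <stdio.h>

    /* return 1 if name is an IPv4 or IPv6 literal, 0 if it needs resolving */
    static int is_ip_literal(const char *name)
    {
        struct in_addr v4;
        struct in6_addr v6;

        if (inet_pton(AF_INET, name, &v4) == 1)
            return 1;
        if (inet_pton(AF_INET6, name, &v6) == 1)
            return 1;
        return 0;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               is_ip_literal("192.168.1.10"),
               is_ip_literal("::1"),
               is_ip_literal("fileserver.example.com"));   /* prints: 1 1 0 */
        return 0;
    }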
BB */ @@ -339,7 +339,7 @@ static int cifs_relock_file(struct cifsFileInfo *cifsFile) return rc; } -static int cifs_reopen_file(struct file *file, bool can_flush) +static int cifs_reopen_file(struct file *file, int can_flush) { int rc = -EACCES; int xid, oplock; @@ -360,7 +360,7 @@ static int cifs_reopen_file(struct file *file, bool can_flush) xid = GetXid(); down(&pCifsFile->fh_sem); - if (!pCifsFile->invalidHandle) { + if (pCifsFile->invalidHandle == FALSE) { up(&pCifsFile->fh_sem); FreeXid(xid); return 0; @@ -404,7 +404,7 @@ static int cifs_reopen_file(struct file *file, bool can_flush) if (oplockEnabled) oplock = REQ_OPLOCK; else - oplock = 0; + oplock = FALSE; /* Can not refresh inode by passing in file_info buf to be returned by SMBOpen and then calling get_inode_info with returned buf @@ -422,7 +422,7 @@ static int cifs_reopen_file(struct file *file, bool can_flush) cFYI(1, ("oplock: %d", oplock)); } else { pCifsFile->netfid = netfid; - pCifsFile->invalidHandle = false; + pCifsFile->invalidHandle = FALSE; up(&pCifsFile->fh_sem); pCifsInode = CIFS_I(inode); if (pCifsInode) { @@ -432,8 +432,8 @@ static int cifs_reopen_file(struct file *file, bool can_flush) CIFS_I(inode)->write_behind_rc = rc; /* temporarily disable caching while we go to server to get inode info */ - pCifsInode->clientCanCacheAll = false; - pCifsInode->clientCanCacheRead = false; + pCifsInode->clientCanCacheAll = FALSE; + pCifsInode->clientCanCacheRead = FALSE; if (pTcon->unix_ext) rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb, xid); @@ -448,16 +448,16 @@ static int cifs_reopen_file(struct file *file, bool can_flush) we can not go to the server to get the new inod info */ if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) { - pCifsInode->clientCanCacheAll = true; - pCifsInode->clientCanCacheRead = true; + pCifsInode->clientCanCacheAll = TRUE; + pCifsInode->clientCanCacheRead = TRUE; cFYI(1, ("Exclusive Oplock granted on inode %p", file->f_path.dentry->d_inode)); } else if ((oplock & 0xF) == OPLOCK_READ) { - pCifsInode->clientCanCacheRead = true; - pCifsInode->clientCanCacheAll = false; + pCifsInode->clientCanCacheRead = TRUE; + pCifsInode->clientCanCacheAll = FALSE; } else { - pCifsInode->clientCanCacheRead = false; - pCifsInode->clientCanCacheAll = false; + pCifsInode->clientCanCacheRead = FALSE; + pCifsInode->clientCanCacheAll = FALSE; } cifs_relock_file(pCifsFile); } @@ -484,7 +484,7 @@ int cifs_close(struct inode *inode, struct file *file) if (pSMBFile) { struct cifsLockInfo *li, *tmp; - pSMBFile->closePend = true; + pSMBFile->closePend = TRUE; if (pTcon) { /* no sense reconnecting to close a file that is already closed */ @@ -553,8 +553,8 @@ int cifs_close(struct inode *inode, struct file *file) cFYI(1, ("closing last open instance for inode %p", inode)); /* if the file is not open we do not know if we can cache info on this inode, much less write behind and read ahead */ - CIFS_I(inode)->clientCanCacheRead = false; - CIFS_I(inode)->clientCanCacheAll = false; + CIFS_I(inode)->clientCanCacheRead = FALSE; + CIFS_I(inode)->clientCanCacheAll = FALSE; } read_unlock(&GlobalSMBSeslock); if ((rc == 0) && CIFS_I(inode)->write_behind_rc) @@ -583,9 +583,9 @@ int cifs_closedir(struct inode *inode, struct file *file) pTcon = cifs_sb->tcon; cFYI(1, ("Freeing private data in close dir")); - if (!pCFileStruct->srch_inf.endOfSearch && - !pCFileStruct->invalidHandle) { - pCFileStruct->invalidHandle = true; + if ((pCFileStruct->srch_inf.endOfSearch == FALSE) && + (pCFileStruct->invalidHandle == FALSE)) { + 
pCFileStruct->invalidHandle = TRUE; rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid); cFYI(1, ("Closing uncompleted readdir with rc %d", rc)); @@ -637,12 +637,12 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) __u32 numLock = 0; __u32 numUnlock = 0; __u64 length; - bool wait_flag = false; + int wait_flag = FALSE; struct cifs_sb_info *cifs_sb; struct cifsTconInfo *pTcon; __u16 netfid; __u8 lockType = LOCKING_ANDX_LARGE_FILES; - bool posix_locking; + int posix_locking; length = 1 + pfLock->fl_end - pfLock->fl_start; rc = -EACCES; @@ -659,7 +659,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) cFYI(1, ("Flock")); if (pfLock->fl_flags & FL_SLEEP) { cFYI(1, ("Blocking lock")); - wait_flag = true; + wait_flag = TRUE; } if (pfLock->fl_flags & FL_ACCESS) cFYI(1, ("Process suspended by mandatory locking - " @@ -794,7 +794,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) stored_rc = CIFSSMBLock(xid, pTcon, netfid, li->length, li->offset, - 1, 0, li->type, false); + 1, 0, li->type, FALSE); if (stored_rc) rc = stored_rc; @@ -866,7 +866,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data, filemap_fdatawait from here so tell reopen_file not to flush data to server now */ - rc = cifs_reopen_file(file, false); + rc = cifs_reopen_file(file, FALSE); if (rc != 0) break; } @@ -966,7 +966,7 @@ static ssize_t cifs_write(struct file *file, const char *write_data, filemap_fdatawait from here so tell reopen_file not to flush data to server now */ - rc = cifs_reopen_file(file, false); + rc = cifs_reopen_file(file, FALSE); if (rc != 0) break; } @@ -1093,7 +1093,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode) read_unlock(&GlobalSMBSeslock); /* Had to unlock since following call can block */ - rc = cifs_reopen_file(open_file->pfile, false); + rc = cifs_reopen_file(open_file->pfile, FALSE); if (!rc) { if (!open_file->closePend) return open_file; @@ -1608,7 +1608,7 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data, int buf_type = CIFS_NO_BUFFER; if ((open_file->invalidHandle) && (!open_file->closePend)) { - rc = cifs_reopen_file(file, true); + rc = cifs_reopen_file(file, TRUE); if (rc != 0) break; } @@ -1693,7 +1693,7 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size, while (rc == -EAGAIN) { if ((open_file->invalidHandle) && (!open_file->closePend)) { - rc = cifs_reopen_file(file, true); + rc = cifs_reopen_file(file, TRUE); if (rc != 0) break; } @@ -1850,7 +1850,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, while (rc == -EAGAIN) { if ((open_file->invalidHandle) && (!open_file->closePend)) { - rc = cifs_reopen_file(file, true); + rc = cifs_reopen_file(file, TRUE); if (rc != 0) break; } @@ -2009,10 +2009,10 @@ static int is_inode_writable(struct cifsInodeInfo *cifs_inode) refreshing the inode only on increases in the file size but this is tricky to do without racing with writebehind page caching in the current Linux kernel design */ -bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file) +int is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file) { if (!cifsInode) - return true; + return 1; if (is_inode_writable(cifsInode)) { /* This inode is open for write at least once */ @@ -2022,15 +2022,15 @@ bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file) if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) { /* since no page cache to corrupt on 
directio we can change size safely */ - return true; + return 1; } if (i_size_read(&cifsInode->vfs_inode) < end_of_file) - return true; + return 1; - return false; + return 0; } else - return true; + return 1; } static int cifs_prepare_write(struct file *file, struct page *page, diff --git a/trunk/fs/cifs/inode.c b/trunk/fs/cifs/inode.c index 0d9d2e6d7af6..e1031b9e2c55 100644 --- a/trunk/fs/cifs/inode.c +++ b/trunk/fs/cifs/inode.c @@ -281,7 +281,7 @@ static int decode_sfu_inode(struct inode *inode, __u64 size, struct cifs_sb_info *cifs_sb, int xid) { int rc; - int oplock = 0; + int oplock = FALSE; __u16 netfid; struct cifsTconInfo *pTcon = cifs_sb->tcon; char buf[24]; @@ -389,7 +389,7 @@ int cifs_get_inode_info(struct inode **pinode, struct cifs_sb_info *cifs_sb = CIFS_SB(sb); const unsigned char *full_path = NULL; char *buf = NULL; - bool adjustTZ = false; + int adjustTZ = FALSE; bool is_dfs_referral = false; pTcon = cifs_sb->tcon; @@ -425,7 +425,7 @@ int cifs_get_inode_info(struct inode **pinode, pfindData, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); - adjustTZ = true; + adjustTZ = TRUE; } } /* dump_mem("\nQPathInfo return data",&findData, sizeof(findData)); */ @@ -703,7 +703,7 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry) } else if (rc == -ENOENT) { d_drop(direntry); } else if (rc == -ETXTBSY) { - int oplock = 0; + int oplock = FALSE; __u16 netfid; rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN, DELETE, @@ -736,7 +736,7 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry) rc = -EOPNOTSUPP; if (rc == -EOPNOTSUPP) { - int oplock = 0; + int oplock = FALSE; __u16 netfid; /* rc = CIFSSMBSetAttrLegacy(xid, pTcon, full_path, @@ -774,7 +774,7 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry) if (direntry->d_inode) drop_nlink(direntry->d_inode); } else if (rc == -ETXTBSY) { - int oplock = 0; + int oplock = FALSE; __u16 netfid; rc = CIFSSMBOpen(xid, pTcon, full_path, @@ -1149,7 +1149,7 @@ int cifs_rename(struct inode *source_inode, struct dentry *source_direntry, cFYI(1, ("rename rc %d", rc)); if ((rc == -EIO) || (rc == -EEXIST)) { - int oplock = 0; + int oplock = FALSE; __u16 netfid; /* BB FIXME Is Generic Read correct for rename? 
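Note: the file.c hunk above ends with is_size_safe_to_change() returning plain ints again. Its decision, reduced to a standalone sketch; the struct below is a simplified stand-in for cifsInodeInfo, not the real definition:

    /* simplified stand-in for the per-inode state the real function consults */
    struct demo_inode {
        int open_for_write;         /* some local handle is open for writing */
        int direct_io;              /* direct I/O mount, no page cache in use */
        long long cached_size;      /* size the client currently believes */
    };

    /* return 1 when a server-reported size may be applied, 0 when it must not */
    static int size_safe_to_change(const struct demo_inode *ci, long long end_of_file)
    {
        if (!ci)
            return 1;
        if (!ci->open_for_write)
            return 1;               /* no local writer, trust the server */
        if (ci->direct_io)
            return 1;               /* no page cache to corrupt */
        if (ci->cached_size < end_of_file)
            return 1;               /* only growing the file */
        return 0;                   /* shrinking under a local writer: not safe */
    }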
*/ @@ -1186,7 +1186,7 @@ int cifs_revalidate(struct dentry *direntry) struct cifsInodeInfo *cifsInode; loff_t local_size; struct timespec local_mtime; - bool invalidate_inode = false; + int invalidate_inode = FALSE; if (direntry->d_inode == NULL) return -ENOENT; @@ -1268,7 +1268,7 @@ int cifs_revalidate(struct dentry *direntry) only ones who could have modified the file and the server copy is staler than ours */ } else { - invalidate_inode = true; + invalidate_inode = TRUE; } } @@ -1402,8 +1402,8 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs) int rc = -EACCES; struct cifsFileInfo *open_file = NULL; FILE_BASIC_INFO time_buf; - bool set_time = false; - bool set_dosattr = false; + int set_time = FALSE; + int set_dosattr = FALSE; __u64 mode = 0xFFFFFFFFFFFFFFFFULL; __u64 uid = 0xFFFFFFFFFFFFFFFFULL; __u64 gid = 0xFFFFFFFFFFFFFFFFULL; @@ -1464,7 +1464,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs) __u16 nfid = open_file->netfid; __u32 npid = open_file->pid; rc = CIFSSMBSetFileSize(xid, pTcon, attrs->ia_size, - nfid, npid, false); + nfid, npid, FALSE); atomic_dec(&open_file->wrtPending); cFYI(1, ("SetFSize for attrs rc = %d", rc)); if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { @@ -1484,14 +1484,14 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs) it was found or because there was an error setting it by handle */ rc = CIFSSMBSetEOF(xid, pTcon, full_path, - attrs->ia_size, false, + attrs->ia_size, FALSE, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); cFYI(1, ("SetEOF by path (setattrs) rc = %d", rc)); if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { __u16 netfid; - int oplock = 0; + int oplock = FALSE; rc = SMBLegacyOpen(xid, pTcon, full_path, FILE_OPEN, @@ -1516,7 +1516,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs) /* Server is ok setting allocation size implicitly - no need to call: - CIFSSMBSetEOF(xid, pTcon, full_path, attrs->ia_size, true, + CIFSSMBSetEOF(xid, pTcon, full_path, attrs->ia_size, TRUE, cifs_sb->local_nls); */ @@ -1564,7 +1564,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs) #endif /* not writeable */ if ((cifsInode->cifsAttrs & ATTR_READONLY) == 0) { - set_dosattr = true; + set_dosattr = TRUE; time_buf.Attributes = cpu_to_le32(cifsInode->cifsAttrs | ATTR_READONLY); @@ -1574,24 +1574,28 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs) not be able to write to it - so if any write bit is enabled for user or group or other we need to at least try to remove r/o dos attr */ - set_dosattr = true; + set_dosattr = TRUE; time_buf.Attributes = cpu_to_le32(cifsInode->cifsAttrs & (~ATTR_READONLY)); /* Windows ignores set to zero */ if (time_buf.Attributes == 0) time_buf.Attributes |= cpu_to_le32(ATTR_NORMAL); } +#ifdef CONFIG_CIFS_EXPERIMENTAL + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) + mode_to_acl(direntry->d_inode, full_path, mode); +#endif } if (attrs->ia_valid & ATTR_ATIME) { - set_time = true; + set_time = TRUE; time_buf.LastAccessTime = cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_atime)); } else time_buf.LastAccessTime = 0; if (attrs->ia_valid & ATTR_MTIME) { - set_time = true; + set_time = TRUE; time_buf.LastWriteTime = cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_mtime)); } else @@ -1602,7 +1606,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs) server times */ if (set_time && (attrs->ia_valid & ATTR_CTIME)) { - set_time = true; + set_time = TRUE; /* Although Samba throws this field away it may be useful to Windows - but we do 
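Note: the setattr hunk above stores the requested atime/mtime into FILE_BASIC_INFO as little-endian NT timestamps via cifs_UnixTimeToNT(). As a reminder, NT time counts 100-nanosecond ticks since 1601-01-01; a seconds-only illustration of the conversion, not the kernel routine itself:

    #include <stdint.h>
    #include <stdio.h>

    /* seconds between 1601-01-01 and the Unix epoch, 1970-01-01 */
    #define NT_EPOCH_DELTA 11644473600ULL

    /* convert Unix seconds to NT time in 100 ns ticks */
    static uint64_t unix_to_nt_time(uint64_t unix_seconds)
    {
        return (unix_seconds + NT_EPOCH_DELTA) * 10000000ULL;
    }

    int main(void)
    {
        /* the Unix epoch itself maps to 116444736000000000 ticks */
        printf("%llu\n", (unsigned long long)unix_to_nt_time(0));
        return 0;
    }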
not want to set ctime unless some other @@ -1626,7 +1630,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs) rc = -EOPNOTSUPP; if (rc == -EOPNOTSUPP) { - int oplock = 0; + int oplock = FALSE; __u16 netfid; cFYI(1, ("calling SetFileInfo since SetPathInfo for " diff --git a/trunk/fs/cifs/link.c b/trunk/fs/cifs/link.c index 1c2c3ce5020b..d4e7ec93285f 100644 --- a/trunk/fs/cifs/link.c +++ b/trunk/fs/cifs/link.c @@ -230,7 +230,7 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen) struct inode *inode = direntry->d_inode; int rc = -EACCES; int xid; - int oplock = 0; + int oplock = FALSE; struct cifs_sb_info *cifs_sb; struct cifsTconInfo *pTcon; char *full_path = NULL; diff --git a/trunk/fs/cifs/misc.c b/trunk/fs/cifs/misc.c index 1d69b8014e0b..2a42d9fedbb2 100644 --- a/trunk/fs/cifs/misc.c +++ b/trunk/fs/cifs/misc.c @@ -496,8 +496,7 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length) } return 0; } - -bool +int is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) { struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf; @@ -523,17 +522,17 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) pnotify->Action)); /* BB removeme BB */ /* cifs_dump_mem("Rcvd notify Data: ",buf, sizeof(struct smb_hdr)+60); */ - return true; + return TRUE; } if (pSMBr->hdr.Status.CifsError) { cFYI(1, ("notify err 0x%d", pSMBr->hdr.Status.CifsError)); - return true; + return TRUE; } - return false; + return FALSE; } if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX) - return false; + return FALSE; if (pSMB->hdr.Flags & SMBFLG_RESPONSE) { /* no sense logging error on invalid handle on oplock break - harmless race between close request and oplock @@ -542,21 +541,21 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) if ((NT_STATUS_INVALID_HANDLE) == le32_to_cpu(pSMB->hdr.Status.CifsError)) { cFYI(1, ("invalid handle on oplock break")); - return true; + return TRUE; } else if (ERRbadfid == le16_to_cpu(pSMB->hdr.Status.DosError.Error)) { - return true; + return TRUE; } else { - return false; /* on valid oplock brk we get "request" */ + return FALSE; /* on valid oplock brk we get "request" */ } } if (pSMB->hdr.WordCount != 8) - return false; + return FALSE; cFYI(1, ("oplock type 0x%d level 0x%d", pSMB->LockType, pSMB->OplockLevel)); if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE)) - return false; + return FALSE; /* look up tcon based on tid & uid */ read_lock(&GlobalSMBSeslock); @@ -574,11 +573,11 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) ("file id match, oplock break")); pCifsInode = CIFS_I(netfile->pInode); - pCifsInode->clientCanCacheAll = false; + pCifsInode->clientCanCacheAll = FALSE; if (pSMB->OplockLevel == 0) pCifsInode->clientCanCacheRead - = false; - pCifsInode->oplockPending = true; + = FALSE; + pCifsInode->oplockPending = TRUE; AllocOplockQEntry(netfile->pInode, netfile->netfid, tcon); @@ -586,17 +585,17 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) ("about to wake up oplock thread")); if (oplockThread) wake_up_process(oplockThread); - return true; + return TRUE; } } read_unlock(&GlobalSMBSeslock); cFYI(1, ("No matching file for oplock break")); - return true; + return TRUE; } } read_unlock(&GlobalSMBSeslock); cFYI(1, ("Can not process oplock break for non-existent connection")); - return true; + return TRUE; } void diff --git a/trunk/fs/cifs/netmisc.c b/trunk/fs/cifs/netmisc.c index 00f4cff400b3..3b5a5ce882b6 100644 --- 
a/trunk/fs/cifs/netmisc.c +++ b/trunk/fs/cifs/netmisc.c @@ -132,17 +132,47 @@ static const struct smb_to_posix_error mapping_table_ERRHRD[] = { {0, 0} }; + +/* if the mount helper is missing we need to reverse the 1st slash + from '/' to backslash in order to format the UNC properly for + ip address parsing and for tree connect (unless the user + remembered to put the UNC name in properly). Fortunately we do + not have to call this twice (we check for IPv4 addresses + first, so it is already converted by the time we + try IPv6 addresses */ +static int canonicalize_unc(char *cp) +{ + int i; + + for (i = 0; i <= 46 /* INET6_ADDRSTRLEN */ ; i++) { + if (cp[i] == 0) + break; + if (cp[i] == '\\') + break; + if (cp[i] == '/') { + cFYI(DBG2, ("change slash to \\ in malformed UNC")); + cp[i] = '\\'; + return 1; + } + } + return 0; +} + /* Convert string containing dotted ip address to binary form */ /* returns 0 if invalid address */ int -cifs_inet_pton(const int address_family, const char *cp, void *dst) +cifs_inet_pton(int address_family, char *cp, void *dst) { int ret = 0; /* calculate length by finding first slash or NULL */ if (address_family == AF_INET) { ret = in4_pton(cp, -1 /* len */, dst, '\\', NULL); + if (ret == 0) { + if (canonicalize_unc(cp)) + ret = in4_pton(cp, -1, dst, '\\', NULL); + } } else if (address_family == AF_INET6) { ret = in6_pton(cp, -1 /* len */, dst , '\\', NULL); } diff --git a/trunk/fs/cifs/readdir.c b/trunk/fs/cifs/readdir.c index 34ec32100c72..32b445edc882 100644 --- a/trunk/fs/cifs/readdir.c +++ b/trunk/fs/cifs/readdir.c @@ -447,8 +447,8 @@ static int initiate_cifs_search(const int xid, struct file *file) if (file->private_data == NULL) return -ENOMEM; cifsFile = file->private_data; - cifsFile->invalidHandle = true; - cifsFile->srch_inf.endOfSearch = false; + cifsFile->invalidHandle = TRUE; + cifsFile->srch_inf.endOfSearch = FALSE; cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); if (cifs_sb == NULL) @@ -485,7 +485,7 @@ static int initiate_cifs_search(const int xid, struct file *file) cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR, CIFS_DIR_SEP(cifs_sb)); if (rc == 0) - cifsFile->invalidHandle = false; + cifsFile->invalidHandle = FALSE; if ((rc == -EOPNOTSUPP) && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) { cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM; @@ -670,7 +670,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon, (index_to_find < first_entry_in_buffer)) { /* close and restart search */ cFYI(1, ("search backing up - close and restart search")); - cifsFile->invalidHandle = true; + cifsFile->invalidHandle = TRUE; CIFSFindClose(xid, pTcon, cifsFile->netfid); kfree(cifsFile->search_resume_name); cifsFile->search_resume_name = NULL; @@ -692,7 +692,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon, } while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) && - (rc == 0) && !cifsFile->srch_inf.endOfSearch) { + (rc == 0) && (cifsFile->srch_inf.endOfSearch == FALSE)) { cFYI(1, ("calling findnext2")); rc = CIFSFindNext(xid, pTcon, cifsFile->netfid, &cifsFile->srch_inf); @@ -1038,7 +1038,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) break; } } /* else { - cifsFile->invalidHandle = true; + cifsFile->invalidHandle = TRUE; CIFSFindClose(xid, pTcon, cifsFile->netfid); } kfree(cifsFile->search_resume_name); diff --git a/trunk/fs/cifs/smbencrypt.c b/trunk/fs/cifs/smbencrypt.c index ff3232fa1015..58bbfd992cc0 100644 --- a/trunk/fs/cifs/smbencrypt.c +++ 
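Note: canonicalize_unc(), added in the netmisc.c hunk above, flips the first forward slash of a malformed UNC to a backslash so that the retried in4_pton() call, which uses '\\' as its terminator, can parse the address. A user-space rendition of the same fixup, keeping the INET6_ADDRSTRLEN bound from the hunk:

    #include <stdio.h>

    /* change the first '/' to '\'; return 1 if a change was made, 0 otherwise */
    static int canonicalize_unc_demo(char *cp)
    {
        int i;

        for (i = 0; i <= 46 /* INET6_ADDRSTRLEN */; i++) {
            if (cp[i] == '\0' || cp[i] == '\\')
                break;              /* already canonical or end of string */
            if (cp[i] == '/') {
                cp[i] = '\\';
                return 1;
            }
        }
        return 0;
    }

    int main(void)
    {
        char name[] = "192.168.1.10/share";     /* '/' typed where '\' belongs */

        canonicalize_unc_demo(name);
        printf("%s\n", name);                   /* 192.168.1.10\share */
        return 0;
    }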
b/trunk/fs/cifs/smbencrypt.c @@ -35,11 +35,11 @@ #include "cifs_debug.h" #include "cifsencrypt.h" -#ifndef false -#define false 0 +#ifndef FALSE +#define FALSE 0 #endif -#ifndef true -#define true 1 +#ifndef TRUE +#define TRUE 1 #endif /* following came from the other byteorder.h to avoid include conflicts */ diff --git a/trunk/fs/cifs/xattr.c b/trunk/fs/cifs/xattr.c index e9527eedc639..8cd6a445b017 100644 --- a/trunk/fs/cifs/xattr.c +++ b/trunk/fs/cifs/xattr.c @@ -264,7 +264,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name, #ifdef CONFIG_CIFS_EXPERIMENTAL else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) { __u16 fid; - int oplock = 0; + int oplock = FALSE; struct cifs_ntsd *pacl = NULL; __u32 buflen = 0; if (experimEnabled) diff --git a/trunk/fs/inode.c b/trunk/fs/inode.c index c36d9480335c..bf6478130424 100644 --- a/trunk/fs/inode.c +++ b/trunk/fs/inode.c @@ -1149,8 +1149,13 @@ static inline void iput_final(struct inode *inode) void iput(struct inode *inode) { if (inode) { + const struct super_operations *op = inode->i_sb->s_op; + BUG_ON(inode->i_state == I_CLEAR); + if (op && op->put_inode) + op->put_inode(inode); + if (atomic_dec_and_lock(&inode->i_count, &inode_lock)) iput_final(inode); } diff --git a/trunk/fs/locks.c b/trunk/fs/locks.c index 0ac6b92cb0b6..663c069b59b3 100644 --- a/trunk/fs/locks.c +++ b/trunk/fs/locks.c @@ -1753,7 +1753,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, struct file_lock *file_lock = locks_alloc_lock(); struct flock flock; struct inode *inode; - struct file *f; int error; if (file_lock == NULL) @@ -1826,15 +1825,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, * Attempt to detect a close/fcntl race and recover by * releasing the lock that was just acquired. */ - /* - * we need that spin_lock here - it prevents reordering between - * update of inode->i_flock and check for it done in close(). - * rcu_read_lock() wouldn't do. - */ - spin_lock(¤t->files->file_lock); - f = fcheck(fd); - spin_unlock(¤t->files->file_lock); - if (!error && f != filp && flock.l_type != F_UNLCK) { + if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) { flock.l_type = F_UNLCK; goto again; } @@ -1890,7 +1881,6 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, struct file_lock *file_lock = locks_alloc_lock(); struct flock64 flock; struct inode *inode; - struct file *f; int error; if (file_lock == NULL) @@ -1963,10 +1953,7 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, * Attempt to detect a close/fcntl race and recover by * releasing the lock that was just acquired. 
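Note: the fs/inode.c hunk above restores the ->put_inode() callout at the top of iput(), invoked on every reference drop before i_count is decremented, whenever the operation is provided. A filesystem opts in by filling the field in its super_operations; a minimal hedged sketch, with the callback body left as a comment because what a given filesystem does there is its own business:

    #include <linux/fs.h>

    /* called from iput() on every reference drop, before i_count is decremented */
    static void examplefs_put_inode(struct inode *inode)
    {
        /* lightweight per-iput bookkeeping for this (hypothetical) filesystem */
    }

    static const struct super_operations examplefs_sops = {
        .put_inode    = examplefs_put_inode,
        /* .write_inode, .delete_inode, ... as the filesystem requires */
    };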
*/ - spin_lock(¤t->files->file_lock); - f = fcheck(fd); - spin_unlock(¤t->files->file_lock); - if (!error && f != filp && flock.l_type != F_UNLCK) { + if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) { flock.l_type = F_UNLCK; goto again; } diff --git a/trunk/fs/pipe.c b/trunk/fs/pipe.c index ec228bc9f882..3499f9ff6316 100644 --- a/trunk/fs/pipe.c +++ b/trunk/fs/pipe.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include @@ -1087,11 +1086,8 @@ asmlinkage long __weak sys_pipe(int __user *fildes) error = do_pipe(fd); if (!error) { - if (copy_to_user(fildes, fd, sizeof(fd))) { - sys_close(fd[0]); - sys_close(fd[1]); + if (copy_to_user(fildes, fd, sizeof(fd))) error = -EFAULT; - } } return error; } diff --git a/trunk/fs/proc/task_mmu.c b/trunk/fs/proc/task_mmu.c index 88717c0f941b..e2b8e769f510 100644 --- a/trunk/fs/proc/task_mmu.c +++ b/trunk/fs/proc/task_mmu.c @@ -5,9 +5,11 @@ #include #include #include +#include #include #include #include +#include #include #include diff --git a/trunk/fs/proc/task_nommu.c b/trunk/fs/proc/task_nommu.c index 4b4f9cc2f186..4b733f108455 100644 --- a/trunk/fs/proc/task_nommu.c +++ b/trunk/fs/proc/task_nommu.c @@ -1,7 +1,6 @@ #include #include -#include #include #include #include diff --git a/trunk/fs/splice.c b/trunk/fs/splice.c index 78150038b584..633f58ebfb72 100644 --- a/trunk/fs/splice.c +++ b/trunk/fs/splice.c @@ -811,19 +811,24 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out, { struct address_space *mapping = out->f_mapping; struct inode *inode = mapping->host; - struct splice_desc sd = { - .total_len = len, - .flags = flags, - .pos = *ppos, - .u.file = out, - }; + int killsuid, killpriv; ssize_t ret; + int err = 0; + + killpriv = security_inode_need_killpriv(out->f_path.dentry); + killsuid = should_remove_suid(out->f_path.dentry); + if (unlikely(killsuid || killpriv)) { + mutex_lock(&inode->i_mutex); + if (killpriv) + err = security_inode_killpriv(out->f_path.dentry); + if (!err && killsuid) + err = __remove_suid(out->f_path.dentry, killsuid); + mutex_unlock(&inode->i_mutex); + if (err) + return err; + } - inode_double_lock(inode, pipe->inode); - ret = remove_suid(out->f_path.dentry); - if (likely(!ret)) - ret = __splice_from_pipe(pipe, &sd, pipe_to_file); - inode_double_unlock(inode, pipe->inode); + ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file); if (ret > 0) { unsigned long nr_pages; @@ -835,8 +840,6 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out, * sync it. */ if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) { - int err; - mutex_lock(&inode->i_mutex); err = generic_osync_inode(inode, mapping, OSYNC_METADATA|OSYNC_DATA); @@ -1072,7 +1075,7 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, ret = splice_direct_to_actor(in, &sd, direct_splice_actor); if (ret > 0) - *ppos += ret; + *ppos = sd.pos; return ret; } diff --git a/trunk/fs/udf/namei.c b/trunk/fs/udf/namei.c index d3231947db19..2b34c8ca6c83 100644 --- a/trunk/fs/udf/namei.c +++ b/trunk/fs/udf/namei.c @@ -32,7 +32,6 @@ #include #include #include -#include static inline int udf_match(int len1, const char *name1, int len2, const char *name2) @@ -159,8 +158,6 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir, sector_t offset; struct extent_position epos = {}; struct udf_inode_info *dinfo = UDF_I(dir); - int isdotdot = dentry->d_name.len == 2 && - dentry->d_name.name[0] == '.' 
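Note: the fs/locks.c hunks above concern the kernel side of F_SETLK, in particular how a close()/fcntl() race is detected and unwound with an F_UNLCK retry. For orientation only, this is the user-space shape of the request those paths service; a small runnable example that takes and then releases a write lock, with an arbitrary file name:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        struct flock fl;
        int fd = open("/tmp/lockdemo", O_RDWR | O_CREAT, 0644);

        if (fd < 0)
            return 1;

        memset(&fl, 0, sizeof(fl));
        fl.l_type = F_WRLCK;            /* exclusive (write) lock */
        fl.l_whence = SEEK_SET;
        fl.l_start = 0;
        fl.l_len = 0;                   /* zero length means to end of file */

        if (fcntl(fd, F_SETLK, &fl) == -1)  /* non-blocking; F_SETLKW would wait */
            perror("F_SETLK");

        fl.l_type = F_UNLCK;            /* release the lock */
        fcntl(fd, F_SETLK, &fl);
        close(fd);
        return 0;
    }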
&& dentry->d_name.name[1] == '.'; size = udf_ext0_offset(dir) + dir->i_size; f_pos = udf_ext0_offset(dir); @@ -228,12 +225,6 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir, continue; } - if ((cfi->fileCharacteristics & FID_FILE_CHAR_PARENT) && - isdotdot) { - brelse(epos.bh); - return fi; - } - if (!lfi) continue; @@ -295,8 +286,9 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry, } } unlock_kernel(); + d_add(dentry, inode); - return d_splice_alias(inode, dentry); + return NULL; } static struct fileIdentDesc *udf_add_entry(struct inode *dir, @@ -315,7 +307,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir, uint16_t liu; int block; kernel_lb_addr eloc; - uint32_t elen = 0; + uint32_t elen; sector_t offset; struct extent_position epos = {}; struct udf_inode_info *dinfo; @@ -406,8 +398,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir, } add: - /* Is there any extent whose size we need to round up? */ - if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB && elen) { + if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { elen = (elen + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1); if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) epos.offset -= sizeof(short_ad); @@ -1241,134 +1232,6 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, return retval; } -static struct dentry *udf_get_parent(struct dentry *child) -{ - struct dentry *parent; - struct inode *inode = NULL; - struct dentry dotdot; - struct fileIdentDesc cfi; - struct udf_fileident_bh fibh; - - dotdot.d_name.name = ".."; - dotdot.d_name.len = 2; - - lock_kernel(); - if (!udf_find_entry(child->d_inode, &dotdot, &fibh, &cfi)) - goto out_unlock; - - if (fibh.sbh != fibh.ebh) - brelse(fibh.ebh); - brelse(fibh.sbh); - - inode = udf_iget(child->d_inode->i_sb, - lelb_to_cpu(cfi.icb.extLocation)); - if (!inode) - goto out_unlock; - unlock_kernel(); - - parent = d_alloc_anon(inode); - if (!parent) { - iput(inode); - parent = ERR_PTR(-ENOMEM); - } - - return parent; -out_unlock: - unlock_kernel(); - return ERR_PTR(-EACCES); -} - - -static struct dentry *udf_nfs_get_inode(struct super_block *sb, u32 block, - u16 partref, __u32 generation) -{ - struct inode *inode; - struct dentry *result; - kernel_lb_addr loc; - - if (block == 0) - return ERR_PTR(-ESTALE); - - loc.logicalBlockNum = block; - loc.partitionReferenceNum = partref; - inode = udf_iget(sb, loc); - - if (inode == NULL) - return ERR_PTR(-ENOMEM); - - if (generation && inode->i_generation != generation) { - iput(inode); - return ERR_PTR(-ESTALE); - } - result = d_alloc_anon(inode); - if (!result) { - iput(inode); - return ERR_PTR(-ENOMEM); - } - return result; -} - -static struct dentry *udf_fh_to_dentry(struct super_block *sb, - struct fid *fid, int fh_len, int fh_type) -{ - if ((fh_len != 3 && fh_len != 5) || - (fh_type != FILEID_UDF_WITH_PARENT && - fh_type != FILEID_UDF_WITHOUT_PARENT)) - return NULL; - - return udf_nfs_get_inode(sb, fid->udf.block, fid->udf.partref, - fid->udf.generation); -} - -static struct dentry *udf_fh_to_parent(struct super_block *sb, - struct fid *fid, int fh_len, int fh_type) -{ - if (fh_len != 5 || fh_type != FILEID_UDF_WITH_PARENT) - return NULL; - - return udf_nfs_get_inode(sb, fid->udf.parent_block, - fid->udf.parent_partref, - fid->udf.parent_generation); -} -static int udf_encode_fh(struct dentry *de, __u32 *fh, int *lenp, - int connectable) -{ - int len = *lenp; - struct inode *inode = de->d_inode; - kernel_lb_addr location = UDF_I(inode)->i_location; - struct 
fid *fid = (struct fid *)fh; - int type = FILEID_UDF_WITHOUT_PARENT; - - if (len < 3 || (connectable && len < 5)) - return 255; - - *lenp = 3; - fid->udf.block = location.logicalBlockNum; - fid->udf.partref = location.partitionReferenceNum; - fid->udf.generation = inode->i_generation; - - if (connectable && !S_ISDIR(inode->i_mode)) { - spin_lock(&de->d_lock); - inode = de->d_parent->d_inode; - location = UDF_I(inode)->i_location; - fid->udf.parent_block = location.logicalBlockNum; - fid->udf.parent_partref = location.partitionReferenceNum; - fid->udf.parent_generation = inode->i_generation; - spin_unlock(&de->d_lock); - *lenp = 5; - type = FILEID_UDF_WITH_PARENT; - } - - return type; -} - -const struct export_operations udf_export_ops = { - .encode_fh = udf_encode_fh, - .fh_to_dentry = udf_fh_to_dentry, - .fh_to_parent = udf_fh_to_parent, - .get_parent = udf_get_parent, -}; - const struct inode_operations udf_dir_inode_operations = { .lookup = udf_lookup, .create = udf_create, diff --git a/trunk/fs/udf/partition.c b/trunk/fs/udf/partition.c index 96dfd207c3d6..63610f026ae1 100644 --- a/trunk/fs/udf/partition.c +++ b/trunk/fs/udf/partition.c @@ -27,8 +27,8 @@ #include #include -uint32_t udf_get_pblock(struct super_block *sb, uint32_t block, - uint16_t partition, uint32_t offset) +inline uint32_t udf_get_pblock(struct super_block *sb, uint32_t block, + uint16_t partition, uint32_t offset) { struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *map; diff --git a/trunk/fs/udf/super.c b/trunk/fs/udf/super.c index 7a5f69be6ac2..9fb18a340fc1 100644 --- a/trunk/fs/udf/super.c +++ b/trunk/fs/udf/super.c @@ -1933,7 +1933,6 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) /* Fill in the rest of the superblock */ sb->s_op = &udf_sb_ops; - sb->s_export_op = &udf_export_ops; sb->dq_op = NULL; sb->s_dirt = 0; sb->s_magic = UDF_SUPER_MAGIC; diff --git a/trunk/fs/udf/udfdecl.h b/trunk/fs/udf/udfdecl.h index 8fa9c2d70911..f3f45d029277 100644 --- a/trunk/fs/udf/udfdecl.h +++ b/trunk/fs/udf/udfdecl.h @@ -73,7 +73,6 @@ struct task_struct; struct buffer_head; struct super_block; -extern const struct export_operations udf_export_ops; extern const struct inode_operations udf_dir_inode_operations; extern const struct file_operations udf_dir_operations; extern const struct inode_operations udf_file_inode_operations; diff --git a/trunk/include/asm-alpha/types.h b/trunk/include/asm-alpha/types.h index c1541353ccef..a9e34ca4d463 100644 --- a/trunk/include/asm-alpha/types.h +++ b/trunk/include/asm-alpha/types.h @@ -23,11 +23,5 @@ typedef unsigned int umode_t; #define BITS_PER_LONG 64 -#ifndef __ASSEMBLY__ - -typedef u64 dma_addr_t; -typedef u64 dma64_addr_t; - -#endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ALPHA_TYPES_H */ diff --git a/trunk/include/asm-arm/arch-pxa/pm.h b/trunk/include/asm-arm/arch-pxa/pm.h index 261e5bc958db..9d9f4b54b2ce 100644 --- a/trunk/include/asm-arm/arch-pxa/pm.h +++ b/trunk/include/asm-arm/arch-pxa/pm.h @@ -10,7 +10,7 @@ #include struct pxa_cpu_pm_fns { - int save_count; + int save_size; void (*save)(unsigned long *); void (*restore)(unsigned long *); int (*valid)(suspend_state_t state); diff --git a/trunk/include/asm-arm/arch-pxa/system.h b/trunk/include/asm-arm/arch-pxa/system.h index 9aa6c2e939e8..a758a719180f 100644 --- a/trunk/include/asm-arm/arch-pxa/system.h +++ b/trunk/include/asm-arm/arch-pxa/system.h @@ -22,8 +22,7 @@ static inline void arch_idle(void) static inline void arch_reset(char mode) { - if (cpu_is_pxa2xx()) - RCSR = 
RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR; + RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR; if (mode == 's') { /* Jump into ROM at address 0 */ diff --git a/trunk/include/asm-blackfin/dpmc.h b/trunk/include/asm-blackfin/dpmc.h index 7f34cd384f12..686cf83a5269 100644 --- a/trunk/include/asm-blackfin/dpmc.h +++ b/trunk/include/asm-blackfin/dpmc.h @@ -1,7 +1,7 @@ /* * include/asm-blackfin/dpmc.h - Miscellaneous IOCTL commands for Dynamic Power * Management Controller Driver. - * Copyright (C) 2004-2008 Analog Device Inc. + * Copyright (C) 2004 Analog Device Inc. * */ #ifndef _BLACKFIN_DPMC_H_ @@ -65,14 +65,6 @@ void disable_wdog_timer(void); extern unsigned long get_cclk(void); extern unsigned long get_sclk(void); -struct bfin_dpmc_platform_data { - const unsigned int *tuple_tab; - unsigned short tabsize; - unsigned short vr_settling_time; /* in us */ -}; - -#define VRPAIR(vlev, freq) (((vlev) << 16) | ((freq) >> 16)) - #endif /* __KERNEL__ */ #endif /*_BLACKFIN_DPMC_H_*/ diff --git a/trunk/include/asm-blackfin/entry.h b/trunk/include/asm-blackfin/entry.h index c4f721e0d00d..562c6d3a3232 100644 --- a/trunk/include/asm-blackfin/entry.h +++ b/trunk/include/asm-blackfin/entry.h @@ -17,11 +17,6 @@ #define PF_DTRACE_OFF 1 #define PF_DTRACE_BIT 5 -/* - * NOTE! The single-stepping code assumes that all interrupt handlers - * start by saving SYSCFG on the stack with their first instruction. - */ - /* This one is used for exceptions, emulation, and NMI. It doesn't push RETI and doesn't do cli. */ #define SAVE_ALL_SYS save_context_no_interrupts diff --git a/trunk/include/asm-blackfin/mach-bf527/bfin_serial_5xx.h b/trunk/include/asm-blackfin/mach-bf527/bfin_serial_5xx.h index 26e3c8076b4e..f0ab2736a680 100644 --- a/trunk/include/asm-blackfin/mach-bf527/bfin_serial_5xx.h +++ b/trunk/include/asm-blackfin/mach-bf527/bfin_serial_5xx.h @@ -44,15 +44,10 @@ #define UART_PUT_CHAR(uart, v) bfin_write16(((uart)->port.membase + OFFSET_THR), v) #define UART_PUT_DLL(uart, v) bfin_write16(((uart)->port.membase + OFFSET_DLL), v) #define UART_PUT_IER(uart, v) bfin_write16(((uart)->port.membase + OFFSET_IER), v) -#define UART_SET_IER(uart, v) UART_PUT_IER(uart, UART_GET_IER(uart) | (v)) -#define UART_CLEAR_IER(uart, v) UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v)) #define UART_PUT_DLH(uart, v) bfin_write16(((uart)->port.membase + OFFSET_DLH), v) #define UART_PUT_LCR(uart, v) bfin_write16(((uart)->port.membase + OFFSET_LCR), v) #define UART_PUT_GCTL(uart, v) bfin_write16(((uart)->port.membase + OFFSET_GCTL), v) -#define UART_SET_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0) -#define UART_CLEAR_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0) - #if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS) # define CONFIG_SERIAL_BFIN_CTSRTS diff --git a/trunk/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h b/trunk/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h index d016603b6615..fbe88dee3e2d 100644 --- a/trunk/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h +++ b/trunk/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h @@ -44,15 +44,10 @@ #define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v) #define UART_PUT_DLL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLL),v) #define UART_PUT_IER(uart,v) bfin_write16(((uart)->port.membase + OFFSET_IER),v) -#define UART_SET_IER(uart,v) UART_PUT_IER(uart, UART_GET_IER(uart) | (v)) -#define UART_CLEAR_IER(uart,v) UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v)) 
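Note: the Blackfin serial headers above drop the UART_SET_IER()/UART_CLEAR_IER() wrappers, so only the plain UART_GET_IER()/UART_PUT_IER() accessors remain here. The read-modify-write pattern those wrappers captured, demonstrated on a plain variable so it compiles anywhere; the register name is a stand-in, not a real MMR:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t fake_ier;       /* stands in for a memory-mapped IER register */

    #define GET_IER()       (fake_ier)
    #define PUT_IER(v)      (fake_ier = (uint16_t)(v))

    /* read-modify-write helpers in the style of the removed macros */
    #define SET_IER(bits)   PUT_IER(GET_IER() | (bits))
    #define CLEAR_IER(bits) PUT_IER(GET_IER() & ~(bits))

    int main(void)
    {
        SET_IER(0x0001 | 0x0002);       /* enable two interrupt sources */
        CLEAR_IER(0x0002);              /* later, mask one of them again */
        printf("0x%04x\n", GET_IER());  /* prints 0x0001 */
        return 0;
    }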
#define UART_PUT_DLH(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLH),v) #define UART_PUT_LCR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_LCR),v) #define UART_PUT_GCTL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_GCTL),v) -#define UART_SET_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0) -#define UART_CLEAR_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0) - #ifdef CONFIG_BFIN_UART0_CTSRTS # define CONFIG_SERIAL_BFIN_CTSRTS # ifndef CONFIG_UART0_CTS_PIN diff --git a/trunk/include/asm-blackfin/mach-bf533/defBF532.h b/trunk/include/asm-blackfin/mach-bf533/defBF532.h index 0ab4dd7494cf..17e1548cec08 100644 --- a/trunk/include/asm-blackfin/mach-bf533/defBF532.h +++ b/trunk/include/asm-blackfin/mach-bf533/defBF532.h @@ -468,8 +468,6 @@ #define VLEV_110 0x00B0 /* VLEV = 1.10 V (-5% - +10% Accuracy) */ #define VLEV_115 0x00C0 /* VLEV = 1.15 V (-5% - +10% Accuracy) */ #define VLEV_120 0x00D0 /* VLEV = 1.20 V (-5% - +10% Accuracy) */ -#define VLEV_125 0x00E0 /* VLEV = 1.25 V (-5% - +10% Accuracy) */ -#define VLEV_130 0x00F0 /* VLEV = 1.30 V (-5% - +10% Accuracy) */ #define WAKE 0x0100 /* Enable RTC/Reset Wakeup From Hibernate */ #define SCKELOW 0x8000 /* Do Not Drive SCKE High During Reset After Hibernate */ diff --git a/trunk/include/asm-blackfin/mach-bf533/irq.h b/trunk/include/asm-blackfin/mach-bf533/irq.h index 5aa38e5da6b7..832e6f6122da 100644 --- a/trunk/include/asm-blackfin/mach-bf533/irq.h +++ b/trunk/include/asm-blackfin/mach-bf533/irq.h @@ -66,13 +66,12 @@ Core Emulation ** DMA8/9 Interrupt IVG13 28 DMA10/11 Interrupt IVG13 29 Watchdog Timer IVG13 30 - - Softirq IVG14 31 - System Call -- + Software Interrupt 1 IVG14 31 + Software Interrupt 2 -- (lowest priority) IVG15 32 * */ -#define SYS_IRQS 31 -#define NR_PERI_INTS 24 +#define SYS_IRQS 32 +#define NR_PERI_INTS 24 /* The ABSTRACT IRQ definitions */ /** the first seven of the following are fixed, the rest you change if you need to **/ @@ -97,7 +96,7 @@ Core Emulation ** #define IRQ_SPORT0_TX 17 /*DMA2 Interrupt (SPORT0 TX) */ #define IRQ_SPORT1_RX 18 /*DMA3 Interrupt (SPORT1 RX) */ #define IRQ_SPORT1_TX 19 /*DMA4 Interrupt (SPORT1 TX) */ -#define IRQ_SPI 20 /*DMA5 Interrupt (SPI) */ +#define IRQ_SPI 20 /*DMA5 Interrupt (SPI) */ #define IRQ_UART_RX 21 /*DMA6 Interrupt (UART RX) */ #define IRQ_UART_TX 22 /*DMA7 Interrupt (UART TX) */ #define IRQ_TMR0 23 /*Timer 0 */ @@ -109,6 +108,9 @@ Core Emulation ** #define IRQ_MEM_DMA1 29 /*DMA10/11 Interrupt (Memory DMA Stream 1) */ #define IRQ_WATCH 30 /*Watch Dog Timer */ +#define IRQ_SW_INT1 31 /*Software Int 1 */ +#define IRQ_SW_INT2 32 /*Software Int 2 (reserved for SYSCALL) */ + #define IRQ_PF0 33 #define IRQ_PF1 34 #define IRQ_PF2 35 diff --git a/trunk/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h b/trunk/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h index f79d1a0e9129..fd100a415b98 100644 --- a/trunk/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h +++ b/trunk/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h @@ -44,15 +44,10 @@ #define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v) #define UART_PUT_DLL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLL),v) #define UART_PUT_IER(uart,v) bfin_write16(((uart)->port.membase + OFFSET_IER),v) -#define UART_SET_IER(uart,v) UART_PUT_IER(uart, UART_GET_IER(uart) | (v)) -#define UART_CLEAR_IER(uart,v) UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v)) #define UART_PUT_DLH(uart,v) bfin_write16(((uart)->port.membase + 
OFFSET_DLH),v) #define UART_PUT_LCR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_LCR),v) #define UART_PUT_GCTL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_GCTL),v) -#define UART_SET_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0) -#define UART_CLEAR_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0) - #if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS) # define CONFIG_SERIAL_BFIN_CTSRTS diff --git a/trunk/include/asm-blackfin/mach-bf537/irq.h b/trunk/include/asm-blackfin/mach-bf537/irq.h index 2e68a8a1e730..be6f2ff77f31 100644 --- a/trunk/include/asm-blackfin/mach-bf537/irq.h +++ b/trunk/include/asm-blackfin/mach-bf537/irq.h @@ -34,23 +34,24 @@ /* * Interrupt source definitions - * Event Source Core Event Name - * Core Emulation ** - * Events (highest priority) EMU 0 - * Reset RST 1 - * NMI NMI 2 - * Exception EVX 3 - * Reserved -- 4 - * Hardware Error IVHW 5 - * Core Timer IVTMR 6 - * ..... - * - * Softirq IVG14 - * System Call -- - * (lowest priority) IVG15 + Event Source Core Event Name +Core Emulation ** + Events (highest priority) EMU 0 + Reset RST 1 + NMI NMI 2 + Exception EVX 3 + Reserved -- 4 + Hardware Error IVHW 5 + Core Timer IVTMR 6 * + +..... + + Software Interrupt 1 IVG14 31 + Software Interrupt 2 -- + (lowest priority) IVG15 32 * */ -#define SYS_IRQS 39 +#define SYS_IRQS 41 #define NR_PERI_INTS 32 /* The ABSTRACT IRQ definitions */ @@ -94,8 +95,10 @@ #define IRQ_PORTG_INTB 35 /* PF Port G (PF15:0) Interrupt B */ #define IRQ_MEM_DMA0 36 /*(Memory DMA Stream 0) */ #define IRQ_MEM_DMA1 37 /*(Memory DMA Stream 1) */ -#define IRQ_PROG_INTB 38 /* PF Ports F (PF15:0) Interrupt B */ +#define IRQ_PROG_INTB 38 /* PF Ports F (PF15:0) Interrupt B */ #define IRQ_WATCH 38 /*Watch Dog Timer */ +#define IRQ_SW_INT1 40 /*Software Int 1 */ +#define IRQ_SW_INT2 41 /*Software Int 2 (reserved for SYSCALL) */ #define IRQ_PPI_ERROR 42 /*PPI Error Interrupt */ #define IRQ_CAN_ERROR 43 /*CAN Error Interrupt */ diff --git a/trunk/include/asm-blackfin/mach-bf548/bfin_serial_5xx.h b/trunk/include/asm-blackfin/mach-bf548/bfin_serial_5xx.h index 5eb46a77d919..6547027cd3e6 100644 --- a/trunk/include/asm-blackfin/mach-bf548/bfin_serial_5xx.h +++ b/trunk/include/asm-blackfin/mach-bf548/bfin_serial_5xx.h @@ -54,9 +54,6 @@ #define UART_PUT_GCTL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_GCTL),v) #define UART_PUT_MCR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_MCR),v) -#define UART_SET_DLAB(uart) /* MMRs not muxed on BF54x */ -#define UART_CLEAR_DLAB(uart) /* MMRs not muxed on BF54x */ - #if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS) # define CONFIG_SERIAL_BFIN_CTSRTS diff --git a/trunk/include/asm-blackfin/mach-bf548/defBF54x_base.h b/trunk/include/asm-blackfin/mach-bf548/defBF54x_base.h index e022e896cb18..08f90c21fe8a 100644 --- a/trunk/include/asm-blackfin/mach-bf548/defBF54x_base.h +++ b/trunk/include/asm-blackfin/mach-bf548/defBF54x_base.h @@ -2329,26 +2329,6 @@ #define KPADWE 0x1000 /* Keypad Wake-Up Enable */ #define ROTWE 0x2000 /* Rotary Wake-Up Enable */ -#define FREQ_333 0x0001 /* Switching Frequency Is 333 kHz */ -#define FREQ_667 0x0002 /* Switching Frequency Is 667 kHz */ -#define FREQ_1000 0x0003 /* Switching Frequency Is 1 MHz */ - -#define GAIN_5 0x0000 /* GAIN = 5*/ -#define GAIN_10 0x0004 /* GAIN = 1*/ -#define GAIN_20 0x0008 /* GAIN = 2*/ -#define GAIN_50 0x000C /* GAIN = 5*/ - -#define VLEV_085 0x0060 /* VLEV = 0.85 V (-5% - +10% 
Accuracy) */ -#define VLEV_090 0x0070 /* VLEV = 0.90 V (-5% - +10% Accuracy) */ -#define VLEV_095 0x0080 /* VLEV = 0.95 V (-5% - +10% Accuracy) */ -#define VLEV_100 0x0090 /* VLEV = 1.00 V (-5% - +10% Accuracy) */ -#define VLEV_105 0x00A0 /* VLEV = 1.05 V (-5% - +10% Accuracy) */ -#define VLEV_110 0x00B0 /* VLEV = 1.10 V (-5% - +10% Accuracy) */ -#define VLEV_115 0x00C0 /* VLEV = 1.15 V (-5% - +10% Accuracy) */ -#define VLEV_120 0x00D0 /* VLEV = 1.20 V (-5% - +10% Accuracy) */ -#define VLEV_125 0x00E0 /* VLEV = 1.25 V (-5% - +10% Accuracy) */ -#define VLEV_130 0x00F0 /* VLEV = 1.30 V (-5% - +10% Accuracy) */ - /* Bit masks for NFC_CTL */ #define WR_DLY 0xf /* Write Strobe Delay */ diff --git a/trunk/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h b/trunk/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h index 7a9628769296..8a4e66d1db37 100644 --- a/trunk/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h +++ b/trunk/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h @@ -44,15 +44,10 @@ #define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v) #define UART_PUT_DLL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLL),v) #define UART_PUT_IER(uart,v) bfin_write16(((uart)->port.membase + OFFSET_IER),v) -#define UART_SET_IER(uart,v) UART_PUT_IER(uart, UART_GET_IER(uart) | (v)) -#define UART_CLEAR_IER(uart,v) UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v)) #define UART_PUT_DLH(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLH),v) #define UART_PUT_LCR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_LCR),v) #define UART_PUT_GCTL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_GCTL),v) -#define UART_SET_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0) -#define UART_CLEAR_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0) - #ifdef CONFIG_BFIN_UART0_CTSRTS # define CONFIG_SERIAL_BFIN_CTSRTS # ifndef CONFIG_UART0_CTS_PIN diff --git a/trunk/include/asm-blackfin/mach-bf561/defBF561.h b/trunk/include/asm-blackfin/mach-bf561/defBF561.h index 1ab50e906fe7..366c9b9a0cb7 100644 --- a/trunk/include/asm-blackfin/mach-bf561/defBF561.h +++ b/trunk/include/asm-blackfin/mach-bf561/defBF561.h @@ -868,34 +868,6 @@ #define CHIPID_FAMILY 0x0FFFF000 #define CHIPID_MANUFACTURE 0x00000FFE -/* VR_CTL Masks */ -#define FREQ 0x0003 /* Switching Oscillator Frequency For Regulator */ -#define HIBERNATE 0x0000 /* Powerdown/Bypass On-Board Regulation */ -#define FREQ_333 0x0001 /* Switching Frequency Is 333 kHz */ -#define FREQ_667 0x0002 /* Switching Frequency Is 667 kHz */ -#define FREQ_1000 0x0003 /* Switching Frequency Is 1 MHz */ - -#define GAIN 0x000C /* Voltage Level Gain */ -#define GAIN_5 0x0000 /* GAIN = 5*/ -#define GAIN_10 0x0004 /* GAIN = 1*/ -#define GAIN_20 0x0008 /* GAIN = 2*/ -#define GAIN_50 0x000C /* GAIN = 5*/ - -#define VLEV 0x00F0 /* Internal Voltage Level */ -#define VLEV_085 0x0060 /* VLEV = 0.85 V (-5% - +10% Accuracy) */ -#define VLEV_090 0x0070 /* VLEV = 0.90 V (-5% - +10% Accuracy) */ -#define VLEV_095 0x0080 /* VLEV = 0.95 V (-5% - +10% Accuracy) */ -#define VLEV_100 0x0090 /* VLEV = 1.00 V (-5% - +10% Accuracy) */ -#define VLEV_105 0x00A0 /* VLEV = 1.05 V (-5% - +10% Accuracy) */ -#define VLEV_110 0x00B0 /* VLEV = 1.10 V (-5% - +10% Accuracy) */ -#define VLEV_115 0x00C0 /* VLEV = 1.15 V (-5% - +10% Accuracy) */ -#define VLEV_120 0x00D0 /* VLEV = 1.20 V (-5% - +10% Accuracy) */ -#define VLEV_125 0x00E0 /* VLEV = 1.25 V (-5% - +10% Accuracy) */ -#define VLEV_130 0x00F0 /* VLEV = 1.30 V (-5% - 
+10% Accuracy) */ - -#define WAKE 0x0100 /* Enable RTC/Reset Wakeup From Hibernate */ -#define SCKELOW 0x8000 /* Do Not Drive SCKE High During Reset After Hibernate */ - /* PLL_DIV Masks */ #define SCLK_DIV(x) (x) /* SCLK = VCO / x */ diff --git a/trunk/include/asm-blackfin/mach-bf561/irq.h b/trunk/include/asm-blackfin/mach-bf561/irq.h index 6698389c5564..83f0383957d2 100644 --- a/trunk/include/asm-blackfin/mach-bf561/irq.h +++ b/trunk/include/asm-blackfin/mach-bf561/irq.h @@ -118,13 +118,12 @@ Supplemental interrupt 0 IVG7 69 supplemental interrupt 1 IVG7 70 - Softirq IVG14 - System Call -- - (lowest priority) IVG15 - + Software Interrupt 1 IVG14 71 + Software Interrupt 2 IVG15 72 * + (lowest priority) **********************************************************************/ -#define SYS_IRQS 71 +#define SYS_IRQS 72 #define NR_PERI_INTS 64 /* @@ -238,7 +237,9 @@ #define IRQ_RESERVED_2 (IVG_BASE + 61) /* Reserved interrupt */ #define IRQ_SUPPLE_0 (IVG_BASE + 62) /* Supplemental interrupt 0 */ #define IRQ_SUPPLE_1 (IVG_BASE + 63) /* supplemental interrupt 1 */ - +#define IRQ_SW_INT1 71 /* Software Interrupt 1 */ +#define IRQ_SW_INT2 72 /* Software Interrupt 2 */ + /* reserved for SYSCALL */ #define IRQ_PF0 73 #define IRQ_PF1 74 #define IRQ_PF2 75 diff --git a/trunk/include/asm-blackfin/mach-common/context.S b/trunk/include/asm-blackfin/mach-common/context.S index c0e630edfb9a..fd0ebe1862b8 100644 --- a/trunk/include/asm-blackfin/mach-common/context.S +++ b/trunk/include/asm-blackfin/mach-common/context.S @@ -27,11 +27,6 @@ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ -/* - * NOTE! The single-stepping code assumes that all interrupt handlers - * start by saving SYSCFG on the stack with their first instruction. - */ - /* * Code to save processor context. * We even save the register which are preserved by a function call diff --git a/trunk/include/asm-blackfin/time.h b/trunk/include/asm-blackfin/time.h index ddc43ce38533..6e5859b6ea32 100644 --- a/trunk/include/asm-blackfin/time.h +++ b/trunk/include/asm-blackfin/time.h @@ -24,8 +24,6 @@ #ifndef CONFIG_CPU_FREQ #define TIME_SCALE 1 -#define __bfin_cycles_off (0) -#define __bfin_cycles_mod (0) #else /* * Blackfin CPU frequency scaling supports max Core Clock 1, 1/2 and 1/4 . @@ -33,8 +31,6 @@ * adjust the Core Timer Presale Register. This way we don't lose time. */ #define TIME_SCALE 4 -extern unsigned long long __bfin_cycles_off; -extern unsigned int __bfin_cycles_mod; #endif #endif diff --git a/trunk/include/asm-m68k/machw.h b/trunk/include/asm-m68k/machw.h index 35624998291c..d2e0e25d5c90 100644 --- a/trunk/include/asm-m68k/machw.h +++ b/trunk/include/asm-m68k/machw.h @@ -66,6 +66,36 @@ struct MAC_SCC # define mac_scc ((*(volatile struct SCC*)MAC_SCC_BAS)) #endif +/* hardware stuff */ + +#define MACHW_DECLARE(name) unsigned name : 1 +#define MACHW_SET(name) (mac_hw_present.name = 1) +#define MACHW_PRESENT(name) (mac_hw_present.name) + +struct mac_hw_present { + /* video hardware */ + /* sound hardware */ + /* disk storage interfaces */ + MACHW_DECLARE(MAC_SCSI_80); /* Directly mapped NCR5380 */ + MACHW_DECLARE(MAC_SCSI_96); /* 53c9[46] */ + MACHW_DECLARE(MAC_SCSI_96_2); /* 2nd 53c9[46] Q900 and Q950 */ + MACHW_DECLARE(IDE); /* IDE Interface */ + /* other I/O hardware */ + MACHW_DECLARE(SCC); /* Serial Communications Contr. 
*/ + /* DMA */ + MACHW_DECLARE(SCSI_DMA); /* DMA for the NCR5380 */ + /* real time clocks */ + MACHW_DECLARE(RTC_CLK); /* clock chip */ + /* supporting hardware */ + MACHW_DECLARE(VIA1); /* Versatile Interface Ad. 1 */ + MACHW_DECLARE(VIA2); /* Versatile Interface Ad. 2 */ + MACHW_DECLARE(RBV); /* Versatile Interface Ad. 2+ */ + /* NUBUS */ + MACHW_DECLARE(NUBUS); /* NUBUS */ +}; + +extern struct mac_hw_present mac_hw_present; + #endif /* __ASSEMBLY__ */ #endif /* linux/machw.h */ diff --git a/trunk/include/asm-mips/types.h b/trunk/include/asm-mips/types.h index bcbb8d675af5..7a2ee4f40131 100644 --- a/trunk/include/asm-mips/types.h +++ b/trunk/include/asm-mips/types.h @@ -19,6 +19,8 @@ typedef unsigned short umode_t; +#endif + #endif /* __ASSEMBLY__ */ /* diff --git a/trunk/include/asm-mn10300/processor.h b/trunk/include/asm-mn10300/processor.h index 73239271873d..f1b081f53468 100644 --- a/trunk/include/asm-mn10300/processor.h +++ b/trunk/include/asm-mn10300/processor.h @@ -58,7 +58,7 @@ extern struct mn10300_cpuinfo boot_cpu_data; extern void identify_cpu(struct mn10300_cpuinfo *); extern void print_cpu_info(struct mn10300_cpuinfo *); extern void dodgy_tsc(void); -#define cpu_relax() barrier() +#define cpu_relax() do {} while (0) /* * User space process size: 1.75GB (default). diff --git a/trunk/include/asm-powerpc/io.h b/trunk/include/asm-powerpc/io.h index e0062d73db1c..afae0697e8ce 100644 --- a/trunk/include/asm-powerpc/io.h +++ b/trunk/include/asm-powerpc/io.h @@ -2,7 +2,7 @@ #define _ASM_POWERPC_IO_H #ifdef __KERNEL__ -/* +/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version @@ -18,9 +18,6 @@ extern int check_legacy_ioport(unsigned long base_port); #define _PNPWRP 0xa79 #define PNPBIOS_BASE 0xf000 -#include -#include - #include #include #include @@ -747,9 +744,6 @@ static inline void * bus_to_virt(unsigned long address) #define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set) -void __iomem *devm_ioremap_prot(struct device *dev, resource_size_t offset, - size_t size, unsigned long flags); - #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_IO_H */ diff --git a/trunk/include/asm-powerpc/kvm_host.h b/trunk/include/asm-powerpc/kvm_host.h index 81a69d711017..04ffbb8e0a35 100644 --- a/trunk/include/asm-powerpc/kvm_host.h +++ b/trunk/include/asm-powerpc/kvm_host.h @@ -59,7 +59,6 @@ struct kvm_vcpu_stat { u32 emulated_inst_exits; u32 dec_exits; u32 ext_intr_exits; - u32 halt_wakeup; }; struct tlbe { diff --git a/trunk/include/asm-powerpc/kvm_ppc.h b/trunk/include/asm-powerpc/kvm_ppc.h index b35a7e3ef978..7ac820308a7e 100644 --- a/trunk/include/asm-powerpc/kvm_ppc.h +++ b/trunk/include/asm-powerpc/kvm_ppc.h @@ -77,17 +77,12 @@ static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception) clear_bit(priority, &vcpu->arch.pending_exceptions); } -/* Helper function for "full" MSR writes. No need to call this if only EE is - * changing. 
*/ static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) { if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR)) kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR); vcpu->arch.msr = new_msr; - - if (vcpu->arch.msr & MSR_WE) - kvm_vcpu_block(vcpu); } #endif /* __POWERPC_KVM_PPC_H__ */ diff --git a/trunk/include/asm-powerpc/pgtable-ppc32.h b/trunk/include/asm-powerpc/pgtable-ppc32.h index c08e714d0c42..7c97b5a08d08 100644 --- a/trunk/include/asm-powerpc/pgtable-ppc32.h +++ b/trunk/include/asm-powerpc/pgtable-ppc32.h @@ -209,13 +209,6 @@ extern int icache_44x_need_flush; * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 * - - - - - - U0 U1 U2 U3 W I M G E - UX UW UR SX SW SR * - * Newer 440 cores (440x6 as used on AMCC 460EX/460GT) have additional - * TLB2 storage attibute fields. Those are: - * - * TLB2: - * 0...10 11 12 13 14 15 16...31 - * no change WL1 IL1I IL1D IL2I IL2D no change - * * There are some constrains and options, to decide mapping software bits * into TLB entry. * diff --git a/trunk/include/asm-powerpc/spu.h b/trunk/include/asm-powerpc/spu.h index 6abead6e681a..e3c845b0f764 100644 --- a/trunk/include/asm-powerpc/spu.h +++ b/trunk/include/asm-powerpc/spu.h @@ -100,7 +100,6 @@ /* Flag indicating progress during context switch. */ #define SPU_CONTEXT_SWITCH_PENDING 0UL -#define SPU_CONTEXT_FAULT_PENDING 1UL struct spu_context; struct spu_runqueue; @@ -129,11 +128,9 @@ struct spu { unsigned int irqs[3]; u32 node; u64 flags; + u64 dar; + u64 dsisr; u64 class_0_pending; - u64 class_0_dar; - u64 class_0_dsisr; - u64 class_1_dar; - u64 class_1_dsisr; size_t ls_size; unsigned int slb_replace; struct mm_struct *mm; @@ -146,7 +143,7 @@ struct spu { void (* wbox_callback)(struct spu *spu); void (* ibox_callback)(struct spu *spu); - void (* stop_callback)(struct spu *spu, int irq); + void (* stop_callback)(struct spu *spu); void (* mfc_callback)(struct spu *spu); char irq_c0[8]; diff --git a/trunk/include/asm-powerpc/spu_csa.h b/trunk/include/asm-powerpc/spu_csa.h index 129ec148d451..0ab6bff86078 100644 --- a/trunk/include/asm-powerpc/spu_csa.h +++ b/trunk/include/asm-powerpc/spu_csa.h @@ -254,8 +254,7 @@ struct spu_state { u64 spu_chnldata_RW[32]; u32 spu_mailbox_data[4]; u32 pu_mailbox_data[1]; - u64 class_0_dar, class_0_dsisr, class_0_pending; - u64 class_1_dar, class_1_dsisr; + u64 dar, dsisr, class_0_pending; unsigned long suspend_time; spinlock_t register_lock; }; diff --git a/trunk/include/asm-s390/kvm_host.h b/trunk/include/asm-s390/kvm_host.h index 18cbd8a39796..f8204a4f2e02 100644 --- a/trunk/include/asm-s390/kvm_host.h +++ b/trunk/include/asm-s390/kvm_host.h @@ -104,7 +104,6 @@ struct sie_block { struct kvm_vcpu_stat { u32 exit_userspace; - u32 exit_null; u32 exit_external_request; u32 exit_external_interrupt; u32 exit_stop_request; diff --git a/trunk/include/asm-s390/page.h b/trunk/include/asm-s390/page.h index 12fd9c4f0f15..f0f4579eac13 100644 --- a/trunk/include/asm-s390/page.h +++ b/trunk/include/asm-s390/page.h @@ -125,17 +125,6 @@ page_get_storage_key(unsigned long addr) return skey; } -#ifdef CONFIG_PAGE_STATES - -struct page; -void arch_free_page(struct page *page, int order); -void arch_alloc_page(struct page *page, int order); - -#define HAVE_ARCH_FREE_PAGE -#define HAVE_ARCH_ALLOC_PAGE - -#endif - #endif /* !__ASSEMBLY__ */ /* to align the pointer to the (next) page boundary */ diff --git a/trunk/include/asm-s390/ptrace.h b/trunk/include/asm-s390/ptrace.h index d7d4e2eb3e6f..441d7c260857 100644 --- a/trunk/include/asm-s390/ptrace.h +++ 
b/trunk/include/asm-s390/ptrace.h @@ -471,8 +471,6 @@ struct task_struct; extern void user_enable_single_step(struct task_struct *); extern void user_disable_single_step(struct task_struct *); -#define __ARCH_WANT_COMPAT_SYS_PTRACE - #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0) #define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN) #define regs_return_value(regs)((regs)->gprs[2]) diff --git a/trunk/include/asm-s390/system.h b/trunk/include/asm-s390/system.h index e0d4500d5f95..c819ae25a842 100644 --- a/trunk/include/asm-s390/system.h +++ b/trunk/include/asm-s390/system.h @@ -116,12 +116,6 @@ extern void pfault_fini(void); #define pfault_fini() do { } while (0) #endif /* CONFIG_PFAULT */ -#ifdef CONFIG_PAGE_STATES -extern void cmma_init(void); -#else -static inline void cmma_init(void) { } -#endif - #define finish_arch_switch(prev) do { \ set_fs(current->thread.mm_segment); \ account_vtime(prev); \ diff --git a/trunk/include/asm-sh/cpu-sh3/dma.h b/trunk/include/asm-sh/cpu-sh3/dma.h index 6813c3220a1d..092ff9d872c3 100644 --- a/trunk/include/asm-sh/cpu-sh3/dma.h +++ b/trunk/include/asm-sh/cpu-sh3/dma.h @@ -3,19 +3,19 @@ #if defined(CONFIG_CPU_SUBTYPE_SH7720) || \ - defined(CONFIG_CPU_SUBTYPE_SH7721) + defined(CONFIG_CPU_SUBTYPE_SH7721) || \ + defined(CONFIG_CPU_SUBTYPE_SH7709) #define SH_DMAC_BASE 0xa4010020 -#else -#define SH_DMAC_BASE 0xa4000020 -#endif -#if defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7709) #define DMTE0_IRQ 48 #define DMTE1_IRQ 49 #define DMTE2_IRQ 50 #define DMTE3_IRQ 51 #define DMTE4_IRQ 76 #define DMTE5_IRQ 77 + +#else +#define SH_DMAC_BASE 0xa4000020 #endif /* Definitions for the SuperH DMAC */ diff --git a/trunk/include/asm-sh/hw_irq.h b/trunk/include/asm-sh/hw_irq.h index 7438d1e21bc9..c958fdaa0095 100644 --- a/trunk/include/asm-sh/hw_irq.h +++ b/trunk/include/asm-sh/hw_irq.h @@ -79,10 +79,6 @@ struct intc_desc { struct intc_sense_reg *sense_regs; unsigned int nr_sense_regs; char *name; -#ifdef CONFIG_CPU_SH3 - struct intc_mask_reg *ack_regs; - unsigned int nr_ack_regs; -#endif }; #define _INTC_ARRAY(a) a, sizeof(a)/sizeof(*a) @@ -95,25 +91,10 @@ struct intc_desc symbol __initdata = { \ chipname, \ } -#ifdef CONFIG_CPU_SH3 -#define DECLARE_INTC_DESC_ACK(symbol, chipname, vectors, groups, \ - mask_regs, prio_regs, sense_regs, ack_regs) \ -struct intc_desc symbol __initdata = { \ - _INTC_ARRAY(vectors), _INTC_ARRAY(groups), \ - _INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs), \ - _INTC_ARRAY(sense_regs), \ - chipname, \ - _INTC_ARRAY(ack_regs), \ -} -#endif - void __init register_intc_controller(struct intc_desc *desc); int intc_set_priority(unsigned int irq, unsigned int prio); void __init plat_irq_setup(void); -#ifdef CONFIG_CPU_SH3 -void __init plat_irq_setup_sh3(void); -#endif enum { IRQ_MODE_IRQ, IRQ_MODE_IRQ7654, IRQ_MODE_IRQ3210, IRQ_MODE_IRL7654_MASK, IRQ_MODE_IRL3210_MASK, diff --git a/trunk/include/asm-sh/io.h b/trunk/include/asm-sh/io.h index a4fbf0c84fb1..356e50d06745 100644 --- a/trunk/include/asm-sh/io.h +++ b/trunk/include/asm-sh/io.h @@ -268,6 +268,11 @@ unsigned long long peek_real_address_q(unsigned long long addr); unsigned long long poke_real_address_q(unsigned long long addr, unsigned long long val); +/* arch/sh/mm/ioremap_64.c */ +unsigned long onchip_remap(unsigned long addr, unsigned long size, + const char *name); +extern void onchip_unmap(unsigned long vaddr); + #if !defined(CONFIG_MMU) #define virt_to_phys(address) ((unsigned long)(address)) #define phys_to_virt(address) ((void 
*)(address)) @@ -297,16 +302,9 @@ unsigned long long poke_real_address_q(unsigned long long addr, void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags); void __iounmap(void __iomem *addr); - -/* arch/sh/mm/ioremap_64.c */ -unsigned long onchip_remap(unsigned long addr, unsigned long size, - const char *name); -extern void onchip_unmap(unsigned long vaddr); #else #define __ioremap(offset, size, flags) ((void __iomem *)(offset)) #define __iounmap(addr) do { } while (0) -#define onchip_remap(addr, size, name) (addr) -#define onchip_unmap(addr) do { } while (0) #endif /* CONFIG_MMU */ static inline void __iomem * diff --git a/trunk/include/asm-sh/keyboard.h b/trunk/include/asm-sh/keyboard.h new file mode 100644 index 000000000000..31dcc4fa5f28 --- /dev/null +++ b/trunk/include/asm-sh/keyboard.h @@ -0,0 +1,13 @@ +#ifndef __ASM_SH_KEYBOARD_H +#define __ASM_SH_KEYBOARD_H +/* + * $Id: keyboard.h,v 1.1.1.1 2001/10/15 20:45:09 mrbrown Exp $ + */ + +#include +#include + +#ifdef CONFIG_SH_MPC1211 +#include +#endif +#endif diff --git a/trunk/include/asm-sh/mmu_context.h b/trunk/include/asm-sh/mmu_context.h index 87e812f68bb0..fe58d00b250c 100644 --- a/trunk/include/asm-sh/mmu_context.h +++ b/trunk/include/asm-sh/mmu_context.h @@ -27,7 +27,6 @@ /* ASID is 8-bit value, so it can't be 0x100 */ #define MMU_NO_ASID 0x100 -#ifdef CONFIG_MMU #define asid_cache(cpu) (cpu_data[cpu].asid_cache) #define cpu_context(cpu, mm) ((mm)->context.id[cpu]) @@ -39,6 +38,7 @@ */ #define MMU_VPN_MASK 0xfffff000 +#ifdef CONFIG_MMU #if defined(CONFIG_SUPERH32) #include "mmu_context_32.h" #else @@ -129,8 +129,6 @@ static inline void switch_mm(struct mm_struct *prev, #define destroy_context(mm) do { } while (0) #define set_asid(asid) do { } while (0) #define get_asid() (0) -#define cpu_asid(cpu, mm) ({ (void)cpu; 0; }) -#define switch_and_save_asid(asid) (0) #define set_TTB(pgd) do { } while (0) #define get_TTB() (0) #define activate_context(mm,cpu) do { } while (0) diff --git a/trunk/include/asm-sh/mmzone.h b/trunk/include/asm-sh/mmzone.h index 2969253c4042..7969f381dff2 100644 --- a/trunk/include/asm-sh/mmzone.h +++ b/trunk/include/asm-sh/mmzone.h @@ -41,8 +41,6 @@ void __init plat_mem_setup(void); /* arch/sh/kernel/setup.c */ void __init setup_bootmem_allocator(unsigned long start_pfn); -void __init __add_active_range(unsigned int nid, unsigned long start_pfn, - unsigned long end_pfn); #endif /* __KERNEL__ */ #endif /* __ASM_SH_MMZONE_H */ diff --git a/trunk/include/asm-sh/mpc1211/dma.h b/trunk/include/asm-sh/mpc1211/dma.h new file mode 100644 index 000000000000..e506d1aaa0d0 --- /dev/null +++ b/trunk/include/asm-sh/mpc1211/dma.h @@ -0,0 +1,303 @@ +/* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $ + * linux/include/asm/dma.h: Defines for using and allocating dma channels. + * Written by Hennus Bergman, 1992. + * High DMA channel support & info by Hannu Savolainen + * and John Boyd, Nov. 1992. + */ + +#ifndef _ASM_MPC1211_DMA_H +#define _ASM_MPC1211_DMA_H + +#include /* And spinlocks */ +#include /* need byte IO */ +#include + + +#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER +#define dma_outb outb_p +#else +#define dma_outb outb +#endif + +#define dma_inb inb + +/* + * NOTES about DMA transfers: + * + * controller 1: channels 0-3, byte operations, ports 00-1F + * controller 2: channels 4-7, word operations, ports C0-DF + * + * - ALL registers are 8 bits only, regardless of transfer size + * - channel 4 is not used - cascades 1 into 2. 
+ * - channels 0-3 are byte - addresses/counts are for physical bytes
+ * - channels 5-7 are word - addresses/counts are for physical words
+ * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
+ * - transfer count loaded to registers is 1 less than actual count
+ * - controller 2 offsets are all even (2x offsets for controller 1)
+ * - page registers for 5-7 don't use data bit 0, represent 128K pages
+ * - page registers for 0-3 use bit 0, represent 64K pages
+ *
+ * DMA transfers are limited to the lower 16MB of _physical_ memory.
+ * Note that addresses loaded into registers must be _physical_ addresses,
+ * not logical addresses (which may differ if paging is active).
+ *
+ * Address mapping for channels 0-3:
+ *
+ * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * P7 ... P0 A7 ... A0 A7 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Address mapping for channels 5-7:
+ *
+ * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
+ * | ... | \ \ ... \ \ \ ... \ \
+ * | ... | \ \ ... \ \ \ ... \ (not used)
+ * | ... | \ \ ... \ \ \ ... \
+ * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
+ * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
+ * the hardware level, so odd-byte transfers aren't possible).
+ *
+ * Transfer count (_not # bytes_) is limited to 64K, represented as actual
+ * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
+ * and up to 128K bytes may be transferred on channels 5-7 in one operation.
+ *
+ */
+
+#define MAX_DMA_CHANNELS 8
+
+/* The maximum address that we can perform a DMA transfer to on this platform */
+#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x10000000)
+
+/* 8237 DMA controllers */
+#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
+#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
+
+/* DMA controller registers */
+#define DMA1_CMD_REG 0x08 /* command register (w) */
+#define DMA1_STAT_REG 0x08 /* status register (r) */
+#define DMA1_REQ_REG 0x09 /* request register (w) */
+#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
+#define DMA1_MODE_REG 0x0B /* mode register (w) */
+#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
+#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
+#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
+#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
+#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
+
+#define DMA2_CMD_REG 0xD0 /* command register (w) */
+#define DMA2_STAT_REG 0xD0 /* status register (r) */
+#define DMA2_REQ_REG 0xD2 /* request register (w) */
+#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
+#define DMA2_MODE_REG 0xD6 /* mode register (w) */
+#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
+#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
+#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
+#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
+#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
+
+#define DMA_ADDR_0 0x00 /* DMA address registers */
+#define DMA_ADDR_1 0x02
+#define DMA_ADDR_2 0x04
+#define DMA_ADDR_3 0x06
+#define DMA_ADDR_4 0xC0
+#define DMA_ADDR_5 0xC4
+#define DMA_ADDR_6 0xC8
+#define DMA_ADDR_7 0xCC
+
+#define DMA_CNT_0 0x01 /* DMA count registers */
+#define DMA_CNT_1 0x03
+#define DMA_CNT_2 0x05
+#define DMA_CNT_3 0x07
+#define DMA_CNT_4 0xC2
+#define DMA_CNT_5 0xC6
+#define DMA_CNT_6 0xCA
+#define DMA_CNT_7 0xCE
+
+#define DMA_PAGE_0 0x87 /* DMA page registers */
+#define DMA_PAGE_1 0x83
+#define DMA_PAGE_2 0x81
+#define DMA_PAGE_3 0x82
+#define DMA_PAGE_5 0x8B
+#define DMA_PAGE_6 0x89
+#define DMA_PAGE_7 0x8A
+
+#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
+#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
+#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
+
+#define DMA_AUTOINIT 0x10
+
+
+extern spinlock_t dma_spin_lock;
+
+static __inline__ unsigned long claim_dma_lock(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&dma_spin_lock, flags);
+ return flags;
+}
+
+static __inline__ void release_dma_lock(unsigned long flags)
+{
+ spin_unlock_irqrestore(&dma_spin_lock, flags);
+}
+
+/* enable/disable a specific DMA channel */
+static __inline__ void enable_dma(unsigned int dmanr)
+{
+ if (dmanr<=3)
+ dma_outb(dmanr, DMA1_MASK_REG);
+ else
+ dma_outb(dmanr & 3, DMA2_MASK_REG);
+}
+
+static __inline__ void disable_dma(unsigned int dmanr)
+{
+ if (dmanr<=3)
+ dma_outb(dmanr | 4, DMA1_MASK_REG);
+ else
+ dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
+}
+
+/* Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ * Use this once to initialize the FF to a known state.
+ * After that, keep track of it. :-)
+ * --- In order to do that, the DMA routines below should ---
+ * --- only be used while holding the DMA lock ! ---
+ */
+static __inline__ void clear_dma_ff(unsigned int dmanr)
+{
+ if (dmanr<=3)
+ dma_outb(0, DMA1_CLEAR_FF_REG);
+ else
+ dma_outb(0, DMA2_CLEAR_FF_REG);
+}
+
+/* set mode (above) for a specific DMA channel */
+static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+{
+ if (dmanr<=3)
+ dma_outb(mode | dmanr, DMA1_MODE_REG);
+ else
+ dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
+}
+
+/* Set only the page register bits of the transfer address.
+ * This is used for successive transfers when we know the contents of
+ * the lower 16 bits of the DMA current address register, but a 64k boundary
+ * may have been crossed.
+ */
+static __inline__ void set_dma_page(unsigned int dmanr, unsigned int pagenr)
+{
+ switch(dmanr) {
+ case 0:
+ dma_outb( pagenr & 0xff, DMA_PAGE_0);
+ dma_outb((pagenr >> 8) & 0xff, DMA_PAGE_0 + 0x400);
+ break;
+ case 1:
+ dma_outb( pagenr & 0xff, DMA_PAGE_1);
+ dma_outb((pagenr >> 8) & 0xff, DMA_PAGE_1 + 0x400);
+ break;
+ case 2:
+ dma_outb( pagenr & 0xff, DMA_PAGE_2);
+ dma_outb((pagenr >> 8) & 0xff, DMA_PAGE_2 + 0x400);
+ break;
+ case 3:
+ dma_outb( pagenr & 0xff, DMA_PAGE_3);
+ dma_outb((pagenr >> 8) & 0xff, DMA_PAGE_3 + 0x400);
+ break;
+ case 5:
+ dma_outb( pagenr & 0xfe, DMA_PAGE_5);
+ dma_outb((pagenr >> 8) & 0xff, DMA_PAGE_5 + 0x400);
+ break;
+ case 6:
+ dma_outb( pagenr & 0xfe, DMA_PAGE_6);
+ dma_outb((pagenr >> 8) & 0xff, DMA_PAGE_6 + 0x400);
+ break;
+ case 7:
+ dma_outb( pagenr & 0xfe, DMA_PAGE_7);
+ dma_outb((pagenr >> 8) & 0xff, DMA_PAGE_7 + 0x400);
+ break;
+ }
+}
+
+
+/* Set transfer address & page bits for specific DMA channel.
+ * Assumes dma flipflop is clear.
+ */ +static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) +{ + set_dma_page(dmanr, a>>16); + if (dmanr <= 3) { + dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); + dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); + } else { + dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); + dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); + } +} + + +/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for + * a specific DMA channel. + * You must ensure the parameters are valid. + * NOTE: from a manual: "the number of transfers is one more + * than the initial word count"! This is taken into account. + * Assumes dma flip-flop is clear. + * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. + */ +static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) +{ + count--; + if (dmanr <= 3) { + dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); + dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); + } else { + dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); + dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); + } +} + + +/* Get DMA residue count. After a DMA transfer, this + * should return zero. Reading this while a DMA transfer is + * still in progress will return unpredictable results. + * If called before the channel has been used, it may return 1. + * Otherwise, it returns the number of _bytes_ left to transfer. + * + * Assumes DMA flip-flop is clear. + */ +static __inline__ int get_dma_residue(unsigned int dmanr) +{ + unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE + : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE; + + /* using short to get 16-bit wrap around */ + unsigned short count; + + count = 1 + dma_inb(io_port); + count += dma_inb(io_port) << 8; + return (dmanr<=3)? count : (count<<1); +} + + +/* These are in kernel/dma.c: */ +extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */ +extern void free_dma(unsigned int dmanr); /* release it again */ + +/* From PCI */ + +#ifdef CONFIG_PCI +extern int isa_dma_bridge_buggy; +#else +#define isa_dma_bridge_buggy (0) +#endif + +#endif /* _ASM_MPC1211_DMA_H */ diff --git a/trunk/include/asm-sh/mpc1211/io.h b/trunk/include/asm-sh/mpc1211/io.h new file mode 100644 index 000000000000..6298370bec2d --- /dev/null +++ b/trunk/include/asm-sh/mpc1211/io.h @@ -0,0 +1,22 @@ +/* + * include/asm-sh/mpc1211/io.h + * + * Copyright 2001 Saito.K & Jeanne + * + * IO functions for an Interface MPC-1211 + */ + +#ifndef _ASM_SH_IO_MPC1211_H +#define _ASM_SH_IO_MPC1211_H + +#include + +extern int mpc1211_irq_demux(int irq); + +extern void init_mpc1211_IRQ(void); +extern void heartbeat_mpc1211(void); + +extern void mpc1211_rtc_gettimeofday(struct timeval *tv); +extern int mpc1211_rtc_settimeofday(const struct timeval *tv); + +#endif /* _ASM_SH_IO_MPC1211_H */ diff --git a/trunk/include/asm-sh/mpc1211/keyboard.h b/trunk/include/asm-sh/mpc1211/keyboard.h new file mode 100644 index 000000000000..9020feee7b4c --- /dev/null +++ b/trunk/include/asm-sh/mpc1211/keyboard.h @@ -0,0 +1,60 @@ +/* + * MPC1211 specific keybord definitions + * Taken from the old asm-i386/keybord.h for PC/AT-style definitions + * created 3 Nov 1996 by Geert Uytterhoeven. 
+ */ + +#ifdef __KERNEL__ + +#include +#include +#include +#include +#include + +#define KEYBOARD_IRQ 1 +#define DISABLE_KBD_DURING_INTERRUPTS 0 + +extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode); +extern int pckbd_getkeycode(unsigned int scancode); +extern int pckbd_translate(unsigned char scancode, unsigned char *keycode, + char raw_mode); +extern char pckbd_unexpected_up(unsigned char keycode); +extern void pckbd_leds(unsigned char leds); +extern void pckbd_init_hw(void); +extern int pckbd_pm_resume(struct pm_dev *, pm_request_t, void *); +extern pm_callback pm_kbd_request_override; + +#define kbd_setkeycode pckbd_setkeycode +#define kbd_getkeycode pckbd_getkeycode +#define kbd_translate pckbd_translate +#define kbd_unexpected_up pckbd_unexpected_up +#define kbd_leds pckbd_leds +#define kbd_init_hw pckbd_init_hw + +/* resource allocation */ +#define kbd_request_region() +#define kbd_request_irq(handler) request_irq(KEYBOARD_IRQ, handler, 0, \ + "keyboard", NULL) + +/* How to access the keyboard macros on this platform. */ +#define kbd_read_input() inb(KBD_DATA_REG) +#define kbd_read_status() inb(KBD_STATUS_REG) +#define kbd_write_output(val) outb(val, KBD_DATA_REG) +#define kbd_write_command(val) outb(val, KBD_CNTL_REG) + +/* Some stoneage hardware needs delays after some operations. */ +#define kbd_pause() do { } while(0) + +/* + * Machine specific bits for the PS/2 driver + */ + +#define AUX_IRQ 12 + +#define aux_request_irq(hand, dev_id) \ + request_irq(AUX_IRQ, hand, IRQF_SHARED, "PS2 Mouse", dev_id) + +#define aux_free_irq(dev_id) free_irq(AUX_IRQ, dev_id) + +#endif /* __KERNEL__ */ diff --git a/trunk/include/asm-sh/mpc1211/m1543c.h b/trunk/include/asm-sh/mpc1211/m1543c.h new file mode 100644 index 000000000000..c95d13236c3b --- /dev/null +++ b/trunk/include/asm-sh/mpc1211/m1543c.h @@ -0,0 +1,200 @@ +#ifndef __ASM_SH_M1543C_H +#define __ASM_SH_M1543C_H + +/* + * linux/include/asm-sh/m1543c.h + * Copyright (C) 2001 Nobuhiro Sakawa + * M1543C:PCI-ISA Bus Bridge with Super IO Chip support + * + * from + * + * linux/include/asm-sh/smc37c93x.h + * + * Copyright (C) 2000 Kazumoto Kojima + * + * SMSC 37C93x Super IO Chip support + */ + +/* Default base I/O address */ +#define FDC_PRIMARY_BASE 0x3f0 +#define IDE1_PRIMARY_BASE 0x1f0 +#define IDE1_SECONDARY_BASE 0x170 +#define PARPORT_PRIMARY_BASE 0x378 +#define COM1_PRIMARY_BASE 0x2f8 +#define COM2_PRIMARY_BASE 0x3f8 +#define COM3_PRIMARY_BASE 0x3e8 +#define RTC_PRIMARY_BASE 0x070 +#define KBC_PRIMARY_BASE 0x060 +#define AUXIO_PRIMARY_BASE 0x000 /* XXX */ +#define I8259_M_CR 0x20 +#define I8259_M_MR 0x21 +#define I8259_S_CR 0xa0 +#define I8259_S_MR 0xa1 + +/* Logical device number */ +#define LDN_FDC 0 +#define LDN_IDE1 1 +#define LDN_IDE2 2 +#define LDN_PARPORT 3 +#define LDN_COM1 4 +#define LDN_COM2 5 +#define LDN_COM3 11 +#define LDN_RTC 6 +#define LDN_KBC 7 + +/* Configuration port and key */ +#define CONFIG_PORT 0x3f0 +#define INDEX_PORT CONFIG_PORT +#define DATA_PORT 0x3f1 +#define CONFIG_ENTER1 0x51 +#define CONFIG_ENTER2 0x23 +#define CONFIG_EXIT 0xbb + +/* Configuration index */ +#define CURRENT_LDN_INDEX 0x07 +#define POWER_CONTROL_INDEX 0x22 +#define ACTIVATE_INDEX 0x30 +#define IO_BASE_HI_INDEX 0x60 +#define IO_BASE_LO_INDEX 0x61 +#define IRQ_SELECT_INDEX 0x70 +#define PS2_IRQ_INDEX 0x72 +#define DMA_SELECT_INDEX 0x74 + +/* UART stuff. Only for debugging. 
*/
+/* UART Register */
+
+#define UART_RBR 0x0 /* Receiver Buffer Register (Read Only) */
+#define UART_THR 0x0 /* Transmitter Holding Register (Write Only) */
+#define UART_IER 0x2 /* Interrupt Enable Register */
+#define UART_IIR 0x4 /* Interrupt Ident Register (Read Only) */
+#define UART_FCR 0x4 /* FIFO Control Register (Write Only) */
+#define UART_LCR 0x6 /* Line Control Register */
+#define UART_MCR 0x8 /* MODEM Control Register */
+#define UART_LSR 0xa /* Line Status Register */
+#define UART_MSR 0xc /* MODEM Status Register */
+#define UART_SCR 0xe /* Scratch Register */
+#define UART_DLL 0x0 /* Divisor Latch (LS) */
+#define UART_DLM 0x2 /* Divisor Latch (MS) */
+
+#ifndef __ASSEMBLY__
+typedef struct uart_reg {
+ volatile __u16 rbr;
+ volatile __u16 ier;
+ volatile __u16 iir;
+ volatile __u16 lcr;
+ volatile __u16 mcr;
+ volatile __u16 lsr;
+ volatile __u16 msr;
+ volatile __u16 scr;
+} uart_reg;
+#endif /* ! __ASSEMBLY__ */
+
+/* Alias for Write Only Register */
+
+#define thr rbr
+#define tcr iir
+
+/* Alias for Divisor Latch Register */
+
+#define dll rbr
+#define dlm ier
+#define fcr iir
+
+/* Interrupt Enable Register */
+
+#define IER_ERDAI 0x0100 /* Enable Received Data Available Interrupt */
+#define IER_ETHREI 0x0200 /* Enable Transmitter Holding Register Empty Interrupt */
+#define IER_ELSI 0x0400 /* Enable Receiver Line Status Interrupt */
+#define IER_EMSI 0x0800 /* Enable MODEM Status Interrupt */
+
+/* Interrupt Ident Register */
+
+#define IIR_IP 0x0100 /* "0" if Interrupt Pending */
+#define IIR_IIB0 0x0200 /* Interrupt ID Bit 0 */
+#define IIR_IIB1 0x0400 /* Interrupt ID Bit 1 */
+#define IIR_IIB2 0x0800 /* Interrupt ID Bit 2 */
+#define IIR_FIFO 0xc000 /* FIFOs enabled */
+
+/* FIFO Control Register */
+
+#define FCR_FEN 0x0100 /* FIFO enable */
+#define FCR_RFRES 0x0200 /* Receiver FIFO reset */
+#define FCR_TFRES 0x0400 /* Transmitter FIFO reset */
+#define FCR_DMA 0x0800 /* DMA mode select */
+#define FCR_RTL 0x4000 /* Receiver triger (LSB) */
+#define FCR_RTM 0x8000 /* Receiver triger (MSB) */
+
+/* Line Control Register */
+
+#define LCR_WLS0 0x0100 /* Word Length Select Bit 0 */
+#define LCR_WLS1 0x0200 /* Word Length Select Bit 1 */
+#define LCR_STB 0x0400 /* Number of Stop Bits */
+#define LCR_PEN 0x0800 /* Parity Enable */
+#define LCR_EPS 0x1000 /* Even Parity Select */
+#define LCR_SP 0x2000 /* Stick Parity */
+#define LCR_SB 0x4000 /* Set Break */
+#define LCR_DLAB 0x8000 /* Divisor Latch Access Bit */
+
+/* MODEM Control Register */
+
+#define MCR_DTR 0x0100 /* Data Terminal Ready */
+#define MCR_RTS 0x0200 /* Request to Send */
+#define MCR_OUT1 0x0400 /* Out 1 */
+#define MCR_IRQEN 0x0800 /* IRQ Enable */
+#define MCR_LOOP 0x1000 /* Loop */
+
+/* Line Status Register */
+
+#define LSR_DR 0x0100 /* Data Ready */
+#define LSR_OE 0x0200 /* Overrun Error */
+#define LSR_PE 0x0400 /* Parity Error */
+#define LSR_FE 0x0800 /* Framing Error */
+#define LSR_BI 0x1000 /* Break Interrupt */
+#define LSR_THRE 0x2000 /* Transmitter Holding Register Empty */
+#define LSR_TEMT 0x4000 /* Transmitter Empty */
+#define LSR_FIFOE 0x8000 /* Receiver FIFO error */
+
+/* MODEM Status Register */
+
+#define MSR_DCTS 0x0100 /* Delta Clear to Send */
+#define MSR_DDSR 0x0200 /* Delta Data Set Ready */
+#define MSR_TERI 0x0400 /* Trailing Edge Ring Indicator */
+#define MSR_DDCD 0x0800 /* Delta Data Carrier Detect */
+#define MSR_CTS 0x1000 /* Clear to Send */
+#define MSR_DSR 0x2000 /* Data Set Ready */
+#define MSR_RI 0x4000 /* Ring Indicator */
+#define MSR_DCD 0x8000 /* Data Carrier Detect */
+
+/* Baud Rate Divisor */
+
+#define UART_CLK (1843200) /* 1.8432 MHz */
+#define UART_BAUD(x) (UART_CLK / (16 * (x)))
+
+/* RTC register definition */
+#define RTC_SECONDS 0
+#define RTC_SECONDS_ALARM 1
+#define RTC_MINUTES 2
+#define RTC_MINUTES_ALARM 3
+#define RTC_HOURS 4
+#define RTC_HOURS_ALARM 5
+#define RTC_DAY_OF_WEEK 6
+#define RTC_DAY_OF_MONTH 7
+#define RTC_MONTH 8
+#define RTC_YEAR 9
+#define RTC_FREQ_SELECT 10
+# define RTC_UIP 0x80
+# define RTC_DIV_CTL 0x70
+/* This RTC can work under 32.768KHz clock only. */
+# define RTC_OSC_ENABLE 0x20
+# define RTC_OSC_DISABLE 0x00
+#define RTC_CONTROL 11
+# define RTC_SET 0x80
+# define RTC_PIE 0x40
+# define RTC_AIE 0x20
+# define RTC_UIE 0x10
+# define RTC_SQWE 0x08
+# define RTC_DM_BINARY 0x04
+# define RTC_24H 0x02
+# define RTC_DST_EN 0x01
+
+#endif /* __ASM_SH_M1543C_H */
diff --git a/trunk/include/asm-sh/mpc1211/mc146818rtc.h b/trunk/include/asm-sh/mpc1211/mc146818rtc.h
new file mode 100644
index 000000000000..e245f2a3cd78
--- /dev/null
+++ b/trunk/include/asm-sh/mpc1211/mc146818rtc.h
@@ -0,0 +1,6 @@
+/*
+ * MPC1211 uses PC/AT style RTC definitions.
+ */
+#include
+
+
diff --git a/trunk/include/asm-sh/mpc1211/mpc1211.h b/trunk/include/asm-sh/mpc1211/mpc1211.h
new file mode 100644
index 000000000000..fa456c3e4e01
--- /dev/null
+++ b/trunk/include/asm-sh/mpc1211/mpc1211.h
@@ -0,0 +1,18 @@
+#ifndef __ASM_SH_MPC1211_H
+#define __ASM_SH_MPC1211_H
+
+/*
+ * linux/include/asm-sh/mpc1211.h
+ *
+ * Copyright (C) 2001 Saito.K & Jeanne
+ *
+ * Interface MPC-1211 support
+ */
+
+#define PA_PCI_IO (0xa4000000) /* PCI I/O space */
+#define PA_PCI_MEM (0xb0000000) /* PCI MEM space */
+
+#define PCIPAR (0xa4000cf8) /* PCI Config address */
+#define PCIPDR (0xa4000cfc) /* PCI Config data */
+
+#endif /* __ASM_SH_MPC1211_H */
diff --git a/trunk/include/asm-sh/mpc1211/pci.h b/trunk/include/asm-sh/mpc1211/pci.h
new file mode 100644
index 000000000000..d9162c5ed76a
--- /dev/null
+++ b/trunk/include/asm-sh/mpc1211/pci.h
@@ -0,0 +1,38 @@
+/*
+ * Low-Level PCI Support for MPC-1211
+ *
+ * (c) 2002 Saito.K & Jeanne
+ *
+ */
+
+#ifndef _PCI_MPC1211_H_
+#define _PCI_MPC1211_H_
+
+#include
+
+/* set debug level 4=verbose...1=terse */
+//#define DEBUG_PCI 3
+#undef DEBUG_PCI
+
+#ifdef DEBUG_PCI
+#define PCIDBG(n, x...) { if(DEBUG_PCI>=n) printk(x); }
+#else
+#define PCIDBG(n, x...)
+#endif + +/* startup values */ +#define PCI_PROBE_BIOS 1 +#define PCI_PROBE_CONF1 2 +#define PCI_PROBE_CONF2 4 +#define PCI_NO_CHECKS 0x400 +#define PCI_ASSIGN_ROMS 0x1000 +#define PCI_BIOS_IRQ_SCAN 0x2000 + +/* MPC-1211 Specific Values */ +#define PCIPAR (0xa4000cf8) /* PCI Config address */ +#define PCIPDR (0xa4000cfc) /* PCI Config data */ + +#define PA_PCI_IO (0xa4000000) /* PCI I/O space */ +#define PA_PCI_MEM (0xb0000000) /* PCI MEM space */ + +#endif /* _PCI_MPC1211_H_ */ diff --git a/trunk/include/asm-sh/r7780rp.h b/trunk/include/asm-sh/r7780rp.h index 306f7359f7d4..a33838f23a6d 100644 --- a/trunk/include/asm-sh/r7780rp.h +++ b/trunk/include/asm-sh/r7780rp.h @@ -193,6 +193,8 @@ #define IRQ_SCIF0 (HL_FPGA_IRQ_BASE + 15) #define IRQ_SCIF1 (HL_FPGA_IRQ_BASE + 16) -unsigned char *highlander_plat_irq_setup(void); +unsigned char *highlander_init_irq_r7780mp(void); +unsigned char *highlander_init_irq_r7780rp(void); +unsigned char *highlander_init_irq_r7785rp(void); #endif /* __ASM_SH_RENESAS_R7780RP */ diff --git a/trunk/include/asm-sh/tlb_64.h b/trunk/include/asm-sh/tlb_64.h index 0a96f3af69e3..0308e05fc57b 100644 --- a/trunk/include/asm-sh/tlb_64.h +++ b/trunk/include/asm-sh/tlb_64.h @@ -56,7 +56,6 @@ static inline void __flush_tlb_slot(unsigned long long slot) __asm__ __volatile__ ("putcfg %0, 0, r63\n" : : "r" (slot)); } -#ifdef CONFIG_MMU /* arch/sh64/mm/tlb.c */ int sh64_tlb_init(void); unsigned long long sh64_next_free_dtlb_entry(void); @@ -65,13 +64,6 @@ int sh64_put_wired_dtlb_entry(unsigned long long entry); void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr, unsigned long asid, unsigned long paddr); void sh64_teardown_tlb_slot(unsigned long long config_addr); -#else -#define sh64_tlb_init() do { } while (0) -#define sh64_next_free_dtlb_entry() (0) -#define sh64_get_wired_dtlb_entry() (0) -#define sh64_put_wired_dtlb_entry(entry) do { } while (0) -#define sh64_setup_tlb_slot(conf, virt, asid, phys) do { } while (0) -#define sh64_teardown_tlb_slot(addr) do { } while (0) -#endif /* CONFIG_MMU */ + #endif /* __ASSEMBLY__ */ #endif /* __ASM_SH_TLB_64_H */ diff --git a/trunk/include/asm-sh/topology.h b/trunk/include/asm-sh/topology.h index 95f0085e098a..34cdb28e8f44 100644 --- a/trunk/include/asm-sh/topology.h +++ b/trunk/include/asm-sh/topology.h @@ -29,17 +29,6 @@ .nr_balance_failed = 0, \ } -#define cpu_to_node(cpu) ((void)(cpu),0) -#define parent_node(node) ((void)(node),0) - -#define node_to_cpumask(node) ((void)node, cpu_online_map) -#define node_to_first_cpu(node) ((void)(node),0) - -#define pcibus_to_node(bus) ((void)(bus), -1) -#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \ - CPU_MASK_ALL : \ - node_to_cpumask(pcibus_to_node(bus)) \ - ) #endif #include diff --git a/trunk/include/asm-sh/uaccess_64.h b/trunk/include/asm-sh/uaccess_64.h index a9b68d094844..f956b7b316c7 100644 --- a/trunk/include/asm-sh/uaccess_64.h +++ b/trunk/include/asm-sh/uaccess_64.h @@ -274,9 +274,7 @@ struct exception_table_entry unsigned long insn, fixup; }; -#ifdef CONFIG_MMU #define ARCH_HAS_SEARCH_EXTABLE -#endif /* Returns 0 if exception not found and fixup.unit otherwise. 
*/ extern unsigned long search_exception_table(unsigned long addr); diff --git a/trunk/include/asm-x86/bitops.h b/trunk/include/asm-x86/bitops.h index ee4b3ead6a43..b81a4d4d3337 100644 --- a/trunk/include/asm-x86/bitops.h +++ b/trunk/include/asm-x86/bitops.h @@ -23,10 +23,13 @@ #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) /* Technically wrong, but this avoids compilation errors on some gcc versions. */ -#define ADDR "=m" (*(volatile long *) addr) +#define ADDR "=m" (*(volatile long *)addr) +#define BIT_ADDR "=m" (((volatile int *)addr)[nr >> 5]) #else #define ADDR "+m" (*(volatile long *) addr) +#define BIT_ADDR "+m" (((volatile int *)addr)[nr >> 5]) #endif +#define BASE_ADDR "m" (*(volatile int *)addr) /** * set_bit - Atomically set a bit in memory @@ -74,7 +77,7 @@ static inline void __set_bit(int nr, volatile void *addr) */ static inline void clear_bit(int nr, volatile void *addr) { - asm volatile(LOCK_PREFIX "btr %1,%0" : ADDR : "Ir" (nr)); + asm volatile(LOCK_PREFIX "btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR); } /* @@ -93,7 +96,7 @@ static inline void clear_bit_unlock(unsigned nr, volatile void *addr) static inline void __clear_bit(int nr, volatile void *addr) { - asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); + asm volatile("btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR); } /* @@ -128,7 +131,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile void *addr) */ static inline void __change_bit(int nr, volatile void *addr) { - asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); + asm volatile("btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR); } /** @@ -142,7 +145,7 @@ static inline void __change_bit(int nr, volatile void *addr) */ static inline void change_bit(int nr, volatile void *addr) { - asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr)); + asm volatile(LOCK_PREFIX "btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR); } /** @@ -188,10 +191,9 @@ static inline int __test_and_set_bit(int nr, volatile void *addr) { int oldbit; - asm("bts %2,%1\n\t" - "sbb %0,%0" - : "=r" (oldbit), ADDR - : "Ir" (nr)); + asm volatile("bts %2,%3\n\t" + "sbb %0,%0" + : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR); return oldbit; } @@ -227,10 +229,9 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr) { int oldbit; - asm volatile("btr %2,%1\n\t" + asm volatile("btr %2,%3\n\t" "sbb %0,%0" - : "=r" (oldbit), ADDR - : "Ir" (nr)); + : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR); return oldbit; } @@ -239,10 +240,9 @@ static inline int __test_and_change_bit(int nr, volatile void *addr) { int oldbit; - asm volatile("btc %2,%1\n\t" + asm volatile("btc %2,%3\n\t" "sbb %0,%0" - : "=r" (oldbit), ADDR - : "Ir" (nr) : "memory"); + : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR); return oldbit; } @@ -276,10 +276,11 @@ static inline int variable_test_bit(int nr, volatile const void *addr) { int oldbit; - asm volatile("bt %2,%1\n\t" + asm volatile("bt %2,%3\n\t" "sbb %0,%0" : "=r" (oldbit) - : "m" (*(unsigned long *)addr), "Ir" (nr)); + : "m" (((volatile const int *)addr)[nr >> 5]), + "Ir" (nr), BASE_ADDR); return oldbit; } @@ -396,6 +397,8 @@ static inline int fls(int x) } #endif /* __KERNEL__ */ +#undef BASE_ADDR +#undef BIT_ADDR #undef ADDR static inline void set_bit_string(unsigned long *bitmap, diff --git a/trunk/include/asm-x86/bootparam.h b/trunk/include/asm-x86/bootparam.h index f62f4733606b..e8659909e5f6 100644 --- a/trunk/include/asm-x86/bootparam.h +++ b/trunk/include/asm-x86/bootparam.h @@ -14,10 +14,10 @@ /* extensible setup data list node */ struct setup_data { - 
__u64 next; - __u32 type; - __u32 len; - __u8 data[0]; + u64 next; + u32 type; + u32 len; + u8 data[0]; }; struct setup_header { diff --git a/trunk/include/asm-x86/geode.h b/trunk/include/asm-x86/geode.h index 6e6458853a36..7154dc4de951 100644 --- a/trunk/include/asm-x86/geode.h +++ b/trunk/include/asm-x86/geode.h @@ -185,14 +185,16 @@ static inline int is_geode(void) return (is_geode_gx() || is_geode_lx()); } -#ifdef CONFIG_MGEODE_LX -extern int geode_has_vsa2(void); -#else +/* + * The VSA has virtual registers that we can query for a signature. + */ static inline int geode_has_vsa2(void) { - return 0; + outw(VSA_VR_UNLOCK, VSA_VRC_INDEX); + outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX); + + return (inw(VSA_VRC_DATA) == VSA_SIG); } -#endif /* MFGPTs */ diff --git a/trunk/include/asm-x86/i387.h b/trunk/include/asm-x86/i387.h index 6b722d315936..da2adb45f6e3 100644 --- a/trunk/include/asm-x86/i387.h +++ b/trunk/include/asm-x86/i387.h @@ -175,15 +175,7 @@ static inline int save_i387(struct _fpstate __user *buf) */ static inline int restore_i387(struct _fpstate __user *buf) { - struct task_struct *tsk = current; - int err; - - if (!used_math()) { - err = init_fpu(tsk); - if (err) - return err; - } - + set_used_math(); if (!(task_thread_info(current)->status & TS_USEDFPU)) { clts(); task_thread_info(current)->status |= TS_USEDFPU; diff --git a/trunk/include/asm-x86/kvm_host.h b/trunk/include/asm-x86/kvm_host.h index 1d8cd01fa514..9d963cd6533c 100644 --- a/trunk/include/asm-x86/kvm_host.h +++ b/trunk/include/asm-x86/kvm_host.h @@ -314,9 +314,6 @@ struct kvm_arch{ struct page *apic_access_page; gpa_t wall_clock; - - struct page *ept_identity_pagetable; - bool ept_identity_pagetable_done; }; struct kvm_vm_stat { @@ -425,7 +422,6 @@ struct kvm_x86_ops { struct kvm_run *run); int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); - int (*get_tdp_level)(void); }; extern struct kvm_x86_ops *kvm_x86_ops; @@ -437,9 +433,6 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu); int kvm_mmu_create(struct kvm_vcpu *vcpu); int kvm_mmu_setup(struct kvm_vcpu *vcpu); void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte); -void kvm_mmu_set_base_ptes(u64 base_pte); -void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, - u64 dirty_mask, u64 nx_mask, u64 x_mask); int kvm_mmu_reset_context(struct kvm_vcpu *vcpu); void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot); @@ -627,7 +620,7 @@ static inline void fx_restore(struct i387_fxsave_struct *image) asm("fxrstor (%0)":: "r" (image)); } -static inline void fx_finit(void) +static inline void fpu_init(void) { asm("finit"); } @@ -651,7 +644,6 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code) #define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4" #define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4" #define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30" -#define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08" #define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08" #define MSR_IA32_TIME_STAMP_COUNTER 0x010 diff --git a/trunk/include/asm-x86/pat.h b/trunk/include/asm-x86/pat.h index 88f60cc6a227..8b822b5a1786 100644 --- a/trunk/include/asm-x86/pat.h +++ b/trunk/include/asm-x86/pat.h @@ -4,13 +4,7 @@ #include -#ifdef CONFIG_X86_PAT extern int pat_wc_enabled; -extern void validate_pat_support(struct cpuinfo_x86 *c); -#else -static const int pat_wc_enabled = 0; -static inline void validate_pat_support(struct cpuinfo_x86 *c) { } -#endif extern void pat_init(void); @@ -18,7 +12,5 @@ extern int reserve_memtype(u64 start, u64 end, 
unsigned long req_type, unsigned long *ret_type); extern int free_memtype(u64 start, u64 end); -extern void pat_disable(char *reason); - #endif diff --git a/trunk/include/asm-x86/pgtable_32.h b/trunk/include/asm-x86/pgtable_32.h index d7f0403bbecb..577ab79c4c27 100644 --- a/trunk/include/asm-x86/pgtable_32.h +++ b/trunk/include/asm-x86/pgtable_32.h @@ -88,7 +88,14 @@ extern unsigned long pg0[]; /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ #define pmd_none(x) (!(unsigned long)pmd_val((x))) #define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT) -#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) + +extern int pmd_bad(pmd_t pmd); + +#define pmd_bad_v1(x) \ + (_KERNPG_TABLE != (pmd_val((x)) & ~(PAGE_MASK | _PAGE_USER))) +#define pmd_bad_v2(x) \ + (_KERNPG_TABLE != (pmd_val((x)) & ~(PAGE_MASK | _PAGE_USER | \ + _PAGE_PSE | _PAGE_NX))) #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) diff --git a/trunk/include/asm-x86/pgtable_64.h b/trunk/include/asm-x86/pgtable_64.h index efe83dcbd412..a3bbf8766c1d 100644 --- a/trunk/include/asm-x86/pgtable_64.h +++ b/trunk/include/asm-x86/pgtable_64.h @@ -158,12 +158,14 @@ static inline unsigned long pgd_bad(pgd_t pgd) static inline unsigned long pud_bad(pud_t pud) { - return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER); + return pud_val(pud) & + ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER | _PAGE_PSE | _PAGE_NX); } static inline unsigned long pmd_bad(pmd_t pmd) { - return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER); + return pmd_val(pmd) & + ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER | _PAGE_PSE | _PAGE_NX); } #define pte_none(x) (!pte_val((x))) diff --git a/trunk/include/asm-x86/spinlock.h b/trunk/include/asm-x86/spinlock.h index 21e89bf92f1c..bc6376f1bc5a 100644 --- a/trunk/include/asm-x86/spinlock.h +++ b/trunk/include/asm-x86/spinlock.h @@ -20,8 +20,18 @@ */ #ifdef CONFIG_X86_32 +typedef char _slock_t; +# define LOCK_INS_DEC "decb" +# define LOCK_INS_XCH "xchgb" +# define LOCK_INS_MOV "movb" +# define LOCK_INS_CMP "cmpb" # define LOCK_PTR_REG "a" #else +typedef int _slock_t; +# define LOCK_INS_DEC "decl" +# define LOCK_INS_XCH "xchgl" +# define LOCK_INS_MOV "movl" +# define LOCK_INS_CMP "cmpl" # define LOCK_PTR_REG "D" #endif @@ -56,14 +66,14 @@ #if (NR_CPUS < 256) static inline int __raw_spin_is_locked(raw_spinlock_t *lock) { - int tmp = ACCESS_ONCE(lock->slock); + int tmp = *(volatile signed int *)(&(lock)->slock); return (((tmp >> 8) & 0xff) != (tmp & 0xff)); } static inline int __raw_spin_is_contended(raw_spinlock_t *lock) { - int tmp = ACCESS_ONCE(lock->slock); + int tmp = *(volatile signed int *)(&(lock)->slock); return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1; } @@ -120,14 +130,14 @@ static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) #else static inline int __raw_spin_is_locked(raw_spinlock_t *lock) { - int tmp = ACCESS_ONCE(lock->slock); + int tmp = *(volatile signed int *)(&(lock)->slock); return (((tmp >> 16) & 0xffff) != (tmp & 0xffff)); } static inline int __raw_spin_is_contended(raw_spinlock_t *lock) { - int tmp = ACCESS_ONCE(lock->slock); + int tmp = *(volatile signed int *)(&(lock)->slock); return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1; } diff --git a/trunk/include/asm-x86/topology.h b/trunk/include/asm-x86/topology.h index dcf3f8131d6b..4f35a0fb4f22 100644 --- a/trunk/include/asm-x86/topology.h +++ b/trunk/include/asm-x86/topology.h @@ -25,16 +25,6 @@ #ifndef _ASM_X86_TOPOLOGY_H #define _ASM_X86_TOPOLOGY_H -#ifdef CONFIG_X86_32 -# ifdef 
CONFIG_X86_HT -# define ENABLE_TOPO_DEFINES -# endif -#else -# ifdef CONFIG_SMP -# define ENABLE_TOPO_DEFINES -# endif -#endif - #ifdef CONFIG_NUMA #include #include @@ -140,6 +130,10 @@ extern unsigned long node_end_pfn[]; extern unsigned long node_remap_size[]; #define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid]) +# ifdef CONFIG_X86_HT +# define ENABLE_TOPO_DEFINES +# endif + # define SD_CACHE_NICE_TRIES 1 # define SD_IDLE_IDX 1 # define SD_NEWIDLE_IDX 2 @@ -147,6 +141,10 @@ extern unsigned long node_remap_size[]; #else +# ifdef CONFIG_SMP +# define ENABLE_TOPO_DEFINES +# endif + # define SD_CACHE_NICE_TRIES 2 # define SD_IDLE_IDX 2 # define SD_NEWIDLE_IDX 2 diff --git a/trunk/include/linux/compiler.h b/trunk/include/linux/compiler.h index c8bd2daf95ec..dcae0c8d97e6 100644 --- a/trunk/include/linux/compiler.h +++ b/trunk/include/linux/compiler.h @@ -182,16 +182,4 @@ extern void __chk_io_ptr(const volatile void __iomem *); # define __section(S) __attribute__ ((__section__(#S))) #endif -/* - * Prevent the compiler from merging or refetching accesses. The compiler - * is also forbidden from reordering successive instances of ACCESS_ONCE(), - * but only when the compiler is aware of some particular ordering. One way - * to make the compiler aware of ordering is to put the two invocations of - * ACCESS_ONCE() in different C statements. - * - * This macro does absolutely -nothing- to prevent the CPU from reordering, - * merging, or refetching absolutely anything at any time. - */ -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) - #endif /* __LINUX_COMPILER_H */ diff --git a/trunk/include/linux/exportfs.h b/trunk/include/linux/exportfs.h index f5abd1306638..de8387b7ceb6 100644 --- a/trunk/include/linux/exportfs.h +++ b/trunk/include/linux/exportfs.h @@ -33,19 +33,6 @@ enum fid_type { * 32 bit parent directory inode number. */ FILEID_INO32_GEN_PARENT = 2, - - /* - * 32 bit block number, 16 bit partition reference, - * 16 bit unused, 32 bit generation number. - */ - FILEID_UDF_WITHOUT_PARENT = 0x51, - - /* - * 32 bit block number, 16 bit partition reference, - * 16 bit unused, 32 bit generation number, - * 32 bit parent block number, 32 bit parent generation number - */ - FILEID_UDF_WITH_PARENT = 0x52, }; struct fid { @@ -56,14 +43,6 @@ struct fid { u32 parent_ino; u32 parent_gen; } i32; - struct { - u32 block; - u16 partref; - u16 parent_partref; - u32 generation; - u32 parent_block; - u32 parent_generation; - } udf; __u32 raw[0]; }; }; diff --git a/trunk/include/linux/fs.h b/trunk/include/linux/fs.h index f413085f748e..a1ba005d08e7 100644 --- a/trunk/include/linux/fs.h +++ b/trunk/include/linux/fs.h @@ -1289,12 +1289,17 @@ extern ssize_t vfs_readv(struct file *, const struct iovec __user *, extern ssize_t vfs_writev(struct file *, const struct iovec __user *, unsigned long, loff_t *); +/* + * NOTE: write_inode, delete_inode, clear_inode, put_inode can be called + * without the big kernel lock held in all filesystems. 
+ */ struct super_operations { struct inode *(*alloc_inode)(struct super_block *sb); void (*destroy_inode)(struct inode *); void (*dirty_inode) (struct inode *); int (*write_inode) (struct inode *, int); + void (*put_inode) (struct inode *); void (*drop_inode) (struct inode *); void (*delete_inode) (struct inode *); void (*put_super) (struct super_block *); @@ -1816,6 +1821,7 @@ extern void iget_failed(struct inode *); extern void clear_inode(struct inode *); extern void destroy_inode(struct inode *); extern struct inode *new_inode(struct super_block *); +extern int __remove_suid(struct dentry *, int); extern int should_remove_suid(struct dentry *); extern int remove_suid(struct dentry *); diff --git a/trunk/include/linux/genhd.h b/trunk/include/linux/genhd.h index e9874e7fcdf9..ecd2bf63fc84 100644 --- a/trunk/include/linux/genhd.h +++ b/trunk/include/linux/genhd.h @@ -178,17 +178,17 @@ static inline struct hd_struct *get_part(struct gendisk *gendiskp, static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) { int i; - for_each_possible_cpu(i) memset(per_cpu_ptr(gendiskp->dkstats, i), value, - sizeof(struct disk_stats)); + sizeof (struct disk_stats)); } #define __part_stat_add(part, field, addnd) \ (per_cpu_ptr(part->dkstats, smp_processor_id())->field += addnd) -#define __all_stat_add(gendiskp, part, field, addnd, sector) \ +#define __all_stat_add(gendiskp, field, addnd, sector) \ ({ \ + struct hd_struct *part = get_part(gendiskp, sector); \ if (part) \ __part_stat_add(part, field, addnd); \ __disk_stat_add(gendiskp, field, addnd); \ @@ -203,13 +203,11 @@ static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) { res; \ }) -static inline void part_stat_set_all(struct hd_struct *part, int value) -{ +static inline void part_stat_set_all(struct hd_struct *part, int value) { int i; - for_each_possible_cpu(i) memset(per_cpu_ptr(part->dkstats, i), value, - sizeof(struct disk_stats)); + sizeof(struct disk_stats)); } #else /* !CONFIG_SMP */ @@ -225,8 +223,9 @@ static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) #define __part_stat_add(part, field, addnd) \ (part->dkstats.field += addnd) -#define __all_stat_add(gendiskp, part, field, addnd, sector) \ +#define __all_stat_add(gendiskp, field, addnd, sector) \ ({ \ + struct hd_struct *part = get_part(gendiskp, sector); \ if (part) \ part->dkstats.field += addnd; \ __disk_stat_add(gendiskp, field, addnd); \ @@ -277,10 +276,10 @@ static inline void part_stat_set_all(struct hd_struct *part, int value) #define part_stat_sub(gendiskp, field, subnd) \ part_stat_add(gendiskp, field, -subnd) -#define all_stat_add(gendiskp, part, field, addnd, sector) \ +#define all_stat_add(gendiskp, field, addnd, sector) \ do { \ preempt_disable(); \ - __all_stat_add(gendiskp, part, field, addnd, sector); \ + __all_stat_add(gendiskp, field, addnd, sector); \ preempt_enable(); \ } while (0) @@ -289,15 +288,15 @@ static inline void part_stat_set_all(struct hd_struct *part, int value) #define all_stat_dec(gendiskp, field, sector) \ all_stat_add(gendiskp, field, -1, sector) -#define __all_stat_inc(gendiskp, part, field, sector) \ - __all_stat_add(gendiskp, part, field, 1, sector) -#define all_stat_inc(gendiskp, part, field, sector) \ - all_stat_add(gendiskp, part, field, 1, sector) +#define __all_stat_inc(gendiskp, field, sector) \ + __all_stat_add(gendiskp, field, 1, sector) +#define all_stat_inc(gendiskp, field, sector) \ + all_stat_add(gendiskp, field, 1, sector) -#define __all_stat_sub(gendiskp, part, field, subnd, 
sector) \ - __all_stat_add(gendiskp, part, field, -subnd, sector) -#define all_stat_sub(gendiskp, part, field, subnd, sector) \ - all_stat_add(gendiskp, part, field, -subnd, sector) +#define __all_stat_sub(gendiskp, field, subnd, sector) \ + __all_stat_add(gendiskp, field, -subnd, sector) +#define all_stat_sub(gendiskp, field, subnd, sector) \ + all_stat_add(gendiskp, field, -subnd, sector) /* Inlines to alloc and free disk stats in struct gendisk */ #ifdef CONFIG_SMP diff --git a/trunk/include/linux/hardirq.h b/trunk/include/linux/hardirq.h index 181006cc94a0..897f723bd222 100644 --- a/trunk/include/linux/hardirq.h +++ b/trunk/include/linux/hardirq.h @@ -72,14 +72,6 @@ #define in_softirq() (softirq_count()) #define in_interrupt() (irq_count()) -#if defined(CONFIG_PREEMPT) -# define PREEMPT_INATOMIC_BASE kernel_locked() -# define PREEMPT_CHECK_OFFSET 1 -#else -# define PREEMPT_INATOMIC_BASE 0 -# define PREEMPT_CHECK_OFFSET 0 -#endif - /* * Are we running in atomic context? WARNING: this macro cannot * always detect atomic context; in particular, it cannot know about @@ -87,11 +79,17 @@ * used in the general case to determine whether sleeping is possible. * Do not use in_atomic() in driver code. */ -#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_INATOMIC_BASE) +#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0) + +#ifdef CONFIG_PREEMPT +# define PREEMPT_CHECK_OFFSET 1 +#else +# define PREEMPT_CHECK_OFFSET 0 +#endif /* * Check whether we were atomic before we did preempt_disable(): - * (used by the scheduler, *after* releasing the kernel lock) + * (used by the scheduler) */ #define in_atomic_preempt_off() \ ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET) diff --git a/trunk/include/linux/io.h b/trunk/include/linux/io.h index 6c7f0ba0d5fa..3a03a3604cce 100644 --- a/trunk/include/linux/io.h +++ b/trunk/include/linux/io.h @@ -65,6 +65,5 @@ void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, void devm_iounmap(struct device *dev, void __iomem *addr); int check_signature(const volatile void __iomem *io_addr, const unsigned char *signature, int length); -void devm_ioremap_release(struct device *dev, void *res); #endif /* _LINUX_IO_H */ diff --git a/trunk/include/linux/ioprio.h b/trunk/include/linux/ioprio.h index f98a656b17e5..2a3bb1bb7433 100644 --- a/trunk/include/linux/ioprio.h +++ b/trunk/include/linux/ioprio.h @@ -67,20 +67,6 @@ static inline int task_nice_ioprio(struct task_struct *task) return (task_nice(task) + 20) / 5; } -/* - * This is for the case where the task hasn't asked for a specific IO class. - * Check for idle and rt task process, and return appropriate IO class. 
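task_nice_ioprio(), kept just above the task_nice_ioclass() helper being removed here, folds the nice range -20..19 into the eight best-effort I/O priority levels. A quick user-space check of that arithmetic (the formula is taken from the header; the loop is only scaffolding):

#include <stdio.h>

/* The mapping used by task_nice_ioprio(): nice -20..19 -> ioprio 0..7. */
static int nice_to_ioprio(int nice)
{
	return (nice + 20) / 5;
}

int main(void)
{
	int nice;

	for (nice = -20; nice <= 19; nice += 13)
		printf("nice %3d -> best-effort ioprio %d\n",
		       nice, nice_to_ioprio(nice));
	return 0;
}

Integer division keeps the result in range, so nice 19 still maps to priority 7, the lowest best-effort level.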
- */ -static inline int task_nice_ioclass(struct task_struct *task) -{ - if (task->policy == SCHED_IDLE) - return IOPRIO_CLASS_IDLE; - else if (task->policy == SCHED_FIFO || task->policy == SCHED_RR) - return IOPRIO_CLASS_RT; - else - return IOPRIO_CLASS_BE; -} - /* * For inheritance, return the highest of the two given priorities */ diff --git a/trunk/include/linux/kgdb.h b/trunk/include/linux/kgdb.h index 6adcc297e354..9757b1a6d9dc 100644 --- a/trunk/include/linux/kgdb.h +++ b/trunk/include/linux/kgdb.h @@ -261,12 +261,10 @@ struct kgdb_io { extern struct kgdb_arch arch_kgdb_ops; -extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs); - extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops); extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops); -extern int kgdb_hex2long(char **ptr, unsigned long *long_val); +extern int kgdb_hex2long(char **ptr, long *long_val); extern int kgdb_mem2hex(char *mem, char *buf, int count); extern int kgdb_hex2mem(char *buf, char *mem, int count); diff --git a/trunk/include/linux/libata.h b/trunk/include/linux/libata.h index 0f17643e0a6e..d1dfe872ee30 100644 --- a/trunk/include/linux/libata.h +++ b/trunk/include/linux/libata.h @@ -1039,7 +1039,6 @@ extern void ata_eh_thaw_port(struct ata_port *ap); extern void ata_eh_qc_complete(struct ata_queued_cmd *qc); extern void ata_eh_qc_retry(struct ata_queued_cmd *qc); -extern void ata_eh_analyze_ncq_error(struct ata_link *link); extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, ata_reset_fn_t softreset, ata_reset_fn_t hardreset, @@ -1382,18 +1381,6 @@ static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host) return *(struct ata_port **)&host->hostdata[0]; } -static inline int ata_check_ready(u8 status) -{ - if (!(status & ATA_BUSY)) - return 1; - - /* 0xff indicates either no device or device not ready */ - if (status == 0xff) - return -ENODEV; - - return 0; -} - /************************************************************************** * PMP - drivers/ata/libata-pmp.c diff --git a/trunk/include/linux/mv643xx_eth.h b/trunk/include/linux/mv643xx_eth.h index a15cdd4a8e58..30e11aa3c1c9 100644 --- a/trunk/include/linux/mv643xx_eth.h +++ b/trunk/include/linux/mv643xx_eth.h @@ -1,31 +1,19 @@ /* * MV-643XX ethernet platform device data definition file. 
*/ - #ifndef __LINUX_MV643XX_ETH_H #define __LINUX_MV643XX_ETH_H -#include - -#define MV643XX_ETH_SHARED_NAME "mv643xx_eth" -#define MV643XX_ETH_NAME "mv643xx_eth_port" +#define MV643XX_ETH_SHARED_NAME "mv643xx_eth_shared" +#define MV643XX_ETH_NAME "mv643xx_eth" #define MV643XX_ETH_SHARED_REGS 0x2000 #define MV643XX_ETH_SHARED_REGS_SIZE 0x2000 #define MV643XX_ETH_BAR_4 0x2220 #define MV643XX_ETH_SIZE_REG_4 0x2224 #define MV643XX_ETH_BASE_ADDR_ENABLE_REG 0x2290 -struct mv643xx_eth_shared_platform_data { - struct mbus_dram_target_info *dram; - unsigned int t_clk; -}; - struct mv643xx_eth_platform_data { - struct platform_device *shared; int port_number; - - struct platform_device *shared_smi; - u16 force_phy_addr; /* force override if phy_addr == 0 */ u16 phy_addr; diff --git a/trunk/include/linux/netfilter/nf_conntrack_sip.h b/trunk/include/linux/netfilter/nf_conntrack_sip.h index 23aa2ec6b7b7..5da04e586a3f 100644 --- a/trunk/include/linux/netfilter/nf_conntrack_sip.h +++ b/trunk/include/linux/netfilter/nf_conntrack_sip.h @@ -7,7 +7,6 @@ struct nf_ct_sip_master { unsigned int register_cseq; - unsigned int invite_cseq; }; enum sip_expectation_classes { diff --git a/trunk/include/linux/of_i2c.h b/trunk/include/linux/of_i2c.h index bd2a870ec296..2e5a96732042 100644 --- a/trunk/include/linux/of_i2c.h +++ b/trunk/include/linux/of_i2c.h @@ -14,7 +14,11 @@ #include +#ifdef CONFIG_OF_I2C + void of_register_i2c_devices(struct i2c_adapter *adap, struct device_node *adap_node); +#endif /* CONFIG_OF_I2C */ + #endif /* __LINUX_OF_I2C_H */ diff --git a/trunk/include/linux/pci.h b/trunk/include/linux/pci.h index 509159bcd4e7..96acd0dae241 100644 --- a/trunk/include/linux/pci.h +++ b/trunk/include/linux/pci.h @@ -44,7 +44,6 @@ #include #include -#include #include #include #include @@ -475,7 +474,7 @@ extern struct pci_bus *pci_find_bus(int domain, int busnr); void pci_bus_add_devices(struct pci_bus *bus); struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus, struct pci_ops *ops, void *sysdata); -static inline struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops, +static inline struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata) { struct pci_bus *root_bus; @@ -667,7 +666,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *), void *userdata); -int pci_cfg_space_size_ext(struct pci_dev *dev); +int pci_cfg_space_size_ext(struct pci_dev *dev, unsigned check_exp_pcix); int pci_cfg_space_size(struct pci_dev *dev); unsigned char pci_bus_max_busnr(struct pci_bus *bus); diff --git a/trunk/include/linux/phy.h b/trunk/include/linux/phy.h index 7224c4099a28..02df20f085fe 100644 --- a/trunk/include/linux/phy.h +++ b/trunk/include/linux/phy.h @@ -412,8 +412,6 @@ int mdiobus_register(struct mii_bus *bus); void mdiobus_unregister(struct mii_bus *bus); void phy_sanitize_settings(struct phy_device *phydev); int phy_stop_interrupts(struct phy_device *phydev); -int phy_enable_interrupts(struct phy_device *phydev); -int phy_disable_interrupts(struct phy_device *phydev); static inline int phy_read_status(struct phy_device *phydev) { return phydev->drv->read_status(phydev); @@ -449,8 +447,5 @@ int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask, int (*run)(struct phy_device *)); int phy_scan_fixups(struct phy_device *phydev); -int __init mdio_bus_init(void); -void mdio_bus_exit(void); - extern struct bus_type mdio_bus_type; #endif /* __PHY_H */ diff --git 
a/trunk/include/linux/rcupdate.h b/trunk/include/linux/rcupdate.h index d42dbec06083..8082d6587a0f 100644 --- a/trunk/include/linux/rcupdate.h +++ b/trunk/include/linux/rcupdate.h @@ -131,6 +131,18 @@ struct rcu_head { */ #define rcu_read_unlock_bh() __rcu_read_unlock_bh() +/* + * Prevent the compiler from merging or refetching accesses. The compiler + * is also forbidden from reordering successive instances of ACCESS_ONCE(), + * but only when the compiler is aware of some particular ordering. One way + * to make the compiler aware of ordering is to put the two invocations of + * ACCESS_ONCE() in different C statements. + * + * This macro does absolutely -nothing- to prevent the CPU from reordering, + * merging, or refetching absolutely anything at any time. + */ +#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) + /** * rcu_dereference - fetch an RCU-protected pointer in an * RCU read-side critical section. This pointer may later diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h index 0c35b0343a76..03c238088aee 100644 --- a/trunk/include/linux/sched.h +++ b/trunk/include/linux/sched.h @@ -158,8 +158,6 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) } #endif -extern unsigned long long time_sync_thresh; - /* * Task state bitmask. NOTE! These bits are also * encoded in fs/proc/array.c: get_task_state(). @@ -1553,35 +1551,6 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) extern unsigned long long sched_clock(void); -#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK -static inline void sched_clock_init(void) -{ -} - -static inline u64 sched_clock_cpu(int cpu) -{ - return sched_clock(); -} - -static inline void sched_clock_tick(void) -{ -} - -static inline void sched_clock_idle_sleep_event(void) -{ -} - -static inline void sched_clock_idle_wakeup_event(u64 delta_ns) -{ -} -#else -extern void sched_clock_init(void); -extern u64 sched_clock_cpu(int cpu); -extern void sched_clock_tick(void); -extern void sched_clock_idle_sleep_event(void); -extern void sched_clock_idle_wakeup_event(u64 delta_ns); -#endif - /* * For kernel-internal use: high-speed (but slightly incorrect) per-cpu * clock constructed from sched_clock(): @@ -2008,11 +1977,6 @@ static inline void clear_tsk_need_resched(struct task_struct *tsk) clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED); } -static inline int test_tsk_need_resched(struct task_struct *tsk) -{ - return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); -} - static inline int signal_pending(struct task_struct *p) { return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); @@ -2027,7 +1991,7 @@ static inline int fatal_signal_pending(struct task_struct *p) static inline int need_resched(void) { - return unlikely(test_tsk_need_resched(current)); + return unlikely(test_thread_flag(TIF_NEED_RESCHED)); } /* diff --git a/trunk/include/linux/sysfs.h b/trunk/include/linux/sysfs.h index 7858eac40aa7..27bad59dae79 100644 --- a/trunk/include/linux/sysfs.h +++ b/trunk/include/linux/sysfs.h @@ -196,6 +196,12 @@ static inline int sysfs_update_group(struct kobject *kobj, return 0; } +static inline int sysfs_update_group(struct kobject *kobj, + const struct attribute_group *grp) +{ + return 0; +} + static inline void sysfs_remove_group(struct kobject *kobj, const struct attribute_group *grp) { diff --git a/trunk/include/linux/vermagic.h b/trunk/include/linux/vermagic.h index 79b9837d9ca0..4d0909e53595 100644 --- a/trunk/include/linux/vermagic.h +++ b/trunk/include/linux/vermagic.h @@ -17,11 +17,6 @@ #else #define 
MODULE_VERMAGIC_MODULE_UNLOAD "" #endif -#ifdef CONFIG_MODVERSIONS -#define MODULE_VERMAGIC_MODVERSIONS "modversions " -#else -#define MODULE_VERMAGIC_MODVERSIONS "" -#endif #ifndef MODULE_ARCH_VERMAGIC #define MODULE_ARCH_VERMAGIC "" #endif @@ -29,6 +24,5 @@ #define VERMAGIC_STRING \ UTS_RELEASE " " \ MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \ - MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \ - MODULE_ARCH_VERMAGIC + MODULE_VERMAGIC_MODULE_UNLOAD MODULE_ARCH_VERMAGIC diff --git a/trunk/include/net/ip.h b/trunk/include/net/ip.h index 3b40bc2234be..6d7bcd5e62d4 100644 --- a/trunk/include/net/ip.h +++ b/trunk/include/net/ip.h @@ -210,7 +210,7 @@ int ip_dont_fragment(struct sock *sk, struct dst_entry *dst) { return (inet_sk(sk)->pmtudisc == IP_PMTUDISC_DO || (inet_sk(sk)->pmtudisc == IP_PMTUDISC_WANT && - !(dst_metric_locked(dst, RTAX_MTU)))); + !(dst_metric(dst, RTAX_LOCK)&(1<relax_domain_level) { @@ -1278,6 +1280,9 @@ static ssize_t cpuset_common_file_write(struct cgroup *cont, case FILE_MEMLIST: retval = update_nodemask(cs, buffer); break; + case FILE_SCHED_RELAX_DOMAIN_LEVEL: + retval = update_relax_domain_level(cs, buffer); + break; default: retval = -EINVAL; goto out2; @@ -1343,30 +1348,6 @@ static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val) return retval; } -static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val) -{ - int retval = 0; - struct cpuset *cs = cgroup_cs(cgrp); - cpuset_filetype_t type = cft->private; - - cgroup_lock(); - - if (cgroup_is_removed(cgrp)) { - cgroup_unlock(); - return -ENODEV; - } - switch (type) { - case FILE_SCHED_RELAX_DOMAIN_LEVEL: - retval = update_relax_domain_level(cs, val); - break; - default: - retval = -EINVAL; - break; - } - cgroup_unlock(); - return retval; -} - /* * These ascii lists should be read in a single call, by using a user * buffer large enough to hold the entire map. 
If read in smaller @@ -1425,6 +1406,9 @@ static ssize_t cpuset_common_file_read(struct cgroup *cont, case FILE_MEMLIST: s += cpuset_sprintf_memlist(s, cs); break; + case FILE_SCHED_RELAX_DOMAIN_LEVEL: + s += sprintf(s, "%d", cs->relax_domain_level); + break; default: retval = -EINVAL; goto out; @@ -1465,18 +1449,6 @@ static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft) } } -static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft) -{ - struct cpuset *cs = cgroup_cs(cont); - cpuset_filetype_t type = cft->private; - switch (type) { - case FILE_SCHED_RELAX_DOMAIN_LEVEL: - return cs->relax_domain_level; - default: - BUG(); - } -} - /* * for the common functions, 'private' gives the type of file @@ -1527,8 +1499,8 @@ static struct cftype files[] = { { .name = "sched_relax_domain_level", - .read_s64 = cpuset_read_s64, - .write_s64 = cpuset_write_s64, + .read_u64 = cpuset_read_u64, + .write_u64 = cpuset_write_u64, .private = FILE_SCHED_RELAX_DOMAIN_LEVEL, }, diff --git a/trunk/kernel/futex.c b/trunk/kernel/futex.c index 449def8074fe..98092c9817f4 100644 --- a/trunk/kernel/futex.c +++ b/trunk/kernel/futex.c @@ -104,6 +104,10 @@ struct futex_q { /* Key which the futex is hashed on: */ union futex_key key; + /* For fd, sigio sent using these: */ + int fd; + struct file *filp; + /* Optional priority inheritance state: */ struct futex_pi_state *pi_state; struct task_struct *task; @@ -122,6 +126,9 @@ struct futex_hash_bucket { static struct futex_hash_bucket futex_queues[1<mmap_sem, when futex is shared */ @@ -603,6 +610,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, static void wake_futex(struct futex_q *q) { plist_del(&q->list, &q->list.plist); + if (q->filp) + send_sigio(&q->filp->f_owner, q->fd, POLL_IN); /* * The lock in wake_up_all() is a crucial memory barrier after the * plist_del() and also before assigning to q->lock_ptr. @@ -979,10 +988,14 @@ static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared, } /* The key must be already stored in q->key. */ -static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) +static inline struct futex_hash_bucket * +queue_lock(struct futex_q *q, int fd, struct file *filp) { struct futex_hash_bucket *hb; + q->fd = fd; + q->filp = filp; + init_waitqueue_head(&q->waiters); get_futex_key_refs(&q->key); @@ -993,7 +1006,7 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) return hb; } -static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) +static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb) { int prio; @@ -1028,6 +1041,15 @@ queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb) * exactly once. They are called with the hashed spinlock held. */ +/* The key must be already stored in q->key. */ +static void queue_me(struct futex_q *q, int fd, struct file *filp) +{ + struct futex_hash_bucket *hb; + + hb = queue_lock(q, fd, filp); + __queue_me(q, hb); +} + /* Return 1 if we were still queued (ie. 0 means we were woken) */ static int unqueue_me(struct futex_q *q) { @@ -1172,7 +1194,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared, if (unlikely(ret != 0)) goto out_release_sem; - hb = queue_lock(&q); + hb = queue_lock(&q, -1, NULL); /* * Access the page AFTER the futex is queued. @@ -1216,7 +1238,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared, goto out_unlock_release_sem; /* Only actually queue if *uaddr contained val. 
*/ - queue_me(&q, hb); + __queue_me(&q, hb); /* * Now the futex is queued and we have checked the data, we @@ -1364,7 +1386,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared, goto out_release_sem; retry_unlocked: - hb = queue_lock(&q); + hb = queue_lock(&q, -1, NULL); retry_locked: ret = lock_taken = 0; @@ -1477,7 +1499,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared, /* * Only actually queue now that the atomic ops are done: */ - queue_me(&q, hb); + __queue_me(&q, hb); /* * Now the futex is queued and we have checked the data, we @@ -1724,6 +1746,121 @@ static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared) return ret; } +static int futex_close(struct inode *inode, struct file *filp) +{ + struct futex_q *q = filp->private_data; + + unqueue_me(q); + kfree(q); + + return 0; +} + +/* This is one-shot: once it's gone off you need a new fd */ +static unsigned int futex_poll(struct file *filp, + struct poll_table_struct *wait) +{ + struct futex_q *q = filp->private_data; + int ret = 0; + + poll_wait(filp, &q->waiters, wait); + + /* + * plist_node_empty() is safe here without any lock. + * q->lock_ptr != 0 is not safe, because of ordering against wakeup. + */ + if (plist_node_empty(&q->list)) + ret = POLLIN | POLLRDNORM; + + return ret; +} + +static const struct file_operations futex_fops = { + .release = futex_close, + .poll = futex_poll, +}; + +/* + * Signal allows caller to avoid the race which would occur if they + * set the sigio stuff up afterwards. + */ +static int futex_fd(u32 __user *uaddr, int signal) +{ + struct futex_q *q; + struct file *filp; + int ret, err; + struct rw_semaphore *fshared; + static unsigned long printk_interval; + + if (printk_timed_ratelimit(&printk_interval, 60 * 60 * 1000)) { + printk(KERN_WARNING "Process `%s' used FUTEX_FD, which " + "will be removed from the kernel in June 2007\n", + current->comm); + } + + ret = -EINVAL; + if (!valid_signal(signal)) + goto out; + + ret = get_unused_fd(); + if (ret < 0) + goto out; + filp = get_empty_filp(); + if (!filp) { + put_unused_fd(ret); + ret = -ENFILE; + goto out; + } + filp->f_op = &futex_fops; + filp->f_path.mnt = mntget(futex_mnt); + filp->f_path.dentry = dget(futex_mnt->mnt_root); + filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping; + + if (signal) { + err = __f_setown(filp, task_pid(current), PIDTYPE_PID, 1); + if (err < 0) { + goto error; + } + filp->f_owner.signum = signal; + } + + q = kmalloc(sizeof(*q), GFP_KERNEL); + if (!q) { + err = -ENOMEM; + goto error; + } + q->pi_state = NULL; + + fshared = ¤t->mm->mmap_sem; + down_read(fshared); + err = get_futex_key(uaddr, fshared, &q->key); + + if (unlikely(err != 0)) { + up_read(fshared); + kfree(q); + goto error; + } + + /* + * queue_me() must be called before releasing mmap_sem, because + * key->shared.inode needs to be referenced while holding it. + */ + filp->private_data = q; + + queue_me(q, ret, filp); + up_read(fshared); + + /* Now we map fd to filp, so userspace can access it */ + fd_install(ret, filp); +out: + return ret; +error: + put_unused_fd(ret); + put_filp(filp); + ret = err; + goto out; +} + /* * Support for robust futexes: the kernel cleans up held futexes at * thread exit time. 
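The futex_fd() function this hunk restores hands user space a file descriptor that becomes readable when the futex is woken, optionally raising SIGIO as well. FUTEX_FD was already scheduled for removal at the time (note the printk warning about June 2007) and no longer exists in current kernels, so the following user-space sketch is historical illustration only; the syscall wrapping and constants are era-appropriate assumptions, not taken from this patch:

#include <linux/futex.h>
#include <poll.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static int futex_word;

int main(void)
{
	/* val is the signal to raise via SIGIO; 0 means "no signal". */
	int fd = syscall(SYS_futex, &futex_word, FUTEX_FD, 0, NULL, NULL, 0);

	if (fd < 0) {
		perror("FUTEX_FD");	/* expected on kernels without FUTEX_FD */
		return 1;
	}

	/*
	 * The descriptor is one-shot: futex_poll() above reports POLLIN once
	 * a FUTEX_WAKE on &futex_word has dequeued the waiter.
	 */
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int n = poll(&pfd, 1, 100);

	printf("poll() returned %d\n", n);
	close(fd);
	return 0;
}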
@@ -1955,6 +2092,10 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, case FUTEX_WAKE_BITSET: ret = futex_wake(uaddr, fshared, val, val3); break; + case FUTEX_FD: + /* non-zero val means F_SETOWN(getpid()) & F_SETSIG(val) */ + ret = futex_fd(uaddr, val); + break; case FUTEX_REQUEUE: ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL); break; @@ -2015,6 +2156,19 @@ asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val, return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); } +static int futexfs_get_sb(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data, + struct vfsmount *mnt) +{ + return get_sb_pseudo(fs_type, "futex", NULL, FUTEXFS_SUPER_MAGIC, mnt); +} + +static struct file_system_type futex_fs_type = { + .name = "futexfs", + .get_sb = futexfs_get_sb, + .kill_sb = kill_anon_super, +}; + static int __init futex_init(void) { u32 curval; @@ -2039,6 +2193,16 @@ static int __init futex_init(void) spin_lock_init(&futex_queues[i].lock); } + i = register_filesystem(&futex_fs_type); + if (i) + return i; + + futex_mnt = kern_mount(&futex_fs_type); + if (IS_ERR(futex_mnt)) { + unregister_filesystem(&futex_fs_type); + return PTR_ERR(futex_mnt); + } + return 0; } __initcall(futex_init); diff --git a/trunk/kernel/kgdb.c b/trunk/kernel/kgdb.c index 39e31a036f5b..1bd0ec1c80b2 100644 --- a/trunk/kernel/kgdb.c +++ b/trunk/kernel/kgdb.c @@ -61,7 +61,7 @@ struct kgdb_state { int err_code; int cpu; int pass_exception; - unsigned long threadid; + long threadid; long kgdb_usethreadid; struct pt_regs *linux_regs; }; @@ -146,7 +146,7 @@ atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1); * the other CPUs might interfere with your debugging context, so * use this with care: */ -static int kgdb_do_roundup = 1; +int kgdb_do_roundup = 1; static int __init opt_nokgdbroundup(char *str) { @@ -438,7 +438,7 @@ int kgdb_hex2mem(char *buf, char *mem, int count) * While we find nice hex chars, build a long_val. * Return number of chars processed. */ -int kgdb_hex2long(char **ptr, unsigned long *long_val) +int kgdb_hex2long(char **ptr, long *long_val) { int hex_val; int num = 0; @@ -709,7 +709,7 @@ int kgdb_isremovedbreak(unsigned long addr) return 0; } -static int remove_all_break(void) +int remove_all_break(void) { unsigned long addr; int error; diff --git a/trunk/kernel/module.c b/trunk/kernel/module.c index f5e9491ef7ac..8674a390a2e8 100644 --- a/trunk/kernel/module.c +++ b/trunk/kernel/module.c @@ -890,19 +890,6 @@ static struct module_attribute *modinfo_attrs[] = { static const char vermagic[] = VERMAGIC_STRING; -static int try_to_force_load(struct module *mod, const char *symname) -{ -#ifdef CONFIG_MODULE_FORCE_LOAD - if (!(tainted & TAINT_FORCED_MODULE)) - printk("%s: no version for \"%s\" found: kernel tainted.\n", - mod->name, symname); - add_taint_module(mod, TAINT_FORCED_MODULE); - return 0; -#else - return -ENOEXEC; -#endif -} - #ifdef CONFIG_MODVERSIONS static int check_version(Elf_Shdr *sechdrs, unsigned int versindex, @@ -917,10 +904,6 @@ static int check_version(Elf_Shdr *sechdrs, if (!crc) return 1; - /* No versions at all? modprobe --force does this. 
*/ - if (versindex == 0) - return try_to_force_load(mod, symname) == 0; - versions = (void *) sechdrs[versindex].sh_addr; num_versions = sechdrs[versindex].sh_size / sizeof(struct modversion_info); @@ -931,19 +914,18 @@ static int check_version(Elf_Shdr *sechdrs, if (versions[i].crc == *crc) return 1; + printk("%s: disagrees about version of symbol %s\n", + mod->name, symname); DEBUGP("Found checksum %lX vs module %lX\n", *crc, versions[i].crc); - goto bad_version; + return 0; } - - printk(KERN_WARNING "%s: no symbol version for %s\n", - mod->name, symname); - return 0; - -bad_version: - printk("%s: disagrees about version of symbol %s\n", - mod->name, symname); - return 0; + /* Not in module's version table. OK, but that taints the kernel. */ + if (!(tainted & TAINT_FORCED_MODULE)) + printk("%s: no version for \"%s\" found: kernel tainted.\n", + mod->name, symname); + add_taint_module(mod, TAINT_FORCED_MODULE); + return 1; } static inline int check_modstruct_version(Elf_Shdr *sechdrs, @@ -957,14 +939,11 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs, return check_version(sechdrs, versindex, "struct_module", mod, crc); } -/* First part is kernel version, which we ignore if module has crcs. */ -static inline int same_magic(const char *amagic, const char *bmagic, - bool has_crcs) +/* First part is kernel version, which we ignore. */ +static inline int same_magic(const char *amagic, const char *bmagic) { - if (has_crcs) { - amagic += strcspn(amagic, " "); - bmagic += strcspn(bmagic, " "); - } + amagic += strcspn(amagic, " "); + bmagic += strcspn(bmagic, " "); return strcmp(amagic, bmagic) == 0; } #else @@ -984,8 +963,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs, return 1; } -static inline int same_magic(const char *amagic, const char *bmagic, - bool has_crcs) +static inline int same_magic(const char *amagic, const char *bmagic) { return strcmp(amagic, bmagic) == 0; } @@ -1875,10 +1853,10 @@ static struct module *load_module(void __user *umod, modmagic = get_modinfo(sechdrs, infoindex, "vermagic"); /* This is allowed: modprobe --force will invalidate it. */ if (!modmagic) { - err = try_to_force_load(mod, "magic"); - if (err) - goto free_hdr; - } else if (!same_magic(modmagic, vermagic, versindex)) { + add_taint_module(mod, TAINT_FORCED_MODULE); + printk(KERN_WARNING "%s: no version magic, tainting kernel.\n", + mod->name); + } else if (!same_magic(modmagic, vermagic)) { printk(KERN_ERR "%s: version magic '%s' should be '%s'\n", mod->name, modmagic, vermagic); err = -ENOEXEC; @@ -2028,10 +2006,9 @@ static struct module *load_module(void __user *umod, (mod->num_gpl_future_syms && !gplfuturecrcindex) || (mod->num_unused_syms && !unusedcrcindex) || (mod->num_unused_gpl_syms && !unusedgplcrcindex)) { - printk(KERN_WARNING "%s: No versions for exported symbols.\n", mod->name); - err = try_to_force_load(mod, "nocrc"); - if (err) - goto cleanup; + printk(KERN_WARNING "%s: No versions for exported symbols." 
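The module.c changes above revert check_version()/same_magic() to the older behaviour, where a missing symbol version merely taints the kernel and the vermagic comparison always skips the leading kernel-release token. The simplified same_magic() is small enough to exercise directly; the function body below is copied from the hunk, while the sample strings are made up for illustration:

#include <stdio.h>
#include <string.h>

/* From the hunk: skip everything up to the first space (the kernel release),
 * then require the remaining option flags to match exactly. */
static int same_magic(const char *amagic, const char *bmagic)
{
	amagic += strcspn(amagic, " ");
	bmagic += strcspn(bmagic, " ");
	return strcmp(amagic, bmagic) == 0;
}

int main(void)
{
	/* Release differs, flags agree: accepted (prints 1). */
	printf("%d\n", same_magic("2.6.25 SMP mod_unload",
				  "2.6.25.1 SMP mod_unload"));
	/* Flags differ (SMP vs !SMP): rejected (prints 0). */
	printf("%d\n", same_magic("2.6.25 SMP mod_unload",
				  "2.6.25 mod_unload"));
	return 0;
}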
+ " Tainting kernel.\n", mod->name); + add_taint_module(mod, TAINT_FORCED_MODULE); } #endif markersindex = find_sec(hdr, sechdrs, secstrings, "__markers"); diff --git a/trunk/kernel/relay.c b/trunk/kernel/relay.c index bc24dcdc570f..7de644cdec43 100644 --- a/trunk/kernel/relay.c +++ b/trunk/kernel/relay.c @@ -1191,7 +1191,7 @@ static ssize_t relay_file_splice_read(struct file *in, ret = 0; spliced = 0; - while (len) { + while (len && !spliced) { ret = subbuf_splice_actor(in, ppos, pipe, len, flags, &nonpad_ret); if (ret < 0) break; diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c index c51b6565e07c..34bcc5bc120e 100644 --- a/trunk/kernel/sched.c +++ b/trunk/kernel/sched.c @@ -74,6 +74,16 @@ #include #include +/* + * Scheduler clock - returns current time in nanosec units. + * This is default implementation. + * Architectures and sub-architectures can override this. + */ +unsigned long long __attribute__((weak)) sched_clock(void) +{ + return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ); +} + /* * Convert user-nice values [ -20 ... 0 ... 19 ] * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], @@ -232,12 +242,6 @@ static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b) } #endif -/* - * sched_domains_mutex serializes calls to arch_init_sched_domains, - * detach_destroy_domains and partition_sched_domains. - */ -static DEFINE_MUTEX(sched_domains_mutex); - #ifdef CONFIG_GROUP_SCHED #include @@ -304,6 +308,9 @@ static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp; */ static DEFINE_SPINLOCK(task_group_lock); +/* doms_cur_mutex serializes access to doms_cur[] array */ +static DEFINE_MUTEX(doms_cur_mutex); + #ifdef CONFIG_FAIR_GROUP_SCHED #ifdef CONFIG_USER_SCHED # define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD) @@ -311,13 +318,7 @@ static DEFINE_SPINLOCK(task_group_lock); # define INIT_TASK_GROUP_LOAD NICE_0_LOAD #endif -/* - * A weight of 0, 1 or ULONG_MAX can cause arithmetics problems. - * (The default weight is 1024 - so there's no practical - * limitation from this.) - */ #define MIN_SHARES 2 -#define MAX_SHARES (ULONG_MAX - 1) static int init_task_group_load = INIT_TASK_GROUP_LOAD; #endif @@ -357,9 +358,21 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu) #endif } +static inline void lock_doms_cur(void) +{ + mutex_lock(&doms_cur_mutex); +} + +static inline void unlock_doms_cur(void) +{ + mutex_unlock(&doms_cur_mutex); +} + #else static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } +static inline void lock_doms_cur(void) { } +static inline void unlock_doms_cur(void) { } #endif /* CONFIG_GROUP_SCHED */ @@ -547,7 +560,13 @@ struct rq { unsigned long next_balance; struct mm_struct *prev_mm; - u64 clock; + u64 clock, prev_clock_raw; + s64 clock_max_delta; + + unsigned int clock_warps, clock_overflows, clock_underflows; + u64 idle_clock; + unsigned int clock_deep_idle_events; + u64 tick_timestamp; atomic_t nr_iowait; @@ -612,6 +631,82 @@ static inline int cpu_of(struct rq *rq) #endif } +#ifdef CONFIG_NO_HZ +static inline bool nohz_on(int cpu) +{ + return tick_get_tick_sched(cpu)->nohz_mode != NOHZ_MODE_INACTIVE; +} + +static inline u64 max_skipped_ticks(struct rq *rq) +{ + return nohz_on(cpu_of(rq)) ? 
jiffies - rq->last_tick_seen + 2 : 1; +} + +static inline void update_last_tick_seen(struct rq *rq) +{ + rq->last_tick_seen = jiffies; +} +#else +static inline u64 max_skipped_ticks(struct rq *rq) +{ + return 1; +} + +static inline void update_last_tick_seen(struct rq *rq) +{ +} +#endif + +/* + * Update the per-runqueue clock, as finegrained as the platform can give + * us, but without assuming monotonicity, etc.: + */ +static void __update_rq_clock(struct rq *rq) +{ + u64 prev_raw = rq->prev_clock_raw; + u64 now = sched_clock(); + s64 delta = now - prev_raw; + u64 clock = rq->clock; + +#ifdef CONFIG_SCHED_DEBUG + WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); +#endif + /* + * Protect against sched_clock() occasionally going backwards: + */ + if (unlikely(delta < 0)) { + clock++; + rq->clock_warps++; + } else { + /* + * Catch too large forward jumps too: + */ + u64 max_jump = max_skipped_ticks(rq) * TICK_NSEC; + u64 max_time = rq->tick_timestamp + max_jump; + + if (unlikely(clock + delta > max_time)) { + if (clock < max_time) + clock = max_time; + else + clock++; + rq->clock_overflows++; + } else { + if (unlikely(delta > rq->clock_max_delta)) + rq->clock_max_delta = delta; + clock += delta; + } + } + + rq->prev_clock_raw = now; + rq->clock = clock; +} + +static void update_rq_clock(struct rq *rq) +{ + if (likely(smp_processor_id() == cpu_of(rq))) + __update_rq_clock(rq); +} + /* * The domain tree (rq->sd) is protected by RCU's quiescent state transition. * See detach_destroy_domains: synchronize_sched for details. @@ -627,11 +722,6 @@ static inline int cpu_of(struct rq *rq) #define task_rq(p) cpu_rq(task_cpu(p)) #define cpu_curr(cpu) (cpu_rq(cpu)->curr) -static inline void update_rq_clock(struct rq *rq) -{ - rq->clock = sched_clock_cpu(cpu_of(rq)); -} - /* * Tunables that become constants when CONFIG_SCHED_DEBUG is off: */ @@ -667,14 +757,14 @@ const_debug unsigned int sysctl_sched_features = #define SCHED_FEAT(name, enabled) \ #name , -static __read_mostly char *sched_feat_names[] = { +__read_mostly char *sched_feat_names[] = { #include "sched_features.h" NULL }; #undef SCHED_FEAT -static int sched_feat_open(struct inode *inode, struct file *filp) +int sched_feat_open(struct inode *inode, struct file *filp) { filp->private_data = inode->i_private; return 0; @@ -809,7 +899,7 @@ static inline u64 global_rt_runtime(void) return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; } -unsigned long long time_sync_thresh = 100000; +static const unsigned long long time_sync_thresh = 100000; static DEFINE_PER_CPU(unsigned long long, time_offset); static DEFINE_PER_CPU(unsigned long long, prev_cpu_time); @@ -823,14 +913,11 @@ static DEFINE_PER_CPU(unsigned long long, prev_cpu_time); static DEFINE_SPINLOCK(time_sync_lock); static unsigned long long prev_global_time; -static unsigned long long __sync_cpu_clock(unsigned long long time, int cpu) +static unsigned long long __sync_cpu_clock(cycles_t time, int cpu) { - /* - * We want this inlined, to not get tracer function calls - * in this critical section: - */ - spin_acquire(&time_sync_lock.dep_map, 0, 0, _THIS_IP_); - __raw_spin_lock(&time_sync_lock.raw_lock); + unsigned long flags; + + spin_lock_irqsave(&time_sync_lock, flags); if (time < prev_global_time) { per_cpu(time_offset, cpu) += prev_global_time - time; @@ -839,8 +926,7 @@ static unsigned long long __sync_cpu_clock(unsigned long long time, int cpu) prev_global_time = time; } - __raw_spin_unlock(&time_sync_lock.raw_lock); - spin_release(&time_sync_lock.dep_map, 1, _THIS_IP_); + 
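__update_rq_clock() in the hunk above filters the raw sched_clock() value so that rq->clock never moves backwards and never jumps more than the allowed number of ticks past the last tick timestamp. A user-space simulation of that clamping, assuming a fixed 1 ms tick and ignoring the NO_HZ multi-tick allowance (both simplifications are for the demo only):

#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 1000000ULL	/* pretend HZ=1000 for the demo */

static uint64_t rq_clock, prev_raw, tick_timestamp;

/* Mirrors the filtering in __update_rq_clock(): ignore backward motion of
 * the raw clock and clip oversized forward jumps to one tick past the last
 * tick timestamp (which scheduler_tick() would normally keep refreshing). */
static void update_clock(uint64_t now)
{
	int64_t delta = (int64_t)(now - prev_raw);

	if (delta < 0) {
		rq_clock++;				/* clock warp */
	} else {
		uint64_t max_time = tick_timestamp + TICK_NSEC;

		if (rq_clock + delta > max_time)	/* clock overflow */
			rq_clock = rq_clock < max_time ? max_time : rq_clock + 1;
		else
			rq_clock += delta;
	}
	prev_raw = now;
}

int main(void)
{
	uint64_t samples[] = { 100, 90 /* backwards */, 5000000 /* huge jump */ };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		update_clock(samples[i]);
		printf("raw=%llu clock=%llu\n",
		       (unsigned long long)samples[i],
		       (unsigned long long)rq_clock);
	}
	return 0;
}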
spin_unlock_irqrestore(&time_sync_lock, flags); return time; } @@ -848,6 +934,8 @@ static unsigned long long __sync_cpu_clock(unsigned long long time, int cpu) static unsigned long long __cpu_clock(int cpu) { unsigned long long now; + unsigned long flags; + struct rq *rq; /* * Only call sched_clock() if the scheduler has already been @@ -856,7 +944,11 @@ static unsigned long long __cpu_clock(int cpu) if (unlikely(!scheduler_running)) return 0; - now = sched_clock_cpu(cpu); + local_irq_save(flags); + rq = cpu_rq(cpu); + update_rq_clock(rq); + now = rq->clock; + local_irq_restore(flags); return now; } @@ -868,18 +960,13 @@ static unsigned long long __cpu_clock(int cpu) unsigned long long cpu_clock(int cpu) { unsigned long long prev_cpu_time, time, delta_time; - unsigned long flags; - local_irq_save(flags); prev_cpu_time = per_cpu(prev_cpu_time, cpu); time = __cpu_clock(cpu) + per_cpu(time_offset, cpu); delta_time = time-prev_cpu_time; - if (unlikely(delta_time > time_sync_thresh)) { + if (unlikely(delta_time > time_sync_thresh)) time = __sync_cpu_clock(time, cpu); - per_cpu(prev_cpu_time, cpu) = time; - } - local_irq_restore(flags); return time; } @@ -1030,6 +1117,43 @@ static struct rq *this_rq_lock(void) return rq; } +/* + * We are going deep-idle (irqs are disabled): + */ +void sched_clock_idle_sleep_event(void) +{ + struct rq *rq = cpu_rq(smp_processor_id()); + + spin_lock(&rq->lock); + __update_rq_clock(rq); + spin_unlock(&rq->lock); + rq->clock_deep_idle_events++; +} +EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event); + +/* + * We just idled delta nanoseconds (called with irqs disabled): + */ +void sched_clock_idle_wakeup_event(u64 delta_ns) +{ + struct rq *rq = cpu_rq(smp_processor_id()); + u64 now = sched_clock(); + + rq->idle_clock += delta_ns; + /* + * Override the previous timestamp and ignore all + * sched_clock() deltas that occured while we idled, + * and use the PM-provided delta_ns to advance the + * rq clock: + */ + spin_lock(&rq->lock); + rq->prev_clock_raw = now; + rq->clock += delta_ns; + spin_unlock(&rq->lock); + touch_softlockup_watchdog(); +} +EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); + static void __resched_task(struct task_struct *p, int tif_bit); static inline void resched_task(struct task_struct *p) @@ -1065,7 +1189,6 @@ static inline void resched_rq(struct rq *rq) enum { HRTICK_SET, /* re-programm hrtick_timer */ HRTICK_RESET, /* not a new slice */ - HRTICK_BLOCK, /* stop hrtick operations */ }; /* @@ -1077,8 +1200,6 @@ static inline int hrtick_enabled(struct rq *rq) { if (!sched_feat(HRTICK)) return 0; - if (unlikely(test_bit(HRTICK_BLOCK, &rq->hrtick_flags))) - return 0; return hrtimer_is_hres_active(&rq->hrtick_timer); } @@ -1154,70 +1275,14 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer) WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); spin_lock(&rq->lock); - update_rq_clock(rq); + __update_rq_clock(rq); rq->curr->sched_class->task_tick(rq, rq->curr, 1); spin_unlock(&rq->lock); return HRTIMER_NORESTART; } -static void hotplug_hrtick_disable(int cpu) -{ - struct rq *rq = cpu_rq(cpu); - unsigned long flags; - - spin_lock_irqsave(&rq->lock, flags); - rq->hrtick_flags = 0; - __set_bit(HRTICK_BLOCK, &rq->hrtick_flags); - spin_unlock_irqrestore(&rq->lock, flags); - - hrtick_clear(rq); -} - -static void hotplug_hrtick_enable(int cpu) -{ - struct rq *rq = cpu_rq(cpu); - unsigned long flags; - - spin_lock_irqsave(&rq->lock, flags); - __clear_bit(HRTICK_BLOCK, &rq->hrtick_flags); - spin_unlock_irqrestore(&rq->lock, flags); -} - -static int 
-hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu) -{ - int cpu = (int)(long)hcpu; - - switch (action) { - case CPU_UP_CANCELED: - case CPU_UP_CANCELED_FROZEN: - case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: - case CPU_DEAD: - case CPU_DEAD_FROZEN: - hotplug_hrtick_disable(cpu); - return NOTIFY_OK; - - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - case CPU_DOWN_FAILED: - case CPU_DOWN_FAILED_FROZEN: - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - hotplug_hrtick_enable(cpu); - return NOTIFY_OK; - } - - return NOTIFY_DONE; -} - -static void init_hrtick(void) -{ - hotcpu_notifier(hotplug_hrtick, 0); -} - -static void init_rq_hrtick(struct rq *rq) +static inline void init_rq_hrtick(struct rq *rq) { rq->hrtick_flags = 0; hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); @@ -1254,10 +1319,6 @@ static inline void init_rq_hrtick(struct rq *rq) void hrtick_resched(void) { } - -static inline void init_hrtick(void) -{ -} #endif /* @@ -1377,8 +1438,8 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight, { u64 tmp; - if (!lw->inv_weight) - lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)/(lw->weight+1); + if (unlikely(!lw->inv_weight)) + lw->inv_weight = (WMULT_CONST-lw->weight/2) / (lw->weight+1); tmp = (u64)delta_exec * weight; /* @@ -1687,8 +1748,6 @@ __update_group_shares_cpu(struct task_group *tg, struct sched_domain *sd, if (shares < MIN_SHARES) shares = MIN_SHARES; - else if (shares > MAX_SHARES) - shares = MAX_SHARES; __set_se_shares(tg->se[tcpu], shares); } @@ -4280,10 +4339,8 @@ void account_system_time(struct task_struct *p, int hardirq_offset, struct rq *rq = this_rq(); cputime64_t tmp; - if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { - account_guest_time(p, cputime); - return; - } + if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) + return account_guest_time(p, cputime); p->stime = cputime_add(p->stime, cputime); @@ -4347,11 +4404,19 @@ void scheduler_tick(void) int cpu = smp_processor_id(); struct rq *rq = cpu_rq(cpu); struct task_struct *curr = rq->curr; - - sched_clock_tick(); + u64 next_tick = rq->tick_timestamp + TICK_NSEC; spin_lock(&rq->lock); - update_rq_clock(rq); + __update_rq_clock(rq); + /* + * Let rq->clock advance by at least TICK_NSEC: + */ + if (unlikely(rq->clock < next_tick)) { + rq->clock = next_tick; + rq->clock_underflows++; + } + rq->tick_timestamp = rq->clock; + update_last_tick_seen(rq); update_cpu_load(rq); curr->sched_class->task_tick(rq, curr, 0); spin_unlock(&rq->lock); @@ -4505,7 +4570,7 @@ asmlinkage void __sched schedule(void) * Do the rq-clock update outside the rq lock: */ local_irq_disable(); - update_rq_clock(rq); + __update_rq_clock(rq); spin_lock(&rq->lock); clear_tsk_need_resched(prev); @@ -4530,9 +4595,9 @@ asmlinkage void __sched schedule(void) prev->sched_class->put_prev_task(rq, prev); next = pick_next_task(rq, prev); - if (likely(prev != next)) { - sched_info_switch(prev, next); + sched_info_switch(prev, next); + if (likely(prev != next)) { rq->nr_switches++; rq->curr = next; ++*switch_count; @@ -4567,6 +4632,8 @@ EXPORT_SYMBOL(schedule); asmlinkage void __sched preempt_schedule(void) { struct thread_info *ti = current_thread_info(); + struct task_struct *task = current; + int saved_lock_depth; /* * If there is a non-zero preempt_count or interrupts are disabled, @@ -4577,7 +4644,16 @@ asmlinkage void __sched preempt_schedule(void) do { add_preempt_count(PREEMPT_ACTIVE); + + /* + * We keep the big kernel semaphore locked, but we + * clear 
->lock_depth so that schedule() doesnt + * auto-release the semaphore: + */ + saved_lock_depth = task->lock_depth; + task->lock_depth = -1; schedule(); + task->lock_depth = saved_lock_depth; sub_preempt_count(PREEMPT_ACTIVE); /* @@ -4598,15 +4674,26 @@ EXPORT_SYMBOL(preempt_schedule); asmlinkage void __sched preempt_schedule_irq(void) { struct thread_info *ti = current_thread_info(); + struct task_struct *task = current; + int saved_lock_depth; /* Catch callers which need to be fixed */ BUG_ON(ti->preempt_count || !irqs_disabled()); do { add_preempt_count(PREEMPT_ACTIVE); + + /* + * We keep the big kernel semaphore locked, but we + * clear ->lock_depth so that schedule() doesnt + * auto-release the semaphore: + */ + saved_lock_depth = task->lock_depth; + task->lock_depth = -1; local_irq_enable(); schedule(); local_irq_disable(); + task->lock_depth = saved_lock_depth; sub_preempt_count(PREEMPT_ACTIVE); /* @@ -5831,11 +5918,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) spin_unlock_irqrestore(&rq->lock, flags); /* Set the preempt count _outside_ the spinlocks! */ -#if defined(CONFIG_PREEMPT) - task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0); -#else task_thread_info(idle)->preempt_count = 0; -#endif + /* * The idle tasks have their own, simple scheduling class: */ @@ -7671,7 +7755,7 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, { int i, j; - mutex_lock(&sched_domains_mutex); + lock_doms_cur(); /* always unregister in case we don't destroy any domains */ unregister_sched_domain_sysctl(); @@ -7720,7 +7804,7 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, register_sched_domain_sysctl(); - mutex_unlock(&sched_domains_mutex); + unlock_doms_cur(); } #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) @@ -7729,10 +7813,8 @@ int arch_reinit_sched_domains(void) int err; get_online_cpus(); - mutex_lock(&sched_domains_mutex); detach_destroy_domains(&cpu_online_map); err = arch_init_sched_domains(&cpu_online_map); - mutex_unlock(&sched_domains_mutex); put_online_cpus(); return err; @@ -7850,16 +7932,13 @@ void __init sched_init_smp(void) BUG_ON(sched_group_nodes_bycpu == NULL); #endif get_online_cpus(); - mutex_lock(&sched_domains_mutex); arch_init_sched_domains(&cpu_online_map); cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map); if (cpus_empty(non_isolated_cpus)) cpu_set(smp_processor_id(), non_isolated_cpus); - mutex_unlock(&sched_domains_mutex); put_online_cpus(); /* XXX: Theoretical race here - CPU may be hotplugged now */ hotcpu_notifier(update_sched_domains, 0); - init_hrtick(); /* Move init over to a non-isolated CPU */ if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0) @@ -7946,7 +8025,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, se->my_q = cfs_rq; se->load.weight = tg->shares; - se->load.inv_weight = 0; + se->load.inv_weight = div64_u64(1ULL<<32, se->load.weight); se->parent = parent; } #endif @@ -8070,6 +8149,8 @@ void __init sched_init(void) spin_lock_init(&rq->lock); lockdep_set_class(&rq->lock, &rq->rq_lock_key); rq->nr_running = 0; + rq->clock = 1; + update_last_tick_seen(rq); init_cfs_rq(&rq->cfs, rq); init_rt_rq(&rq->rt, rq); #ifdef CONFIG_FAIR_GROUP_SCHED @@ -8213,7 +8294,6 @@ EXPORT_SYMBOL(__might_sleep); static void normalize_task(struct rq *rq, struct task_struct *p) { int on_rq; - update_rq_clock(rq); on_rq = p->se.on_rq; if (on_rq) @@ -8245,6 +8325,7 @@ void normalize_rt_tasks(void) p->se.sleep_start = 0; p->se.block_start = 0; #endif + 
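On the CFS weight arithmetic touched in the hunks above (calc_delta_mine() and the inv_weight initialisations that switch to div64_u64(1ULL<<32, weight)): dividing by a load weight is replaced by one multiplication with a precomputed 32.32 fixed-point reciprocal. A rough user-space sketch of that scaling; the real code adds rounding corrections and overflow guards that are omitted here:

#include <stdint.h>
#include <stdio.h>

#define WMULT_SHIFT 32

/* delta * (weight / lw_weight), computed as
 * delta * weight * (2^32 / lw_weight) >> 32, so the per-call division
 * disappears once inv_weight has been cached. */
static uint64_t scale_delta(uint64_t delta, uint32_t weight, uint32_t lw_weight)
{
	uint64_t inv_weight = (1ULL << WMULT_SHIFT) / lw_weight;

	return (delta * weight * inv_weight) >> WMULT_SHIFT;
}

int main(void)
{
	/* NICE_0_LOAD is 1024: equal weights leave the delta untouched... */
	printf("%llu\n", (unsigned long long)scale_delta(1000000, 1024, 1024));
	/* ...while a doubled queue weight halves this entity's share. */
	printf("%llu\n", (unsigned long long)scale_delta(1000000, 1024, 2048));
	return 0;
}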
task_rq(p)->clock = 0; if (!rt_task(p)) { /* @@ -8611,7 +8692,7 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares) dequeue_entity(cfs_rq, se, 0); se->load.weight = shares; - se->load.inv_weight = 0; + se->load.inv_weight = div64_u64((1ULL<<32), shares); if (on_rq) enqueue_entity(cfs_rq, se, 0); @@ -8641,10 +8722,13 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares) if (!tg->se[0]) return -EINVAL; + /* + * A weight of 0 or 1 can cause arithmetics problems. + * (The default weight is 1024 - so there's no practical + * limitation from this.) + */ if (shares < MIN_SHARES) shares = MIN_SHARES; - else if (shares > MAX_SHARES) - shares = MAX_SHARES; mutex_lock(&shares_mutex); if (tg->shares == shares) @@ -8669,7 +8753,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares) * force a rebalance */ cfs_rq_set_shares(tg->cfs_rq[i], 0); - set_se_shares(tg->se[i], shares); + set_se_shares(tg->se[i], shares/nr_cpu_ids); } /* diff --git a/trunk/kernel/sched_clock.c b/trunk/kernel/sched_clock.c deleted file mode 100644 index 9c597e37f7de..000000000000 --- a/trunk/kernel/sched_clock.c +++ /dev/null @@ -1,236 +0,0 @@ -/* - * sched_clock for unstable cpu clocks - * - * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra - * - * Based on code by: - * Ingo Molnar - * Guillaume Chazarain - * - * Create a semi stable clock from a mixture of other events, including: - * - gtod - * - jiffies - * - sched_clock() - * - explicit idle events - * - * We use gtod as base and the unstable clock deltas. The deltas are filtered, - * making it monotonic and keeping it within an expected window. This window - * is set up using jiffies. - * - * Furthermore, explicit sleep and wakeup hooks allow us to account for time - * that is otherwise invisible (TSC gets stopped). - * - * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat - * consistent between cpus (never more than 1 jiffies difference). - */ -#include -#include -#include -#include -#include - - -#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK - -struct sched_clock_data { - /* - * Raw spinlock - this is a special case: this might be called - * from within instrumentation code so we dont want to do any - * instrumentation ourselves. 
- */ - raw_spinlock_t lock; - - unsigned long prev_jiffies; - u64 prev_raw; - u64 tick_raw; - u64 tick_gtod; - u64 clock; -}; - -static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data); - -static inline struct sched_clock_data *this_scd(void) -{ - return &__get_cpu_var(sched_clock_data); -} - -static inline struct sched_clock_data *cpu_sdc(int cpu) -{ - return &per_cpu(sched_clock_data, cpu); -} - -void sched_clock_init(void) -{ - u64 ktime_now = ktime_to_ns(ktime_get()); - u64 now = 0; - int cpu; - - for_each_possible_cpu(cpu) { - struct sched_clock_data *scd = cpu_sdc(cpu); - - scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; - scd->prev_jiffies = jiffies; - scd->prev_raw = now; - scd->tick_raw = now; - scd->tick_gtod = ktime_now; - scd->clock = ktime_now; - } -} - -/* - * update the percpu scd from the raw @now value - * - * - filter out backward motion - * - use jiffies to generate a min,max window to clip the raw values - */ -static void __update_sched_clock(struct sched_clock_data *scd, u64 now) -{ - unsigned long now_jiffies = jiffies; - long delta_jiffies = now_jiffies - scd->prev_jiffies; - u64 clock = scd->clock; - u64 min_clock, max_clock; - s64 delta = now - scd->prev_raw; - - WARN_ON_ONCE(!irqs_disabled()); - min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC; - - if (unlikely(delta < 0)) { - clock++; - goto out; - } - - max_clock = min_clock + TICK_NSEC; - - if (unlikely(clock + delta > max_clock)) { - if (clock < max_clock) - clock = max_clock; - else - clock++; - } else { - clock += delta; - } - - out: - if (unlikely(clock < min_clock)) - clock = min_clock; - - scd->prev_raw = now; - scd->prev_jiffies = now_jiffies; - scd->clock = clock; -} - -static void lock_double_clock(struct sched_clock_data *data1, - struct sched_clock_data *data2) -{ - if (data1 < data2) { - __raw_spin_lock(&data1->lock); - __raw_spin_lock(&data2->lock); - } else { - __raw_spin_lock(&data2->lock); - __raw_spin_lock(&data1->lock); - } -} - -u64 sched_clock_cpu(int cpu) -{ - struct sched_clock_data *scd = cpu_sdc(cpu); - u64 now, clock; - - WARN_ON_ONCE(!irqs_disabled()); - now = sched_clock(); - - if (cpu != raw_smp_processor_id()) { - /* - * in order to update a remote cpu's clock based on our - * unstable raw time rebase it against: - * tick_raw (offset between raw counters) - * tick_gotd (tick offset between cpus) - */ - struct sched_clock_data *my_scd = this_scd(); - - lock_double_clock(scd, my_scd); - - now -= my_scd->tick_raw; - now += scd->tick_raw; - - now -= my_scd->tick_gtod; - now += scd->tick_gtod; - - __raw_spin_unlock(&my_scd->lock); - } else { - __raw_spin_lock(&scd->lock); - } - - __update_sched_clock(scd, now); - clock = scd->clock; - - __raw_spin_unlock(&scd->lock); - - return clock; -} - -void sched_clock_tick(void) -{ - struct sched_clock_data *scd = this_scd(); - u64 now, now_gtod; - - WARN_ON_ONCE(!irqs_disabled()); - - now = sched_clock(); - now_gtod = ktime_to_ns(ktime_get()); - - __raw_spin_lock(&scd->lock); - __update_sched_clock(scd, now); - /* - * update tick_gtod after __update_sched_clock() because that will - * already observe 1 new jiffy; adding a new tick_gtod to that would - * increase the clock 2 jiffies. 
- */ - scd->tick_raw = now; - scd->tick_gtod = now_gtod; - __raw_spin_unlock(&scd->lock); -} - -/* - * We are going deep-idle (irqs are disabled): - */ -void sched_clock_idle_sleep_event(void) -{ - sched_clock_cpu(smp_processor_id()); -} -EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event); - -/* - * We just idled delta nanoseconds (called with irqs disabled): - */ -void sched_clock_idle_wakeup_event(u64 delta_ns) -{ - struct sched_clock_data *scd = this_scd(); - u64 now = sched_clock(); - - /* - * Override the previous timestamp and ignore all - * sched_clock() deltas that occured while we idled, - * and use the PM-provided delta_ns to advance the - * rq clock: - */ - __raw_spin_lock(&scd->lock); - scd->prev_raw = now; - scd->clock += delta_ns; - __raw_spin_unlock(&scd->lock); - - touch_softlockup_watchdog(); -} -EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); - -#endif - -/* - * Scheduler clock - returns current time in nanosec units. - * This is default implementation. - * Architectures and sub-architectures can override this. - */ -unsigned long long __attribute__((weak)) sched_clock(void) -{ - return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ); -} diff --git a/trunk/kernel/sched_debug.c b/trunk/kernel/sched_debug.c index 5f06118fbc31..6b4a12558e88 100644 --- a/trunk/kernel/sched_debug.c +++ b/trunk/kernel/sched_debug.c @@ -204,6 +204,13 @@ static void print_cpu(struct seq_file *m, int cpu) PN(next_balance); P(curr->pid); PN(clock); + PN(idle_clock); + PN(prev_clock_raw); + P(clock_warps); + P(clock_overflows); + P(clock_underflows); + P(clock_deep_idle_events); + PN(clock_max_delta); P(cpu_load[0]); P(cpu_load[1]); P(cpu_load[2]); diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c index e24ecd39c4b8..89fa32b4edf2 100644 --- a/trunk/kernel/sched_fair.c +++ b/trunk/kernel/sched_fair.c @@ -662,15 +662,10 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) if (!initial) { /* sleeps upto a single latency don't count. */ if (sched_feat(NEW_FAIR_SLEEPERS)) { - unsigned long thresh = sysctl_sched_latency; - - /* - * convert the sleeper threshold into virtual time - */ if (sched_feat(NORMALIZED_SLEEPER)) - thresh = calc_delta_fair(thresh, se); - - vruntime -= thresh; + vruntime -= calc_delta_weight(sysctl_sched_latency, se); + else + vruntime -= sysctl_sched_latency; } /* ensure we never gain time by being placed backwards. */ @@ -687,7 +682,6 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup) * Update run-time statistics of the 'current'. */ update_curr(cfs_rq); - account_entity_enqueue(cfs_rq, se); if (wakeup) { place_entity(cfs_rq, se, 0); @@ -698,6 +692,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup) check_spread(cfs_rq, se); if (se != cfs_rq->curr) __enqueue_entity(cfs_rq, se); + account_entity_enqueue(cfs_rq, se); } static void update_avg(u64 *avg, u64 sample) @@ -846,10 +841,8 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) * queued ticks are scheduled to match the slice, so don't bother * validating it and just reschedule. */ - if (queued) { - resched_task(rq_of(cfs_rq)->curr); - return; - } + if (queued) + return resched_task(rq_of(cfs_rq)->curr); /* * don't let the period tick interfere with the hrtick preemption */ @@ -964,7 +957,7 @@ static void yield_task_fair(struct rq *rq) return; if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) { - update_rq_clock(rq); + __update_rq_clock(rq); /* * Update run-time statistics of the 'current'. 
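On the place_entity() change in the sched_fair.c hunk above: with NEW_FAIR_SLEEPERS a waking task is placed up to one scheduling-latency period before the queue's min_vruntime (optionally weight-normalised), but never behind the vruntime it already owns. A small numeric sketch of that placement rule; the 20 ms value for sysctl_sched_latency is an assumption about kernels of this vintage, and the initial-placement and normalisation cases are left out:

#include <stdio.h>

/* Simplified place_entity(): give a waking sleeper up to one latency period
 * of credit, but "ensure we never gain time by being placed backwards". */
static unsigned long long place(unsigned long long min_vruntime,
				unsigned long long se_vruntime, int waking)
{
	const unsigned long long sched_latency_ns = 20000000ULL; /* assumed */
	unsigned long long vruntime = min_vruntime;

	if (waking)
		vruntime -= sched_latency_ns;	/* NEW_FAIR_SLEEPERS credit */
	if (se_vruntime > vruntime)
		vruntime = se_vruntime;
	return vruntime;
}

int main(void)
{
	/* Long sleeper: gets the full credit (prints 80000000). */
	printf("%llu\n", place(100000000ULL, 50000000ULL, 1));
	/* Recently ran: keeps its own, later vruntime (prints 95000000). */
	printf("%llu\n", place(100000000ULL, 95000000ULL, 1));
	return 0;
}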
*/ @@ -1014,7 +1007,7 @@ static int wake_idle(int cpu, struct task_struct *p) * sibling runqueue info. This will avoid the checks and cache miss * penalities associated with that. */ - if (idle_cpu(cpu) || cpu_rq(cpu)->cfs.nr_running > 1) + if (idle_cpu(cpu) || cpu_rq(cpu)->nr_running > 1) return cpu; for_each_domain(cpu, sd) { @@ -1618,6 +1611,30 @@ static const struct sched_class fair_sched_class = { }; #ifdef CONFIG_SCHED_DEBUG +static void +print_cfs_rq_tasks(struct seq_file *m, struct cfs_rq *cfs_rq, int depth) +{ + struct sched_entity *se; + + if (!cfs_rq) + return; + + list_for_each_entry_rcu(se, &cfs_rq->tasks, group_node) { + int i; + + for (i = depth; i; i--) + seq_puts(m, " "); + + seq_printf(m, "%lu %s %lu\n", + se->load.weight, + entity_is_task(se) ? "T" : "G", + calc_delta_weight(SCHED_LOAD_SCALE, se) + ); + if (!entity_is_task(se)) + print_cfs_rq_tasks(m, group_cfs_rq(se), depth + 1); + } +} + static void print_cfs_stats(struct seq_file *m, int cpu) { struct cfs_rq *cfs_rq; @@ -1625,6 +1642,9 @@ static void print_cfs_stats(struct seq_file *m, int cpu) rcu_read_lock(); for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq) print_cfs_rq(m, cpu, cfs_rq); + + seq_printf(m, "\nWeight tree:\n"); + print_cfs_rq_tasks(m, &cpu_rq(cpu)->cfs, 1); rcu_read_unlock(); } #endif diff --git a/trunk/kernel/sched_idletask.c b/trunk/kernel/sched_idletask.c index 3a4f92dbbe66..2bcafa375633 100644 --- a/trunk/kernel/sched_idletask.c +++ b/trunk/kernel/sched_idletask.c @@ -99,7 +99,7 @@ static void prio_changed_idle(struct rq *rq, struct task_struct *p, /* * Simple, special scheduling class for the per-CPU idle tasks: */ -static const struct sched_class idle_sched_class = { +const struct sched_class idle_sched_class = { /* .next is NULL */ /* no enqueue/yield_task for idle tasks */ diff --git a/trunk/kernel/sched_rt.c b/trunk/kernel/sched_rt.c index 060e87b0cb1c..c2730a5a4f05 100644 --- a/trunk/kernel/sched_rt.c +++ b/trunk/kernel/sched_rt.c @@ -1098,14 +1098,11 @@ static void post_schedule_rt(struct rq *rq) } } -/* - * If we are not running and we are not going to reschedule soon, we should - * try to push tasks away now - */ + static void task_wake_up_rt(struct rq *rq, struct task_struct *p) { if (!task_running(rq, p) && - !test_tsk_need_resched(rq->curr) && + (p->prio >= rq->rt.highest_prio) && rq->rt.overloaded) push_rt_tasks(rq); } @@ -1312,7 +1309,7 @@ static void set_curr_task_rt(struct rq *rq) p->se.exec_start = rq->clock; } -static const struct sched_class rt_sched_class = { +const struct sched_class rt_sched_class = { .next = &fair_sched_class, .enqueue_task = enqueue_task_rt, .dequeue_task = dequeue_task_rt, diff --git a/trunk/lib/Kconfig.kgdb b/trunk/lib/Kconfig.kgdb index a5d4b1dac2a5..f2e01ac5ab09 100644 --- a/trunk/lib/Kconfig.kgdb +++ b/trunk/lib/Kconfig.kgdb @@ -1,10 +1,4 @@ -config HAVE_ARCH_KGDB_SHADOW_INFO - bool - -config HAVE_ARCH_KGDB - bool - menuconfig KGDB bool "KGDB: kernel debugging with remote gdb" select FRAME_POINTER @@ -16,10 +10,15 @@ menuconfig KGDB at http://kgdb.sourceforge.net as well as in DocBook form in Documentation/DocBook/. If unsure, say N. 
-if KGDB +config HAVE_ARCH_KGDB_SHADOW_INFO + bool + +config HAVE_ARCH_KGDB + bool config KGDB_SERIAL_CONSOLE tristate "KGDB: use kgdb over the serial console" + depends on KGDB select CONSOLE_POLL select MAGIC_SYSRQ default y @@ -29,6 +28,7 @@ config KGDB_SERIAL_CONSOLE config KGDB_TESTS bool "KGDB: internal test suite" + depends on KGDB default n help This is a kgdb I/O module specifically designed to test @@ -56,5 +56,3 @@ config KGDB_TESTS_BOOT_STRING boot. See the drivers/misc/kgdbts.c for detailed information about other strings you could use beyond the default of V1F100. - -endif # KGDB diff --git a/trunk/lib/devres.c b/trunk/lib/devres.c index 72c8909006da..26c87c49d776 100644 --- a/trunk/lib/devres.c +++ b/trunk/lib/devres.c @@ -2,7 +2,7 @@ #include #include -void devm_ioremap_release(struct device *dev, void *res) +static void devm_ioremap_release(struct device *dev, void *res) { iounmap(*(void __iomem **)res); } diff --git a/trunk/lib/kernel_lock.c b/trunk/lib/kernel_lock.c index 01a3c22c1b5a..cd3e82530b03 100644 --- a/trunk/lib/kernel_lock.c +++ b/trunk/lib/kernel_lock.c @@ -11,121 +11,79 @@ #include /* - * The 'big kernel lock' + * The 'big kernel semaphore' * - * This spinlock is taken and released recursively by lock_kernel() + * This mutex is taken and released recursively by lock_kernel() * and unlock_kernel(). It is transparently dropped and reacquired * over schedule(). It is used to protect legacy code that hasn't * been migrated to a proper locking design yet. * + * Note: code locked by this semaphore will only be serialized against + * other code using the same locking facility. The code guarantees that + * the task remains on the same CPU. + * * Don't use in new code. */ -static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag); - +static DECLARE_MUTEX(kernel_sem); /* - * Acquire/release the underlying lock from the scheduler. + * Re-acquire the kernel semaphore. * - * This is called with preemption disabled, and should - * return an error value if it cannot get the lock and - * TIF_NEED_RESCHED gets set. + * This function is called with preemption off. * - * If it successfully gets the lock, it should increment - * the preemption count like any spinlock does. - * - * (This works on UP too - _raw_spin_trylock will never - * return false in that case) + * We are executing in schedule() so the code must be extremely careful + * about recursion, both due to the down() and due to the enabling of + * preemption. schedule() will re-check the preemption flag after + * reacquiring the semaphore. */ int __lockfunc __reacquire_kernel_lock(void) { - while (!_raw_spin_trylock(&kernel_flag)) { - if (test_thread_flag(TIF_NEED_RESCHED)) - return -EAGAIN; - cpu_relax(); - } + struct task_struct *task = current; + int saved_lock_depth = task->lock_depth; + + BUG_ON(saved_lock_depth < 0); + + task->lock_depth = -1; + preempt_enable_no_resched(); + + down(&kernel_sem); + preempt_disable(); + task->lock_depth = saved_lock_depth; + return 0; } void __lockfunc __release_kernel_lock(void) { - _raw_spin_unlock(&kernel_flag); - preempt_enable_no_resched(); + up(&kernel_sem); } /* - * These are the BKL spinlocks - we try to be polite about preemption. - * If SMP is not on (ie UP preemption), this all goes away because the - * _raw_spin_trylock() will always succeed. + * Getting the big kernel semaphore. 
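The kernel_lock.c rewrite around this point turns the BKL into a semaphore that is made recursive purely through task->lock_depth: only the outermost lock_kernel() takes the semaphore, and only the matching outermost unlock_kernel() releases it. The same pattern in a self-contained user-space form (build with -pthread; a plain mutex plus a per-thread depth counter stands in for the task field):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static __thread int lock_depth = -1;	/* -1 == "not held", as in task_struct */

static void lock_big(void)
{
	int depth = lock_depth + 1;

	if (!depth)			/* only the outermost call acquires */
		pthread_mutex_lock(&big_lock);
	lock_depth = depth;
}

static void unlock_big(void)
{
	if (--lock_depth < 0)		/* only the outermost call releases */
		pthread_mutex_unlock(&big_lock);
}

int main(void)
{
	lock_big();
	lock_big();			/* nested acquire: just bumps the depth */
	unlock_big();
	unlock_big();			/* drops the mutex for real */
	printf("final depth %d\n", lock_depth);
	return 0;
}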
*/ -#ifdef CONFIG_PREEMPT -static inline void __lock_kernel(void) +void __lockfunc lock_kernel(void) { - preempt_disable(); - if (unlikely(!_raw_spin_trylock(&kernel_flag))) { - /* - * If preemption was disabled even before this - * was called, there's nothing we can be polite - * about - just spin. - */ - if (preempt_count() > 1) { - _raw_spin_lock(&kernel_flag); - return; - } + struct task_struct *task = current; + int depth = task->lock_depth + 1; + if (likely(!depth)) /* - * Otherwise, let's wait for the kernel lock - * with preemption enabled.. + * No recursion worries - we set up lock_depth _after_ */ - do { - preempt_enable(); - while (spin_is_locked(&kernel_flag)) - cpu_relax(); - preempt_disable(); - } while (!_raw_spin_trylock(&kernel_flag)); - } -} + down(&kernel_sem); -#else - -/* - * Non-preemption case - just get the spinlock - */ -static inline void __lock_kernel(void) -{ - _raw_spin_lock(&kernel_flag); + task->lock_depth = depth; } -#endif -static inline void __unlock_kernel(void) +void __lockfunc unlock_kernel(void) { - /* - * the BKL is not covered by lockdep, so we open-code the - * unlocking sequence (and thus avoid the dep-chain ops): - */ - _raw_spin_unlock(&kernel_flag); - preempt_enable(); -} + struct task_struct *task = current; -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, so we only need to - * worry about other CPU's. - */ -void __lockfunc lock_kernel(void) -{ - int depth = current->lock_depth+1; - if (likely(!depth)) - __lock_kernel(); - current->lock_depth = depth; -} + BUG_ON(task->lock_depth < 0); -void __lockfunc unlock_kernel(void) -{ - BUG_ON(current->lock_depth < 0); - if (likely(--current->lock_depth < 0)) - __unlock_kernel(); + if (likely(--task->lock_depth < 0)) + up(&kernel_sem); } EXPORT_SYMBOL(lock_kernel); diff --git a/trunk/mm/filemap.c b/trunk/mm/filemap.c index 2dead9adf8b7..239d36163bbe 100644 --- a/trunk/mm/filemap.c +++ b/trunk/mm/filemap.c @@ -1655,7 +1655,7 @@ int should_remove_suid(struct dentry *dentry) } EXPORT_SYMBOL(should_remove_suid); -static int __remove_suid(struct dentry *dentry, int kill) +int __remove_suid(struct dentry *dentry, int kill) { struct iattr newattrs; diff --git a/trunk/mm/memory.c b/trunk/mm/memory.c index 48c122d42ed7..bbab1e37055e 100644 --- a/trunk/mm/memory.c +++ b/trunk/mm/memory.c @@ -969,7 +969,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, goto no_page_table; pmd = pmd_offset(pud, address); - if (pmd_none(*pmd)) + if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) goto no_page_table; if (pmd_huge(*pmd)) { @@ -978,9 +978,6 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, goto out; } - if (unlikely(pmd_bad(*pmd))) - goto no_page_table; - ptep = pte_offset_map_lock(mm, pmd, address, &ptl); if (!ptep) goto out; diff --git a/trunk/mm/slub.c b/trunk/mm/slub.c index a505a828ef41..d379b782fc83 100644 --- a/trunk/mm/slub.c +++ b/trunk/mm/slub.c @@ -3762,7 +3762,7 @@ static int any_slab_objects(struct kmem_cache *s) if (!n) continue; - if (atomic_long_read(&n->total_objects)) + if (atomic_read(&n->total_objects)) return 1; } return 0; diff --git a/trunk/net/atm/br2684.c b/trunk/net/atm/br2684.c index 9d52ebfc1962..1b228065e745 100644 --- a/trunk/net/atm/br2684.c +++ b/trunk/net/atm/br2684.c @@ -346,9 +346,9 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb) /* skb==NULL means VCC is being destroyed */ br2684_close_vcc(brvcc); if (list_empty(&brdev->brvccs)) { - write_lock_irq(&devs_lock); + 
read_lock(&devs_lock); list_del(&brdev->br2684_devs); - write_unlock_irq(&devs_lock); + read_unlock(&devs_lock); unregister_netdev(net_dev); free_netdev(net_dev); } diff --git a/trunk/net/bridge/br_if.c b/trunk/net/bridge/br_if.c index c2397f503b0f..77a981a1ee52 100644 --- a/trunk/net/bridge/br_if.c +++ b/trunk/net/bridge/br_if.c @@ -273,13 +273,15 @@ int br_add_bridge(const char *name) rtnl_lock(); if (strchr(dev->name, '%')) { ret = dev_alloc_name(dev, dev->name); - if (ret < 0) - goto out_free; + if (ret < 0) { + free_netdev(dev); + goto out; + } } ret = register_netdevice(dev); if (ret) - goto out_free; + goto out; ret = br_sysfs_addbr(dev); if (ret) @@ -287,10 +289,6 @@ int br_add_bridge(const char *name) out: rtnl_unlock(); return ret; - -out_free: - free_netdev(dev); - goto out; } int br_del_bridge(const char *name) diff --git a/trunk/net/can/af_can.c b/trunk/net/can/af_can.c index 7e8ca2836452..2759b76f731c 100644 --- a/trunk/net/can/af_can.c +++ b/trunk/net/can/af_can.c @@ -208,7 +208,6 @@ static int can_create(struct net *net, struct socket *sock, int protocol) */ int can_send(struct sk_buff *skb, int loop) { - struct sk_buff *newskb = NULL; int err; if (skb->dev->type != ARPHRD_CAN) { @@ -245,7 +244,8 @@ int can_send(struct sk_buff *skb, int loop) * If the interface is not capable to do loopback * itself, we do it here. */ - newskb = skb_clone(skb, GFP_ATOMIC); + struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); + if (!newskb) { kfree_skb(skb); return -ENOMEM; @@ -254,6 +254,7 @@ int can_send(struct sk_buff *skb, int loop) newskb->sk = skb->sk; newskb->ip_summed = CHECKSUM_UNNECESSARY; newskb->pkt_type = PACKET_BROADCAST; + netif_rx(newskb); } } else { /* indication for the CAN driver: no loopback required */ @@ -265,20 +266,11 @@ int can_send(struct sk_buff *skb, int loop) if (err > 0) err = net_xmit_errno(err); - if (err) { - if (newskb) - kfree_skb(newskb); - return err; - } - - if (newskb) - netif_rx(newskb); - /* update statistics */ can_stats.tx_frames++; can_stats.tx_frames_delta++; - return 0; + return err; } EXPORT_SYMBOL(can_send); diff --git a/trunk/net/core/dev.c b/trunk/net/core/dev.c index a1607bc0cd4c..d334446a8eaf 100644 --- a/trunk/net/core/dev.c +++ b/trunk/net/core/dev.c @@ -994,8 +994,6 @@ int dev_open(struct net_device *dev) { int ret = 0; - ASSERT_RTNL(); - /* * Is it already up? */ @@ -1062,8 +1060,6 @@ int dev_open(struct net_device *dev) */ int dev_close(struct net_device *dev) { - ASSERT_RTNL(); - might_sleep(); if (!(dev->flags & IFF_UP)) @@ -4484,19 +4480,17 @@ static void __net_exit default_device_exit(struct net *net) rtnl_lock(); for_each_netdev_safe(net, dev, next) { int err; - char fb_name[IFNAMSIZ]; /* Ignore unmoveable devices (i.e. 
loopback) */ if (dev->features & NETIF_F_NETNS_LOCAL) continue; /* Push remaing network devices to init_net */ - snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); - err = dev_change_net_namespace(dev, &init_net, fb_name); + err = dev_change_net_namespace(dev, &init_net, "dev%d"); if (err) { - printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n", + printk(KERN_WARNING "%s: failed to move %s to init_net: %d\n", __func__, dev->name, err); - BUG(); + unregister_netdevice(dev); } } rtnl_unlock(); diff --git a/trunk/net/core/skbuff.c b/trunk/net/core/skbuff.c index 5c459f2b7985..4fe605fa6f8a 100644 --- a/trunk/net/core/skbuff.c +++ b/trunk/net/core/skbuff.c @@ -200,9 +200,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, goto nodata; /* - * Only clear those fields we need to clear, not those that we will - * actually initialise below. Hence, don't put any more fields after - * the tail pointer in struct sk_buff! + * See comment in sk_buff definition, just before the 'tail' member */ memset(skb, 0, offsetof(struct sk_buff, tail)); skb->truesize = size + sizeof(struct sk_buff); diff --git a/trunk/net/dccp/feat.c b/trunk/net/dccp/feat.c index 933a0ecf8d46..4a4f6ce4498d 100644 --- a/trunk/net/dccp/feat.c +++ b/trunk/net/dccp/feat.c @@ -32,7 +32,7 @@ int dccp_feat_change(struct dccp_minisock *dmsk, u8 type, u8 feature, if (len > 3) { DCCP_WARN("invalid length %d\n", len); - return -EINVAL; + return 1; } /* XXX add further sanity checks */ diff --git a/trunk/net/decnet/dn_route.c b/trunk/net/decnet/dn_route.c index f50e88bf2661..2f665a516476 100644 --- a/trunk/net/decnet/dn_route.c +++ b/trunk/net/decnet/dn_route.c @@ -235,14 +235,14 @@ static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu) else min_mtu -= 21; - if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) { + if (dst->metrics[RTAX_MTU-1] > mtu && mtu >= min_mtu) { if (!(dst_metric_locked(dst, RTAX_MTU))) { dst->metrics[RTAX_MTU-1] = mtu; dst_set_expires(dst, dn_rt_mtu_expires); } if (!(dst_metric_locked(dst, RTAX_ADVMSS))) { u32 mss = mtu - DN_MAX_NSP_DATA_HEADER; - if (dst_metric(dst, RTAX_ADVMSS) > mss) + if (dst->metrics[RTAX_ADVMSS-1] > mss) dst->metrics[RTAX_ADVMSS-1] = mss; } } @@ -805,12 +805,12 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res) rt->u.dst.neighbour = n; } - if (dst_metric(&rt->u.dst, RTAX_MTU) == 0 || - dst_metric(&rt->u.dst, RTAX_MTU) > rt->u.dst.dev->mtu) + if (rt->u.dst.metrics[RTAX_MTU-1] == 0 || + rt->u.dst.metrics[RTAX_MTU-1] > rt->u.dst.dev->mtu) rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu; mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->u.dst)); - if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0 || - dst_metric(&rt->u.dst, RTAX_ADVMSS) > mss) + if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0 || + rt->u.dst.metrics[RTAX_ADVMSS-1] > mss) rt->u.dst.metrics[RTAX_ADVMSS-1] = mss; return 0; } diff --git a/trunk/net/ipv4/ip_input.c b/trunk/net/ipv4/ip_input.c index ff77a4a7f9ec..7b4bad6d572f 100644 --- a/trunk/net/ipv4/ip_input.c +++ b/trunk/net/ipv4/ip_input.c @@ -397,7 +397,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, iph = ip_hdr(skb); /* - * RFC1122: 3.2.1.2 MUST silently discard any IP frame that fails the checksum. + * RFC1122: 3.1.2.2 MUST silently discard any IP frame that fails the checksum. * * Is the datagram acceptable? 
* diff --git a/trunk/net/ipv4/route.c b/trunk/net/ipv4/route.c index 92f90ae46f4a..5e3685c5c407 100644 --- a/trunk/net/ipv4/route.c +++ b/trunk/net/ipv4/route.c @@ -1468,14 +1468,14 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, /* BSD 4.2 compatibility hack :-( */ if (mtu == 0 && - old_mtu >= dst_metric(&rth->u.dst, RTAX_MTU) && + old_mtu >= rth->u.dst.metrics[RTAX_MTU-1] && old_mtu >= 68 + (iph->ihl << 2)) old_mtu -= iph->ihl << 2; mtu = guess_mtu(old_mtu); } - if (mtu <= dst_metric(&rth->u.dst, RTAX_MTU)) { - if (mtu < dst_metric(&rth->u.dst, RTAX_MTU)) { + if (mtu <= rth->u.dst.metrics[RTAX_MTU-1]) { + if (mtu < rth->u.dst.metrics[RTAX_MTU-1]) { dst_confirm(&rth->u.dst); if (mtu < ip_rt_min_pmtu) { mtu = ip_rt_min_pmtu; @@ -1497,7 +1497,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu) { - if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= 68 && + if (dst->metrics[RTAX_MTU-1] > mtu && mtu >= 68 && !(dst_metric_locked(dst, RTAX_MTU))) { if (mtu < ip_rt_min_pmtu) { mtu = ip_rt_min_pmtu; @@ -1613,7 +1613,7 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag) sizeof(rt->u.dst.metrics)); if (fi->fib_mtu == 0) { rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu; - if (dst_metric_locked(&rt->u.dst, RTAX_MTU) && + if (rt->u.dst.metrics[RTAX_LOCK-1] & (1 << RTAX_MTU) && rt->rt_gateway != rt->rt_dst && rt->u.dst.dev->mtu > 576) rt->u.dst.metrics[RTAX_MTU-1] = 576; @@ -1624,14 +1624,14 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag) } else rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu; - if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0) + if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0) rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl; - if (dst_metric(&rt->u.dst, RTAX_MTU) > IP_MAX_MTU) + if (rt->u.dst.metrics[RTAX_MTU-1] > IP_MAX_MTU) rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU; - if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0) + if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0) rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40, ip_rt_min_advmss); - if (dst_metric(&rt->u.dst, RTAX_ADVMSS) > 65535 - 40) + if (rt->u.dst.metrics[RTAX_ADVMSS-1] > 65535 - 40) rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40; #ifdef CONFIG_NET_CLS_ROUTE diff --git a/trunk/net/ipv4/tcp_input.c b/trunk/net/ipv4/tcp_input.c index 26c936930e92..eda4f4a233f3 100644 --- a/trunk/net/ipv4/tcp_input.c +++ b/trunk/net/ipv4/tcp_input.c @@ -66,7 +66,6 @@ #include #include #include -#include #include #include #include @@ -114,6 +113,8 @@ int sysctl_tcp_abc __read_mostly; #define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED) #define FLAG_ANY_PROGRESS (FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED) +#define IsSackFrto() (sysctl_tcp_frto == 0x2) + #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH) #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH)) @@ -604,7 +605,7 @@ static u32 tcp_rto_min(struct sock *sk) u32 rto_min = TCP_RTO_MIN; if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) - rto_min = dst_metric(dst, RTAX_RTO_MIN); + rto_min = dst->metrics[RTAX_RTO_MIN - 1]; return rto_min; } @@ -768,7 +769,7 @@ void tcp_update_metrics(struct sock *sk) dst->metrics[RTAX_RTTVAR - 1] = m; else dst->metrics[RTAX_RTTVAR-1] -= - (dst_metric(dst, RTAX_RTTVAR) - m)>>2; + (dst->metrics[RTAX_RTTVAR-1] - m)>>2; } if (tp->snd_ssthresh >= 0xFFFF) { @@ -787,21 +788,21 @@ void tcp_update_metrics(struct sock *sk) 
dst->metrics[RTAX_SSTHRESH-1] = max(tp->snd_cwnd >> 1, tp->snd_ssthresh); if (!dst_metric_locked(dst, RTAX_CWND)) - dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_cwnd) >> 1; + dst->metrics[RTAX_CWND-1] = (dst->metrics[RTAX_CWND-1] + tp->snd_cwnd) >> 1; } else { /* Else slow start did not finish, cwnd is non-sense, ssthresh may be also invalid. */ if (!dst_metric_locked(dst, RTAX_CWND)) - dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_ssthresh) >> 1; - if (dst_metric(dst, RTAX_SSTHRESH) && + dst->metrics[RTAX_CWND-1] = (dst->metrics[RTAX_CWND-1] + tp->snd_ssthresh) >> 1; + if (dst->metrics[RTAX_SSTHRESH-1] && !dst_metric_locked(dst, RTAX_SSTHRESH) && - tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH)) + tp->snd_ssthresh > dst->metrics[RTAX_SSTHRESH-1]) dst->metrics[RTAX_SSTHRESH-1] = tp->snd_ssthresh; } if (!dst_metric_locked(dst, RTAX_REORDERING)) { - if (dst_metric(dst, RTAX_REORDERING) < tp->reordering && + if (dst->metrics[RTAX_REORDERING-1] < tp->reordering && tp->reordering != sysctl_tcp_reordering) dst->metrics[RTAX_REORDERING-1] = tp->reordering; } @@ -1684,11 +1685,6 @@ static inline void tcp_reset_reno_sack(struct tcp_sock *tp) tp->sacked_out = 0; } -static int tcp_is_sackfrto(const struct tcp_sock *tp) -{ - return (sysctl_tcp_frto == 0x2) && !tcp_is_reno(tp); -} - /* F-RTO can only be used if TCP has never retransmitted anything other than * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here) */ @@ -1705,7 +1701,7 @@ int tcp_use_frto(struct sock *sk) if (icsk->icsk_mtup.probe_size) return 0; - if (tcp_is_sackfrto(tp)) + if (IsSackFrto()) return 1; /* Avoid expensive walking of rexmit queue if possible */ @@ -1795,7 +1791,7 @@ void tcp_enter_frto(struct sock *sk) /* Earlier loss recovery underway (see RFC4138; Appendix B). * The last condition is necessary at least in tp->frto_counter case. 
*/ - if (tcp_is_sackfrto(tp) && (tp->frto_counter || + if (IsSackFrto() && (tp->frto_counter || ((1 << icsk->icsk_ca_state) & (TCPF_CA_Recovery|TCPF_CA_Loss))) && after(tp->high_seq, tp->snd_una)) { tp->frto_highmark = tp->high_seq; @@ -3127,7 +3123,7 @@ static int tcp_process_frto(struct sock *sk, int flag) return 1; } - if (!tcp_is_sackfrto(tp)) { + if (!IsSackFrto() || tcp_is_reno(tp)) { /* RFC4138 shortcoming in step 2; should also have case c): * ACK isn't duplicate nor advances window, e.g., opposite dir * data, winupdate diff --git a/trunk/net/ipv6/route.c b/trunk/net/ipv6/route.c index 12bba0880345..a493ad9b8914 100644 --- a/trunk/net/ipv6/route.c +++ b/trunk/net/ipv6/route.c @@ -1243,11 +1243,11 @@ int ip6_route_add(struct fib6_config *cfg) } } - if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0) + if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0) rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1; - if (!dst_metric(&rt->u.dst, RTAX_MTU)) + if (!rt->u.dst.metrics[RTAX_MTU-1]) rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev); - if (!dst_metric(&rt->u.dst, RTAX_ADVMSS)) + if (!rt->u.dst.metrics[RTAX_ADVMSS-1]) rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst)); rt->u.dst.dev = dev; rt->rt6i_idev = idev; diff --git a/trunk/net/ipv6/sit.c b/trunk/net/ipv6/sit.c index 5a6fab95569f..4b2f1033994e 100644 --- a/trunk/net/ipv6/sit.c +++ b/trunk/net/ipv6/sit.c @@ -596,9 +596,9 @@ static int ipip6_rcv(struct sk_buff *skb) } icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); + kfree_skb(skb); read_unlock(&ipip6_lock); out: - kfree_skb(skb); return 0; } diff --git a/trunk/net/mac80211/main.c b/trunk/net/mac80211/main.c index 915afadb0602..9ad4e3631b6b 100644 --- a/trunk/net/mac80211/main.c +++ b/trunk/net/mac80211/main.c @@ -1766,7 +1766,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) fail_rate: ieee80211_debugfs_remove_netdev(IEEE80211_DEV_TO_SUB_IF(local->mdev)); unregister_netdevice(local->mdev); - local->mdev = NULL; fail_dev: rtnl_unlock(); sta_info_stop(local); @@ -1774,10 +1773,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) debugfs_hw_del(local); destroy_workqueue(local->hw.workqueue); fail_workqueue: - if (local->mdev != NULL) { - ieee80211_if_free(local->mdev); - local->mdev = NULL; - } + ieee80211_if_free(local->mdev); + local->mdev = NULL; fail_mdev_alloc: wiphy_unregister(local->hw.wiphy); return result; diff --git a/trunk/net/mac80211/rc80211_pid_debugfs.c b/trunk/net/mac80211/rc80211_pid_debugfs.c index ff5c380f3c13..ae75d4178739 100644 --- a/trunk/net/mac80211/rc80211_pid_debugfs.c +++ b/trunk/net/mac80211/rc80211_pid_debugfs.c @@ -85,7 +85,7 @@ static int rate_control_pid_events_open(struct inode *inode, struct file *file) struct rc_pid_sta_info *sinfo = inode->i_private; struct rc_pid_event_buffer *events = &sinfo->events; struct rc_pid_events_file_info *file_info; - unsigned long status; + unsigned int status; /* Allocate a state struct */ file_info = kmalloc(sizeof(*file_info), GFP_KERNEL); @@ -135,7 +135,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf, char pb[RC_PID_PRINT_BUF_SIZE]; int ret; int p; - unsigned long status; + unsigned int status; /* Check if there is something to read. 
*/ if (events->next_entry == file_info->next_entry) { diff --git a/trunk/net/netfilter/Kconfig b/trunk/net/netfilter/Kconfig index aa8d80c35e28..c1fc0f1a641c 100644 --- a/trunk/net/netfilter/Kconfig +++ b/trunk/net/netfilter/Kconfig @@ -90,7 +90,6 @@ config NF_CT_PROTO_DCCP tristate 'DCCP protocol connection tracking support (EXPERIMENTAL)' depends on EXPERIMENTAL && NF_CONNTRACK depends on NETFILTER_ADVANCED - default IP_DCCP help With this option enabled, the layer 3 independent connection tracking code will be able to do state tracking on DCCP connections. @@ -105,7 +104,6 @@ config NF_CT_PROTO_SCTP tristate 'SCTP protocol connection tracking support (EXPERIMENTAL)' depends on EXPERIMENTAL && NF_CONNTRACK depends on NETFILTER_ADVANCED - default IP_SCTP help With this option enabled, the layer 3 independent connection tracking code will be able to do state tracking on SCTP connections. @@ -534,7 +532,6 @@ config NETFILTER_XT_MATCH_DCCP tristate '"dccp" protocol match support' depends on NETFILTER_XTABLES depends on NETFILTER_ADVANCED - default IP_DCCP help With this option enabled, you will be able to use the iptables `dccp' match in order to match on DCCP source/destination ports @@ -728,7 +725,6 @@ config NETFILTER_XT_MATCH_SCTP tristate '"sctp" protocol match support (EXPERIMENTAL)' depends on NETFILTER_XTABLES && EXPERIMENTAL depends on NETFILTER_ADVANCED - default IP_SCTP help With this option enabled, you will be able to use the `sctp' match in order to match on SCTP source/destination ports diff --git a/trunk/net/netfilter/nf_conntrack_sip.c b/trunk/net/netfilter/nf_conntrack_sip.c index 2f9bbc058b48..9f4900069561 100644 --- a/trunk/net/netfilter/nf_conntrack_sip.c +++ b/trunk/net/netfilter/nf_conntrack_sip.c @@ -870,7 +870,6 @@ static int process_sdp(struct sk_buff *skb, { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); - struct nf_conn_help *help = nfct_help(ct); unsigned int matchoff, matchlen; unsigned int mediaoff, medialen; unsigned int sdpoff; @@ -960,9 +959,6 @@ static int process_sdp(struct sk_buff *skb, if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK) ret = nf_nat_sdp_session(skb, dptr, sdpoff, datalen, &rtp_addr); - if (ret == NF_ACCEPT && i > 0) - help->help.ct_sip_info.invite_cseq = cseq; - return ret; } static int process_invite_response(struct sk_buff *skb, @@ -971,14 +967,14 @@ static int process_invite_response(struct sk_buff *skb, { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); - struct nf_conn_help *help = nfct_help(ct); if ((code >= 100 && code <= 199) || (code >= 200 && code <= 299)) return process_sdp(skb, dptr, datalen, cseq); - else if (help->help.ct_sip_info.invite_cseq == cseq) + else { flush_expectations(ct, true); - return NF_ACCEPT; + return NF_ACCEPT; + } } static int process_update_response(struct sk_buff *skb, @@ -987,14 +983,14 @@ static int process_update_response(struct sk_buff *skb, { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); - struct nf_conn_help *help = nfct_help(ct); if ((code >= 100 && code <= 199) || (code >= 200 && code <= 299)) return process_sdp(skb, dptr, datalen, cseq); - else if (help->help.ct_sip_info.invite_cseq == cseq) + else { flush_expectations(ct, true); - return NF_ACCEPT; + return NF_ACCEPT; + } } static int process_prack_response(struct sk_buff *skb, @@ -1003,14 +999,14 @@ static int process_prack_response(struct sk_buff *skb, { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); - struct nf_conn_help 
*help = nfct_help(ct); if ((code >= 100 && code <= 199) || (code >= 200 && code <= 299)) return process_sdp(skb, dptr, datalen, cseq); - else if (help->help.ct_sip_info.invite_cseq == cseq) + else { flush_expectations(ct, true); - return NF_ACCEPT; + return NF_ACCEPT; + } } static int process_bye_request(struct sk_buff *skb, diff --git a/trunk/net/sched/act_simple.c b/trunk/net/sched/act_simple.c index 1d421d059caf..64b2d136c78e 100644 --- a/trunk/net/sched/act_simple.c +++ b/trunk/net/sched/act_simple.c @@ -6,7 +6,7 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * - * Authors: Jamal Hadi Salim (2005-8) + * Authors: Jamal Hadi Salim (2005) * */ @@ -34,7 +34,6 @@ static struct tcf_hashinfo simp_hash_info = { .lock = &simp_lock, }; -#define SIMP_MAX_DATA 32 static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) { struct tcf_defact *d = a->priv; @@ -70,28 +69,23 @@ static int tcf_simp_release(struct tcf_defact *d, int bind) return ret; } -static int alloc_defdata(struct tcf_defact *d, char *defdata) +static int alloc_defdata(struct tcf_defact *d, u32 datalen, void *defdata) { - d->tcfd_defdata = kstrndup(defdata, SIMP_MAX_DATA, GFP_KERNEL); + d->tcfd_defdata = kmemdup(defdata, datalen, GFP_KERNEL); if (unlikely(!d->tcfd_defdata)) return -ENOMEM; - + d->tcfd_datalen = datalen; return 0; } -static void reset_policy(struct tcf_defact *d, char *defdata, - struct tc_defact *p) +static int realloc_defdata(struct tcf_defact *d, u32 datalen, void *defdata) { - spin_lock_bh(&d->tcf_lock); - d->tcf_action = p->action; - memset(d->tcfd_defdata, 0, SIMP_MAX_DATA); - strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA); - spin_unlock_bh(&d->tcf_lock); + kfree(d->tcfd_defdata); + return alloc_defdata(d, datalen, defdata); } static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = { [TCA_DEF_PARMS] = { .len = sizeof(struct tc_defact) }, - [TCA_DEF_DATA] = { .type = NLA_STRING, .len = SIMP_MAX_DATA }, }; static int tcf_simp_init(struct nlattr *nla, struct nlattr *est, @@ -101,24 +95,28 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est, struct tc_defact *parm; struct tcf_defact *d; struct tcf_common *pc; - char *defdata; + void *defdata; + u32 datalen = 0; int ret = 0, err; if (nla == NULL) return -EINVAL; - err = nla_parse_nested(tb, TCA_DEF_MAX, nla, simple_policy); + err = nla_parse_nested(tb, TCA_DEF_MAX, nla, NULL); if (err < 0) return err; if (tb[TCA_DEF_PARMS] == NULL) return -EINVAL; - if (tb[TCA_DEF_DATA] == NULL) - return -EINVAL; - parm = nla_data(tb[TCA_DEF_PARMS]); defdata = nla_data(tb[TCA_DEF_DATA]); + if (defdata == NULL) + return -EINVAL; + + datalen = nla_len(tb[TCA_DEF_DATA]); + if (datalen == 0) + return -EINVAL; pc = tcf_hash_check(parm->index, a, bind, &simp_hash_info); if (!pc) { @@ -128,12 +126,11 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est, return -ENOMEM; d = to_defact(pc); - ret = alloc_defdata(d, defdata); + ret = alloc_defdata(d, datalen, defdata); if (ret < 0) { kfree(pc); return ret; } - d->tcf_action = parm->action; ret = ACT_P_CREATED; } else { d = to_defact(pc); @@ -141,9 +138,13 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est, tcf_simp_release(d, bind); return -EEXIST; } - reset_policy(d, defdata, parm); + realloc_defdata(d, datalen, defdata); } + spin_lock_bh(&d->tcf_lock); + d->tcf_action = parm->action; + spin_unlock_bh(&d->tcf_lock); + if (ret == ACT_P_CREATED) tcf_hash_insert(pc, &simp_hash_info); 
return ret; @@ -171,7 +172,7 @@ static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, opt.bindcnt = d->tcf_bindcnt - bind; opt.action = d->tcf_action; NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt); - NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata); + NLA_PUT(skb, TCA_DEF_DATA, d->tcfd_datalen, d->tcfd_defdata); t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(d->tcf_tm.expires); diff --git a/trunk/net/sched/sch_htb.c b/trunk/net/sched/sch_htb.c index 5bc1ed490180..66148cc4759e 100644 --- a/trunk/net/sched/sch_htb.c +++ b/trunk/net/sched/sch_htb.c @@ -1197,16 +1197,12 @@ static inline int htb_parent_last_child(struct htb_class *cl) return 1; } -static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl, - struct Qdisc *new_q) +static void htb_parent_to_leaf(struct htb_class *cl, struct Qdisc *new_q) { struct htb_class *parent = cl->parent; BUG_TRAP(!cl->level && cl->un.leaf.q && !cl->prio_activity); - if (parent->cmode != HTB_CAN_SEND) - htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level); - parent->level = 0; memset(&parent->un.inner, 0, sizeof(parent->un.inner)); INIT_LIST_HEAD(&parent->un.leaf.drop_list); @@ -1304,7 +1300,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg) htb_deactivate(q, cl); if (last_child) - htb_parent_to_leaf(q, cl, new_q); + htb_parent_to_leaf(cl, new_q); if (--cl->refcnt == 0) htb_destroy_class(sch, cl); diff --git a/trunk/net/sunrpc/svc.c b/trunk/net/sunrpc/svc.c index 01c7e311b904..d74c2d269539 100644 --- a/trunk/net/sunrpc/svc.c +++ b/trunk/net/sunrpc/svc.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include diff --git a/trunk/net/tipc/core.h b/trunk/net/tipc/core.h index 5a0e4878d3b7..325404fd4eb5 100644 --- a/trunk/net/tipc/core.h +++ b/trunk/net/tipc/core.h @@ -279,14 +279,15 @@ static inline void k_term_timer(struct timer_list *timer) /* * TIPC message buffer code * - * TIPC message buffer headroom reserves space for the worst-case - * link-level device header (in case the message is sent off-node). 
+ * TIPC message buffer headroom reserves space for a link-level header + * (in case the message is sent off-node), + * while ensuring TIPC header is word aligned for quicker access * - * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields - * are word aligned for quicker access + * The largest header currently supported is 18 bytes, which is used when + * the standard 14 byte Ethernet header has 4 added bytes for VLAN info */ -#define BUF_HEADROOM LL_MAX_HEADER +#define BUF_HEADROOM 20u struct tipc_skb_cb { void *handle; diff --git a/trunk/scripts/kconfig/lkc.h b/trunk/scripts/kconfig/lkc.h index 96521cb087ec..4bc68f20a73c 100644 --- a/trunk/scripts/kconfig/lkc.h +++ b/trunk/scripts/kconfig/lkc.h @@ -11,9 +11,9 @@ #ifndef KBUILD_NO_NLS # include #else -static inline const char *gettext(const char *txt) { return txt; } -static inline void textdomain(const char *domainname) {} -static inline void bindtextdomain(const char *name, const char *dir) {} +# define gettext(Msgid) ((const char *) (Msgid)) +# define textdomain(Domainname) ((const char *) (Domainname)) +# define bindtextdomain(Domainname, Dirname) ((const char *) (Dirname)) #endif #ifdef __cplusplus diff --git a/trunk/scripts/kconfig/mconf.c b/trunk/scripts/kconfig/mconf.c index 6841e95c0989..734cf4f3131e 100644 --- a/trunk/scripts/kconfig/mconf.c +++ b/trunk/scripts/kconfig/mconf.c @@ -773,7 +773,7 @@ static void conf_string(struct menu *menu) while (1) { int res; - const char *heading; + char *heading; switch (sym_get_type(menu->sym)) { case S_INT: @@ -925,4 +925,3 @@ int main(int ac, char **av) return 0; } - diff --git a/trunk/scripts/mod/file2alias.c b/trunk/scripts/mod/file2alias.c index cea4a790e1e9..e04c4218cb52 100644 --- a/trunk/scripts/mod/file2alias.c +++ b/trunk/scripts/mod/file2alias.c @@ -51,15 +51,6 @@ do { \ sprintf(str + strlen(str), "*"); \ } while(0) -/* Always end in a wildcard, for future extension */ -static inline void add_wildcard(char *str) -{ - int len = strlen(str); - - if (str[len - 1] != '*') - strcat(str + len, "*"); -} - unsigned int cross_build = 0; /** * Check that sizeof(device_id type) are consistent with size of section @@ -142,7 +133,9 @@ static void do_usb_entry(struct usb_device_id *id, id->match_flags&USB_DEVICE_ID_MATCH_INT_PROTOCOL, id->bInterfaceProtocol); - add_wildcard(alias); + /* Always end in a wildcard, for future extension */ + if (alias[strlen(alias)-1] != '*') + strcat(alias, "*"); buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias); } @@ -226,7 +219,6 @@ static int do_ieee1394_entry(const char *filename, ADD(alias, "ver", id->match_flags & IEEE1394_MATCH_VERSION, id->version); - add_wildcard(alias); return 1; } @@ -269,7 +261,6 @@ static int do_pci_entry(const char *filename, ADD(alias, "bc", baseclass_mask == 0xFF, baseclass); ADD(alias, "sc", subclass_mask == 0xFF, subclass); ADD(alias, "i", interface_mask == 0xFF, interface); - add_wildcard(alias); return 1; } @@ -292,7 +283,6 @@ static int do_ccw_entry(const char *filename, id->dev_type); ADD(alias, "dm", id->match_flags&CCW_DEVICE_ID_MATCH_DEVICE_MODEL, id->dev_model); - add_wildcard(alias); return 1; } @@ -300,7 +290,7 @@ static int do_ccw_entry(const char *filename, static int do_ap_entry(const char *filename, struct ap_device_id *id, char *alias) { - sprintf(alias, "ap:t%02X*", id->dev_type); + sprintf(alias, "ap:t%02X", id->dev_type); return 1; } @@ -319,7 +309,6 @@ static int do_serio_entry(const char *filename, ADD(alias, "id", id->id != SERIO_ANY, id->id); ADD(alias, "ex", id->extra 
!= SERIO_ANY, id->extra); - add_wildcard(alias); return 1; } @@ -327,7 +316,7 @@ static int do_serio_entry(const char *filename, static int do_acpi_entry(const char *filename, struct acpi_device_id *id, char *alias) { - sprintf(alias, "acpi*:%s:*", id->id); + sprintf(alias, "acpi*:%s:", id->id); return 1; } @@ -335,7 +324,7 @@ static int do_acpi_entry(const char *filename, static int do_pnp_entry(const char *filename, struct pnp_device_id *id, char *alias) { - sprintf(alias, "pnp:d%s*", id->id); + sprintf(alias, "pnp:d%s", id->id); return 1; } @@ -420,7 +409,6 @@ static int do_pcmcia_entry(const char *filename, ADD(alias, "pc", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID3, id->prod_id_hash[2]); ADD(alias, "pd", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID4, id->prod_id_hash[3]); - add_wildcard(alias); return 1; } @@ -444,7 +432,6 @@ static int do_of_entry (const char *filename, struct of_device_id *of, char *ali if (isspace (*tmp)) *tmp = '_'; - add_wildcard(alias); return 1; } @@ -461,7 +448,6 @@ static int do_vio_entry(const char *filename, struct vio_device_id *vio, if (isspace (*tmp)) *tmp = '_'; - add_wildcard(alias); return 1; } @@ -525,8 +511,6 @@ static int do_eisa_entry(const char *filename, struct eisa_device_id *eisa, { if (eisa->sig[0]) sprintf(alias, EISA_DEVICE_MODALIAS_FMT "*", eisa->sig); - else - strcat(alias, "*"); return 1; } @@ -545,7 +529,6 @@ static int do_parisc_entry(const char *filename, struct parisc_device_id *id, ADD(alias, "rev", id->hversion_rev != PA_HVERSION_REV_ANY_ID, id->hversion_rev); ADD(alias, "sv", id->sversion != PA_SVERSION_ANY_ID, id->sversion); - add_wildcard(alias); return 1; } @@ -561,7 +544,6 @@ static int do_sdio_entry(const char *filename, ADD(alias, "c", id->class != (__u8)SDIO_ANY_ID, id->class); ADD(alias, "v", id->vendor != (__u16)SDIO_ANY_ID, id->vendor); ADD(alias, "d", id->device != (__u16)SDIO_ANY_ID, id->device); - add_wildcard(alias); return 1; } @@ -577,7 +559,6 @@ static int do_ssb_entry(const char *filename, ADD(alias, "v", id->vendor != SSB_ANY_VENDOR, id->vendor); ADD(alias, "id", id->coreid != SSB_ANY_ID, id->coreid); ADD(alias, "rev", id->revision != SSB_ANY_REV, id->revision); - add_wildcard(alias); return 1; } @@ -592,7 +573,6 @@ static int do_virtio_entry(const char *filename, struct virtio_device_id *id, ADD(alias, "d", 1, id->device); ADD(alias, "v", id->vendor != VIRTIO_DEV_ANY_ID, id->vendor); - add_wildcard(alias); return 1; } @@ -632,6 +612,9 @@ static void do_table(void *symval, unsigned long size, for (i = 0; i < size; i += id_size) { if (do_entry(mod->name, symval+i, alias)) { + /* Always end in a wildcard, for future extension */ + if (alias[strlen(alias)-1] != '*') + strcat(alias, "*"); buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias); } diff --git a/trunk/sound/drivers/Kconfig b/trunk/sound/drivers/Kconfig index 379bcb074463..a78a8d045175 100644 --- a/trunk/sound/drivers/Kconfig +++ b/trunk/sound/drivers/Kconfig @@ -5,8 +5,8 @@ menu "Generic devices" config SND_PCSP - tristate "PC-Speaker support" - depends on PCSPKR_PLATFORM && X86_PC && HIGH_RES_TIMERS + tristate "Internal PC speaker support" + depends on X86_PC && HIGH_RES_TIMERS depends on INPUT depends on SND select SND_PCM diff --git a/trunk/sound/drivers/pcsp/pcsp.c b/trunk/sound/drivers/pcsp/pcsp.c index 54a1f9036c66..59203511e77d 100644 --- a/trunk/sound/drivers/pcsp/pcsp.c +++ b/trunk/sound/drivers/pcsp/pcsp.c @@ -194,7 +194,6 @@ static void pcsp_stop_beep(struct snd_pcsp *chip) spin_unlock_irq(&chip->substream_lock); } -#ifdef 
CONFIG_PM static int pcsp_suspend(struct platform_device *dev, pm_message_t state) { struct snd_pcsp *chip = platform_get_drvdata(dev); @@ -202,9 +201,6 @@ static int pcsp_suspend(struct platform_device *dev, pm_message_t state) snd_pcm_suspend_all(chip->pcm); return 0; } -#else -#define pcsp_suspend NULL -#endif /* CONFIG_PM */ static void pcsp_shutdown(struct platform_device *dev) { diff --git a/trunk/sound/oss/kahlua.c b/trunk/sound/oss/kahlua.c index eb9bc365530d..dfe670f12e67 100644 --- a/trunk/sound/oss/kahlua.c +++ b/trunk/sound/oss/kahlua.c @@ -67,7 +67,7 @@ static int __devinit probe_one(struct pci_dev *pdev, const struct pci_device_id return 1; mem = ioremap(base, 128); - if (!mem) + if(mem == 0UL) return 1; map = readw(mem + 0x18); /* Read the SMI enables */ iounmap(mem); diff --git a/trunk/sound/pci/Kconfig b/trunk/sound/pci/Kconfig index 7e4742109572..581debf37dcb 100644 --- a/trunk/sound/pci/Kconfig +++ b/trunk/sound/pci/Kconfig @@ -515,16 +515,19 @@ config SND_FM801 config SND_FM801_TEA575X_BOOL bool "ForteMedia FM801 + TEA5757 tuner" depends on SND_FM801 - depends on VIDEO_V4L1=y || VIDEO_V4L1=SND_FM801 help Say Y here to include support for soundcards based on the ForteMedia FM801 chip with a TEA5757 tuner connected to GPIO1-3 pins (Media Forte SF256-PCS-02) into the snd-fm801 driver. + This will enable support for the old V4L1 API. + config SND_FM801_TEA575X tristate depends on SND_FM801_TEA575X_BOOL default SND_FM801 + select VIDEO_V4L1 + select VIDEO_DEV config SND_HDA_INTEL tristate "Intel HD Audio" diff --git a/trunk/sound/pci/ac97/ac97_patch.c b/trunk/sound/pci/ac97/ac97_patch.c index 2da89810ca10..39198e505b12 100644 --- a/trunk/sound/pci/ac97/ac97_patch.c +++ b/trunk/sound/pci/ac97/ac97_patch.c @@ -3446,7 +3446,6 @@ static const struct snd_kcontrol_new snd_ac97_controls_vt1617a[] = { int patch_vt1617a(struct snd_ac97 * ac97) { int err = 0; - int val; /* we choose to not fail out at this point, but we tell the caller when we return */ @@ -3457,13 +3456,7 @@ int patch_vt1617a(struct snd_ac97 * ac97) /* bring analog power consumption to normal by turning off the * headphone amplifier, like WinXP driver for EPIA SP */ - /* We need to check the bit before writing it. - * On some (many?) hardwares, setting bit actually clears it! 
- */ - val = snd_ac97_read(ac97, 0x5c); - if (!(val & 0x20)) - snd_ac97_write_cache(ac97, 0x5c, 0x20); - + snd_ac97_write_cache(ac97, 0x5c, 0x20); ac97->ext_id |= AC97_EI_SPDIF; /* force the detection of spdif */ ac97->rates[AC97_RATES_SPDIF] = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000; ac97->build_ops = &patch_vt1616_ops; diff --git a/trunk/sound/pci/hda/patch_realtek.c b/trunk/sound/pci/hda/patch_realtek.c index 6d4df45e81e0..d9783a4263e0 100644 --- a/trunk/sound/pci/hda/patch_realtek.c +++ b/trunk/sound/pci/hda/patch_realtek.c @@ -11902,10 +11902,7 @@ static void alc861_auto_set_output_and_unmute(struct hda_codec *codec, hda_nid_t nid, int pin_type, int dac_idx) { - snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, - pin_type); - snd_hda_codec_write(codec, dac_idx, 0, AC_VERB_SET_AMP_GAIN_MUTE, - AMP_OUT_UNMUTE); + alc_set_pin_output(codec, nid, pin_type); } static void alc861_auto_init_multi_out(struct hda_codec *codec) diff --git a/trunk/sound/pci/hda/patch_sigmatel.c b/trunk/sound/pci/hda/patch_sigmatel.c index 393f7fd2b1be..b3a15d616873 100644 --- a/trunk/sound/pci/hda/patch_sigmatel.c +++ b/trunk/sound/pci/hda/patch_sigmatel.c @@ -4289,8 +4289,6 @@ struct hda_codec_preset snd_hda_preset_sigmatel[] = { { .id = 0x83847635, .name = "STAC9250D", .patch = patch_stac925x }, { .id = 0x83847636, .name = "STAC9251", .patch = patch_stac925x }, { .id = 0x83847637, .name = "STAC9250D", .patch = patch_stac925x }, - { .id = 0x83847645, .name = "92HD206X", .patch = patch_stac927x }, - { .id = 0x83847646, .name = "92HD206D", .patch = patch_stac927x }, /* The following does not take into account .id=0x83847661 when subsys = * 104D0C00 which is STAC9225s. Because of this, some SZ Notebooks are * currently not fully supported. diff --git a/trunk/sound/soc/at91/at91-pcm.c b/trunk/sound/soc/at91/at91-pcm.c index ccac6bd2889c..67c88e322fb1 100644 --- a/trunk/sound/soc/at91/at91-pcm.c +++ b/trunk/sound/soc/at91/at91-pcm.c @@ -103,8 +103,7 @@ static void at91_pcm_dma_irq(u32 ssc_sr, if (prtd->period_ptr >= prtd->dma_buffer_end) { prtd->period_ptr = prtd->dma_buffer; } - at91_ssc_write(params->ssc_base + params->pdc->xnpr, - prtd->period_ptr); + at91_ssc_write(params->ssc_base + params->pdc->xnpr, prtd->period_ptr); at91_ssc_write(params->ssc_base + params->pdc->xncr, prtd->period_size / params->pdc_xfer_size); } @@ -192,12 +191,10 @@ static int at91_pcm_trigger(struct snd_pcm_substream *substream, at91_ssc_write(params->ssc_base + AT91_SSC_IER, params->mask->ssc_endx | params->mask->ssc_endbuf); - at91_ssc_write(params->ssc_base + ATMEL_PDC_PTCR, - params->mask->pdc_enable); + at91_ssc_write(params->ssc_base + ATMEL_PDC_PTCR, params->mask->pdc_enable); - DBG("sr=%lx imr=%lx\n", - at91_ssc_read(params->ssc_base + AT91_SSC_SR), - at91_ssc_read(params->ssc_base + AT91_SSC_IMR)); + DBG("sr=%lx imr=%lx\n", at91_ssc_read(params->ssc_base + AT91_SSC_SR), + at91_ssc_read(params->ssc_base + AT91_SSC_IER)); break; case SNDRV_PCM_TRIGGER_STOP: diff --git a/trunk/sound/soc/at91/at91-ssc.c b/trunk/sound/soc/at91/at91-ssc.c index bc35d00a38f8..f642d2dd4ec3 100644 --- a/trunk/sound/soc/at91/at91-ssc.c +++ b/trunk/sound/soc/at91/at91-ssc.c @@ -590,7 +590,7 @@ static int at91_ssc_hw_params(struct snd_pcm_substream *substream, printk(KERN_WARNING "at91-ssc: request_irq failure\n"); DBG("Stopping pid %d clock\n", ssc_p->ssc.pid); - at91_sys_write(AT91_PMC_PCDR, 1<ssc.pid); + at91_sys_write(AT91_PMC_PCER, 1<ssc.pid); return ret; } diff --git a/trunk/sound/soc/s3c24xx/s3c24xx-i2s.c 
b/trunk/sound/soc/s3c24xx/s3c24xx-i2s.c index 1ed6afd45459..4ebcd6a8bf28 100644 --- a/trunk/sound/soc/s3c24xx/s3c24xx-i2s.c +++ b/trunk/sound/soc/s3c24xx/s3c24xx-i2s.c @@ -224,7 +224,6 @@ static int s3c24xx_i2s_set_fmt(struct snd_soc_cpu_dai *cpu_dai, iismod |= S3C2410_IISMOD_SLAVE; break; case SND_SOC_DAIFMT_CBS_CFS: - iismod &= ~S3C2410_IISMOD_SLAVE; break; default: return -EINVAL; @@ -235,7 +234,6 @@ static int s3c24xx_i2s_set_fmt(struct snd_soc_cpu_dai *cpu_dai, iismod |= S3C2410_IISMOD_MSB; break; case SND_SOC_DAIFMT_I2S: - iismod &= ~S3C2410_IISMOD_MSB; break; default: return -EINVAL; diff --git a/trunk/sound/soc/s3c24xx/s3c24xx-pcm.c b/trunk/sound/soc/s3c24xx/s3c24xx-pcm.c index 7806ae614617..6c70a81c730c 100644 --- a/trunk/sound/soc/s3c24xx/s3c24xx-pcm.c +++ b/trunk/sound/soc/s3c24xx/s3c24xx-pcm.c @@ -171,7 +171,7 @@ static int s3c24xx_pcm_hw_params(struct snd_pcm_substream *substream, ret = s3c2410_dma_request(prtd->params->channel, prtd->params->client, NULL); - if (ret < 0) { + if (ret) { DBG(KERN_ERR "failed to get dma channel\n"); return ret; } diff --git a/trunk/virt/kvm/kvm_main.c b/trunk/virt/kvm/kvm_main.c index f7ba099049ea..e89338e2b043 100644 --- a/trunk/virt/kvm/kvm_main.c +++ b/trunk/virt/kvm/kvm_main.c @@ -522,7 +522,6 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) return bad_hva(); return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE); } -EXPORT_SYMBOL_GPL(gfn_to_hva); /* * Requires current->mm->mmap_sem to be held
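For reference, the kvm_main.c hunk above only drops the EXPORT_SYMBOL_GPL(gfn_to_hva) line; the address arithmetic itself is unchanged and computes a host virtual address as userspace_addr + (gfn - base_gfn) * PAGE_SIZE. A minimal, self-contained C sketch of that translation, using a simplified memslot stand-in rather than the kernel's actual kvm/kvm_memory_slot structures, might look like this:

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL

/* Simplified stand-in for a KVM memory slot: a run of guest frame
 * numbers (gfns) starting at base_gfn, backed by host user memory
 * starting at userspace_addr. Illustration only, not the kernel type. */
struct memslot {
	uint64_t base_gfn;            /* first guest frame number in the slot */
	uint64_t npages;              /* number of guest pages in the slot */
	unsigned long userspace_addr; /* host virtual address of the backing memory */
};

/* Find the slot containing gfn and apply the same offset arithmetic as
 * the gfn_to_hva() hunk above:
 *   hva = userspace_addr + (gfn - base_gfn) * PAGE_SIZE
 */
static unsigned long gfn_to_hva_sketch(const struct memslot *slots,
					size_t nslots, uint64_t gfn)
{
	size_t i;

	for (i = 0; i < nslots; i++) {
		const struct memslot *slot = &slots[i];

		if (gfn >= slot->base_gfn &&
		    gfn < slot->base_gfn + slot->npages)
			return slot->userspace_addr +
			       (gfn - slot->base_gfn) * PAGE_SIZE;
	}
	return 0; /* stand-in for bad_hva() when no slot covers the gfn */
}

For example, with a slot of base_gfn = 0x100, npages = 16 and userspace_addr = 0x7f0000000000, guest frame 0x102 lies two pages into the slot and translates to 0x7f0000000000 + 2 * 4096 = 0x7f0000002000.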