diff --git a/[refs] b/[refs] index b945a1c4867e..37186383f66f 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 6cf24f031bc97cb5a7c9df3b6e73c45b628b2b28 +refs/heads/master: 8f820e976057261e249367514e9920cf20048c76 diff --git a/trunk/Documentation/DMA-API.txt b/trunk/Documentation/DMA-API.txt index 805db4b2cba6..05431621c861 100644 --- a/trunk/Documentation/DMA-API.txt +++ b/trunk/Documentation/DMA-API.txt @@ -77,7 +77,7 @@ To get this part of the dma_ API, you must #include Many drivers need lots of small dma-coherent memory regions for DMA descriptors or I/O buffers. Rather than allocating in units of a page or more using dma_alloc_coherent(), you can use DMA pools. These work -much like a struct kmem_cache, except that they use the dma-coherent allocator +much like a kmem_cache_t, except that they use the dma-coherent allocator not __get_free_pages(). Also, they understand common hardware constraints for alignment, like queue heads needing to be aligned on N byte boundaries. @@ -94,7 +94,7 @@ The pool create() routines initialize a pool of dma-coherent buffers for use with a given device. It must be called in a context which can sleep. -The "name" is for diagnostics (like a struct kmem_cache name); dev and size +The "name" is for diagnostics (like a kmem_cache_t name); dev and size are like what you'd pass to dma_alloc_coherent(). The device's hardware alignment requirement for this type of data is "align" (which is expressed in bytes, and must be a power of two). If your device has no boundary @@ -431,10 +431,10 @@ be identical to those passed in (and returned by dma_alloc_noncoherent()). int -dma_is_consistent(struct device *dev, dma_addr_t dma_handle) +dma_is_consistent(dma_addr_t dma_handle) -returns true if the device dev is performing consistent DMA on the memory -area pointed to by the dma_handle. +returns true if the memory pointed to by the dma_handle is actually +consistent. int dma_get_cache_alignment(void) @@ -459,7 +459,7 @@ anything like this. You must also be extra careful about accessing memory you intend to sync partially. 
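As a point of reference for the dma_pool documentation touched in the DMA-API.txt hunks above, a minimal driver-side sketch of the pool API might look like the following; the device pointer, pool name, element size and 16-byte alignment are illustrative assumptions, not values taken from this patch:

    #include <linux/device.h>
    #include <linux/dmapool.h>
    #include <linux/errno.h>

    /* Hypothetical pool of 64-byte DMA descriptors needing 16-byte alignment. */
    static struct dma_pool *desc_pool;

    static int example_setup(struct device *dev)
    {
            dma_addr_t dma;
            void *desc;

            /* name, device, element size, alignment, boundary (0 = none) */
            desc_pool = dma_pool_create("example-desc", dev, 64, 16, 0);
            if (!desc_pool)
                    return -ENOMEM;

            /* Each allocation yields both a CPU pointer and a bus address. */
            desc = dma_pool_alloc(desc_pool, GFP_KERNEL, &dma);
            if (!desc) {
                    dma_pool_destroy(desc_pool);
                    return -ENOMEM;
            }

            /* ... hand 'dma' to the hardware, touch 'desc' from the CPU ... */

            dma_pool_free(desc_pool, desc, dma);
            dma_pool_destroy(desc_pool);
            return 0;
    }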
void -dma_cache_sync(struct device *dev, void *vaddr, size_t size, +dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction) Do a partial sync of memory that was allocated by diff --git a/trunk/Documentation/DocBook/Makefile b/trunk/Documentation/DocBook/Makefile index 36526a1e76d7..db9499adbed4 100644 --- a/trunk/Documentation/DocBook/Makefile +++ b/trunk/Documentation/DocBook/Makefile @@ -190,13 +190,9 @@ quiet_cmd_fig2png = FIG2PNG $@ ### # Help targets as used by the top-level makefile dochelp: - @echo ' Linux kernel internal documentation in different formats:' - @echo ' htmldocs - HTML' - @echo ' installmandocs - install man pages generated by mandocs' - @echo ' mandocs - man pages' - @echo ' pdfdocs - PDF' - @echo ' psdocs - Postscript' - @echo ' xmldocs - XML DocBook' + @echo ' Linux kernel internal documentation in different formats:' + @echo ' xmldocs (XML DocBook), psdocs (Postscript), pdfdocs (PDF)' + @echo ' htmldocs (HTML), mandocs (man pages, use installmandocs to install)' ### # Temporary files left by various tools diff --git a/trunk/Documentation/DocBook/kernel-api.tmpl b/trunk/Documentation/DocBook/kernel-api.tmpl index ca094913c555..a166675c4303 100644 --- a/trunk/Documentation/DocBook/kernel-api.tmpl +++ b/trunk/Documentation/DocBook/kernel-api.tmpl @@ -418,35 +418,9 @@ X!Edrivers/pnp/system.c !Idrivers/parport/daisy.c - - Message-based devices - Fusion message devices -!Edrivers/message/fusion/mptbase.c -!Idrivers/message/fusion/mptbase.c -!Edrivers/message/fusion/mptscsih.c -!Idrivers/message/fusion/mptscsih.c -!Idrivers/message/fusion/mptctl.c -!Idrivers/message/fusion/mptspi.c -!Idrivers/message/fusion/mptfc.c -!Idrivers/message/fusion/mptlan.c - - I2O message devices -!Iinclude/linux/i2o.h -!Idrivers/message/i2o/core.h -!Edrivers/message/i2o/iop.c -!Idrivers/message/i2o/iop.c -!Idrivers/message/i2o/config-osm.c -!Edrivers/message/i2o/exec-osm.c -!Idrivers/message/i2o/exec-osm.c -!Idrivers/message/i2o/bus-osm.c -!Edrivers/message/i2o/device.c -!Idrivers/message/i2o/device.c -!Idrivers/message/i2o/driver.c -!Idrivers/message/i2o/pci.c -!Idrivers/message/i2o/i2o_block.c -!Idrivers/message/i2o/i2o_scsi.c -!Idrivers/message/i2o/i2o_proc.c - + + Video4Linux +!Edrivers/media/video/videodev.c diff --git a/trunk/Documentation/IPMI.txt b/trunk/Documentation/IPMI.txt index 24dc3fcf1594..0e3924ecd76b 100644 --- a/trunk/Documentation/IPMI.txt +++ b/trunk/Documentation/IPMI.txt @@ -365,7 +365,6 @@ You can change this at module load time (for a module) with: regshifts=,,... slave_addrs=,,... force_kipmid=,,... - unload_when_empty=[0|1] Each of these except si_trydefaults is a list, the first item for the first interface, second item for the second interface, etc. @@ -417,11 +416,6 @@ by the driver, but systems with broken interrupts might need an enable, or users that don't want the daemon (don't need the performance, don't want the CPU hit) can disable it. -If unload_when_empty is set to 1, the driver will be unloaded if it -doesn't find any interfaces or all the interfaces fail to work. The -default is one. Setting to 0 is useful with the hotmod, but is -obviously only useful for modules. - When compiled into the kernel, the parameters can be specified on the kernel command line as: @@ -447,25 +441,6 @@ have high-res timers enabled in the kernel and you don't have interrupts enabled, the driver will run VERY slowly. Don't blame me, these interfaces suck. -The driver supports a hot add and remove of interfaces. 
This way, -interfaces can be added or removed after the kernel is up and running. -This is done using /sys/modules/ipmi_si/hotmod, which is a write-only -parameter. You write a string to this interface. The string has the -format: - [:op2[:op3...]] -The "op"s are: - add|remove,kcs|bt|smic,mem|i/o,
[,[,[,...]]] -You can specify more than one interface on the line. The "opt"s are: - rsp= - rsi= - rsh= - irq= - ipmb= -and these have the same meanings as discussed above. Note that you -can also use this on the kernel command line for a more compact format -for specifying an interface. Note that when removing an interface, -only the first three parameters (si type, address type, and address) -are used for the comparison. Any options are ignored for removing. The SMBus Driver ---------------- @@ -527,10 +502,7 @@ used to control it: modprobe ipmi_watchdog timeout= pretimeout= action= preaction= preop= start_now=x - nowayout=x ifnum_to_use=n - -ifnum_to_use specifies which interface the watchdog timer should use. -The default is -1, which means to pick the first one registered. + nowayout=x The timeout is the number of seconds to the action, and the pretimeout is the amount of seconds before the reset that the pre-timeout panic will @@ -652,9 +624,5 @@ command line. The parameter is also available via the proc filesystem in /proc/sys/dev/ipmi/poweroff_powercycle. Note that if the system does not support power cycling, it will always do the power off. -The "ifnum_to_use" parameter specifies which interface the poweroff -code should use. The default is -1, which means to pick the first one -registered. - Note that if you have ACPI enabled, the system will prefer using ACPI to power off. diff --git a/trunk/Documentation/block/as-iosched.txt b/trunk/Documentation/block/as-iosched.txt index a598fe10a297..e2a66f8143c5 100644 --- a/trunk/Documentation/block/as-iosched.txt +++ b/trunk/Documentation/block/as-iosched.txt @@ -24,10 +24,8 @@ very similar behavior to the deadline IO scheduler. Selecting IO schedulers ----------------------- To choose IO schedulers at boot time, use the argument 'elevator=deadline'. -'noop', 'as' and 'cfq' (the default) are also available. IO schedulers are -assigned globally at boot time only presently. It's also possible to change -the IO scheduler for a determined device on the fly, as described in -Documentation/block/switching-sched.txt. +'noop' and 'as' (the default) are also available. IO schedulers are assigned +globally at boot time only presently. Anticipatory IO scheduler Policies diff --git a/trunk/Documentation/devices.txt b/trunk/Documentation/devices.txt index 8de132a02ba9..70690f1a14af 100644 --- a/trunk/Documentation/devices.txt +++ b/trunk/Documentation/devices.txt @@ -3,7 +3,7 @@ Maintained by Torben Mathiasen - Last revised: 29 November 2006 + Last revised: 15 May 2006 This list is the Linux Device List, the official registry of allocated device numbers and /dev directory nodes for the Linux operating @@ -94,7 +94,6 @@ Your cooperation is appreciated. 9 = /dev/urandom Faster, less secure random number gen. 10 = /dev/aio Asynchronous I/O notification interface 11 = /dev/kmsg Writes to this come out as printk's - 1 block RAM disk 0 = /dev/ram0 First RAM disk 1 = /dev/ram1 Second RAM disk @@ -123,7 +122,7 @@ Your cooperation is appreciated. devices are on major 128 and above and use the PTY master multiplex (/dev/ptmx) to acquire a PTY on demand. - + 2 block Floppy disks 0 = /dev/fd0 Controller 0, drive 0, autodetect 1 = /dev/fd1 Controller 0, drive 1, autodetect @@ -258,7 +257,7 @@ Your cooperation is appreciated. 129 = /dev/vcsa1 tty1 text/attribute contents ... 191 = /dev/vcsa63 tty63 text/attribute contents - + NOTE: These devices permit both read and write access. 7 block Loopback devices @@ -412,7 +411,7 @@ Your cooperation is appreciated. 
207 = /dev/video/em8300_sp EM8300 DVD decoder subpicture 208 = /dev/compaq/cpqphpc Compaq PCI Hot Plug Controller 209 = /dev/compaq/cpqrid Compaq Remote Insight Driver - 210 = /dev/impi/bt IMPI coprocessor block transfer + 210 = /dev/impi/bt IMPI coprocessor block transfer 211 = /dev/impi/smic IMPI coprocessor stream interface 212 = /dev/watchdogs/0 First watchdog device 213 = /dev/watchdogs/1 Second watchdog device @@ -507,7 +506,6 @@ Your cooperation is appreciated. 33 = /dev/patmgr1 Sequencer patch manager 34 = /dev/midi02 Third MIDI port 50 = /dev/midi03 Fourth MIDI port - 14 block BIOS harddrive callback support {2.6} 0 = /dev/dos_hda First BIOS harddrive whole disk 64 = /dev/dos_hdb Second BIOS harddrive whole disk @@ -529,7 +527,6 @@ Your cooperation is appreciated. 16 char Non-SCSI scanners 0 = /dev/gs4500 Genius 4500 handheld scanner - 16 block GoldStar CD-ROM 0 = /dev/gscd GoldStar CD-ROM @@ -551,7 +548,6 @@ Your cooperation is appreciated. 0 = /dev/ttyC0 First Cyclades port ... 31 = /dev/ttyC31 32nd Cyclades port - 19 block "Double" compressed disk 0 = /dev/double0 First compressed disk ... @@ -567,7 +563,6 @@ Your cooperation is appreciated. 0 = /dev/cub0 Callout device for ttyC0 ... 31 = /dev/cub31 Callout device for ttyC31 - 20 block Hitachi CD-ROM (under development) 0 = /dev/hitcd Hitachi CD-ROM @@ -587,7 +582,7 @@ Your cooperation is appreciated. This device is used on the ARM-based Acorn RiscPC. Partitions are handled the same way as for IDE disks - (see major number 3). + (see major number 3). 22 char Digiboard serial card 0 = /dev/ttyD0 First Digiboard port @@ -596,7 +591,7 @@ Your cooperation is appreciated. 22 block Second IDE hard disk/CD-ROM interface 0 = /dev/hdc Master: whole disk (or CD-ROM) 64 = /dev/hdd Slave: whole disk (or CD-ROM) - + Partitions are handled the same way as for the first interface (see major number 3). @@ -644,7 +639,6 @@ Your cooperation is appreciated. 26 char Quanta WinVision frame grabber {2.6} 0 = /dev/wvisfgrab Quanta WinVision frame grabber - 26 block Second Matsushita (Panasonic/SoundBlaster) CD-ROM 0 = /dev/sbpcd4 Panasonic CD-ROM controller 1 unit 0 1 = /dev/sbpcd5 Panasonic CD-ROM controller 1 unit 1 @@ -676,7 +670,6 @@ Your cooperation is appreciated. 37 = /dev/nrawqft1 Unit 1, no rewind-on-close, no file marks 38 = /dev/nrawqft2 Unit 2, no rewind-on-close, no file marks 39 = /dev/nrawqft3 Unit 3, no rewind-on-close, no file marks - 27 block Third Matsushita (Panasonic/SoundBlaster) CD-ROM 0 = /dev/sbpcd8 Panasonic CD-ROM controller 2 unit 0 1 = /dev/sbpcd9 Panasonic CD-ROM controller 2 unit 1 @@ -688,7 +681,6 @@ Your cooperation is appreciated. 1 = /dev/staliomem1 Second Stallion card I/O memory 2 = /dev/staliomem2 Third Stallion card I/O memory 3 = /dev/staliomem3 Fourth Stallion card I/O memory - 28 char Atari SLM ACSI laser printer (68k/Atari) 0 = /dev/slm0 First SLM laser printer 1 = /dev/slm1 Second SLM laser printer @@ -698,7 +690,6 @@ Your cooperation is appreciated. 1 = /dev/sbpcd13 Panasonic CD-ROM controller 3 unit 1 2 = /dev/sbpcd14 Panasonic CD-ROM controller 3 unit 2 3 = /dev/sbpcd15 Panasonic CD-ROM controller 3 unit 3 - 28 block ACSI disk (68k/Atari) 0 = /dev/ada First ACSI disk whole disk 16 = /dev/adb Second ACSI disk whole disk @@ -759,7 +750,6 @@ Your cooperation is appreciated. 31 char MPU-401 MIDI 0 = /dev/mpu401data MPU-401 data port 1 = /dev/mpu401stat MPU-401 status port - 31 block ROM/flash memory card 0 = /dev/rom0 First ROM card (rw) ... @@ -811,7 +801,7 @@ Your cooperation is appreciated. 
34 block Fourth IDE hard disk/CD-ROM interface 0 = /dev/hdg Master: whole disk (or CD-ROM) 64 = /dev/hdh Slave: whole disk (or CD-ROM) - + Partitions are handled the same way as for the first interface (see major number 3). @@ -828,7 +818,6 @@ Your cooperation is appreciated. 129 = /dev/smpte1 Second MIDI port, SMPTE timed 130 = /dev/smpte2 Third MIDI port, SMPTE timed 131 = /dev/smpte3 Fourth MIDI port, SMPTE timed - 35 block Slow memory ramdisk 0 = /dev/slram Slow memory ramdisk @@ -839,7 +828,6 @@ Your cooperation is appreciated. 16 = /dev/tap0 First Ethertap device ... 31 = /dev/tap15 16th Ethertap device - 36 block MCA ESDI hard disk 0 = /dev/eda First ESDI disk whole disk 64 = /dev/edb Second ESDI disk whole disk @@ -894,7 +882,6 @@ Your cooperation is appreciated. 40 char Matrox Meteor frame grabber {2.6} 0 = /dev/mmetfgrab Matrox Meteor frame grabber - 40 block Syquest EZ135 parallel port removable drive 0 = /dev/eza Parallel EZ135 drive, whole disk @@ -906,7 +893,6 @@ Your cooperation is appreciated. 41 char Yet Another Micro Monitor 0 = /dev/yamm Yet Another Micro Monitor - 41 block MicroSolutions BackPack parallel port CD-ROM 0 = /dev/bpcd BackPack CD-ROM @@ -915,7 +901,6 @@ Your cooperation is appreciated. the parallel port ATAPI CD-ROM driver at major number 46. 42 char Demo/sample use - 42 block Demo/sample use This number is intended for use in sample code, as @@ -933,7 +918,6 @@ Your cooperation is appreciated. 0 = /dev/ttyI0 First virtual modem ... 63 = /dev/ttyI63 64th virtual modem - 43 block Network block devices 0 = /dev/nb0 First network block device 1 = /dev/nb1 Second network block device @@ -950,13 +934,12 @@ Your cooperation is appreciated. 0 = /dev/cui0 Callout device for ttyI0 ... 63 = /dev/cui63 Callout device for ttyI63 - 44 block Flash Translation Layer (FTL) filesystems 0 = /dev/ftla FTL on first Memory Technology Device 16 = /dev/ftlb FTL on second Memory Technology Device 32 = /dev/ftlc FTL on third Memory Technology Device ... - 240 = /dev/ftlp FTL on 16th Memory Technology Device + 240 = /dev/ftlp FTL on 16th Memory Technology Device Partitions are handled in the same way as for IDE disks (see major number 3) except that the partition @@ -975,7 +958,6 @@ Your cooperation is appreciated. 191 = /dev/ippp63 64th SyncPPP device 255 = /dev/isdninfo ISDN monitor interface - 45 block Parallel port IDE disk devices 0 = /dev/pda First parallel port IDE disk 16 = /dev/pdb Second parallel port IDE disk @@ -1062,7 +1044,6 @@ Your cooperation is appreciated. 1 = /dev/dcbri1 Second DataComm card 2 = /dev/dcbri2 Third DataComm card 3 = /dev/dcbri3 Fourth DataComm card - 52 block Mylex DAC960 PCI RAID controller; fifth controller 0 = /dev/rd/c4d0 First disk, whole disk 8 = /dev/rd/c4d1 Second disk, whole disk @@ -1112,7 +1093,6 @@ Your cooperation is appreciated. 55 char DSP56001 digital signal processor 0 = /dev/dsp56k First DSP56001 - 55 block Mylex DAC960 PCI RAID controller; eighth controller 0 = /dev/rd/c7d0 First disk, whole disk 8 = /dev/rd/c7d1 Second disk, whole disk @@ -1150,7 +1130,6 @@ Your cooperation is appreciated. 0 = /dev/cup0 Callout device for ttyP0 1 = /dev/cup1 Callout device for ttyP1 ... - 58 block Reserved for logical volume manager 59 char sf firewall package @@ -1170,7 +1149,6 @@ Your cooperation is appreciated. NAMING CONFLICT -- PROPOSED REVISED NAME /dev/rpda0 etc 60-63 char LOCAL/EXPERIMENTAL USE - 60-63 block LOCAL/EXPERIMENTAL USE Allocated for local/experimental use. 
For devices not assigned official numbers, these ranges should be @@ -1456,6 +1434,7 @@ Your cooperation is appreciated. DAC960 (see major number 48) except that the limit on partitions is 15. + 78 char PAM Software's multimodem boards 0 = /dev/ttyM0 First PAM modem 1 = /dev/ttyM1 Second PAM modem @@ -1471,6 +1450,7 @@ Your cooperation is appreciated. DAC960 (see major number 48) except that the limit on partitions is 15. + 79 char PAM Software's multimodem boards - alternate devices 0 = /dev/cum0 Callout device for ttyM0 1 = /dev/cum1 Callout device for ttyM1 @@ -1486,6 +1466,7 @@ Your cooperation is appreciated. DAC960 (see major number 48) except that the limit on partitions is 15. + 80 char Photometrics AT200 CCD camera 0 = /dev/at200 Photometrics AT200 CCD camera @@ -1698,7 +1679,7 @@ Your cooperation is appreciated. 1 = /dev/dcxx1 Second capture card ... - 94 block IBM S/390 DASD block storage + 94 block IBM S/390 DASD block storage 0 = /dev/dasda First DASD device, major 1 = /dev/dasda1 First DASD device, block 1 2 = /dev/dasda2 First DASD device, block 2 @@ -1714,7 +1695,7 @@ Your cooperation is appreciated. 1 = /dev/ipnat NAT control device/log file 2 = /dev/ipstate State information log file 3 = /dev/ipauth Authentication control device/log file - ... + ... 96 char Parallel port ATAPI tape devices 0 = /dev/pt0 First parallel port ATAPI tape @@ -1724,7 +1705,7 @@ Your cooperation is appreciated. 129 = /dev/npt1 Second p.p. ATAPI tape, no rewind ... - 96 block Inverse NAND Flash Translation Layer + 96 block Inverse NAND Flash Translation Layer 0 = /dev/inftla First INFTL layer 16 = /dev/inftlb Second INFTL layer ... @@ -1956,6 +1937,7 @@ Your cooperation is appreciated. ... 113 block IBM iSeries virtual CD-ROM + 0 = /dev/iseries/vcda First virtual CD-ROM 1 = /dev/iseries/vcdb Second virtual CD-ROM ... @@ -2077,12 +2059,11 @@ Your cooperation is appreciated. ... 119 char VMware virtual network control - 0 = /dev/vmnet0 1st virtual network - 1 = /dev/vmnet1 2nd virtual network + 0 = /dev/vnet0 1st virtual network + 1 = /dev/vnet1 2nd virtual network ... 120-127 char LOCAL/EXPERIMENTAL USE - 120-127 block LOCAL/EXPERIMENTAL USE Allocated for local/experimental use. For devices not assigned official numbers, these ranges should be @@ -2094,6 +2075,7 @@ Your cooperation is appreciated. nodes; instead they should be accessed through the /dev/ptmx cloning interface. + 128 block SCSI disk devices (128-143) 0 = /dev/sddy 129th SCSI disk whole disk 16 = /dev/sddz 130th SCSI disk whole disk @@ -2105,6 +2087,7 @@ Your cooperation is appreciated. disks (see major number 3) except that the limit on partitions is 15. + 129 block SCSI disk devices (144-159) 0 = /dev/sdeo 145th SCSI disk whole disk 16 = /dev/sdep 146th SCSI disk whole disk @@ -2140,6 +2123,7 @@ Your cooperation is appreciated. disks (see major number 3) except that the limit on partitions is 15. + 132 block SCSI disk devices (192-207) 0 = /dev/sdgk 193rd SCSI disk whole disk 16 = /dev/sdgl 194th SCSI disk whole disk @@ -2151,6 +2135,7 @@ Your cooperation is appreciated. disks (see major number 3) except that the limit on partitions is 15. + 133 block SCSI disk devices (208-223) 0 = /dev/sdha 209th SCSI disk whole disk 16 = /dev/sdhb 210th SCSI disk whole disk @@ -2162,6 +2147,7 @@ Your cooperation is appreciated. disks (see major number 3) except that the limit on partitions is 15. 
+ 134 block SCSI disk devices (224-239) 0 = /dev/sdhq 225th SCSI disk whole disk 16 = /dev/sdhr 226th SCSI disk whole disk @@ -2173,6 +2159,7 @@ Your cooperation is appreciated. disks (see major number 3) except that the limit on partitions is 15. + 135 block SCSI disk devices (240-255) 0 = /dev/sdig 241st SCSI disk whole disk 16 = /dev/sdih 242nd SCSI disk whole disk @@ -2184,6 +2171,7 @@ Your cooperation is appreciated. disks (see major number 3) except that the limit on partitions is 15. + 136-143 char Unix98 PTY slaves 0 = /dev/pts/0 First Unix98 pseudo-TTY 1 = /dev/pts/1 Second Unix98 pesudo-TTY @@ -2396,7 +2384,6 @@ Your cooperation is appreciated. ... 159 char RESERVED - 159 block RESERVED 160 char General Purpose Instrument Bus (GPIB) @@ -2440,7 +2427,7 @@ Your cooperation is appreciated. Partitions are handled in the same way as for IDE disks (see major number 3) except that the limit on - partitions is 31. + partitions is 31. 162 char Raw block device interface 0 = /dev/rawctl Raw I/O control device @@ -2496,6 +2483,7 @@ Your cooperation is appreciated. 171 char Reserved for IEEE 1394 (Firewire) + 172 char Moxa Intellio serial card 0 = /dev/ttyMX0 First Moxa port 1 = /dev/ttyMX1 Second Moxa port @@ -2555,6 +2543,9 @@ Your cooperation is appreciated. 64 = /dev/usb/rio500 Diamond Rio 500 65 = /dev/usb/usblcd USBLCD Interface (info@usblcd.de) 66 = /dev/usb/cpad0 Synaptics cPad (mouse/LCD) + 67 = /dev/usb/adutux0 1st Ontrak ADU device + ... + 76 = /dev/usb/adutux10 10th Ontrak ADU device 96 = /dev/usb/hiddev0 1st USB HID device ... 111 = /dev/usb/hiddev15 16th USB HID device @@ -2567,7 +2558,7 @@ Your cooperation is appreciated. 132 = /dev/usb/idmouse ID Mouse (fingerprint scanner) device 133 = /dev/usb/sisusbvga1 First SiSUSB VGA device ... - 140 = /dev/usb/sisusbvga8 Eighth SISUSB VGA device + 140 = /dev/usb/sisusbvga8 Eigth SISUSB VGA device 144 = /dev/usb/lcd USB LCD device 160 = /dev/usb/legousbtower0 1st USB Legotower device ... @@ -2580,7 +2571,7 @@ Your cooperation is appreciated. 0 = /dev/uba First USB block device 8 = /dev/ubb Second USB block device 16 = /dev/ubc Third USB block device - ... + ... 181 char Conrad Electronic parallel port radio clocks 0 = /dev/pcfclock0 First Conrad radio clock @@ -2666,7 +2657,7 @@ Your cooperation is appreciated. 32 = /dev/mvideo/status2 Third device ... ... - 240 = /dev/mvideo/status15 16th device + 240 = /dev/mvideo/status15 16th device ... 195 char Nvidia graphics devices @@ -2804,10 +2795,6 @@ Your cooperation is appreciated. ... 185 = /dev/ttyNX15 Hilscher netX serial port 15 186 = /dev/ttyJ0 JTAG1 DCC protocol based serial port emulation - 187 = /dev/ttyUL0 Xilinx uartlite - port 0 - ... - 190 = /dev/ttyUL3 Xilinx uartlite - port 3 - 191 = /dev/xvc0 Xen virtual console - port 0 205 char Low-density serial ports (alternate device) 0 = /dev/culu0 Callout device for ttyLU0 @@ -2845,6 +2832,7 @@ Your cooperation is appreciated. 82 = /dev/cuvr0 Callout device for ttyVR0 83 = /dev/cuvr1 Callout device for ttyVR1 + 206 char OnStream SC-x0 tape devices 0 = /dev/osst0 First OnStream SCSI tape, mode 0 1 = /dev/osst1 Second OnStream SCSI tape, mode 0 @@ -2934,6 +2922,7 @@ Your cooperation is appreciated. ... 212 char LinuxTV.org DVB driver subsystem + 0 = /dev/dvb/adapter0/video0 first video decoder of first card 1 = /dev/dvb/adapter0/audio0 first audio decoder of first card 2 = /dev/dvb/adapter0/sec0 (obsolete/unused) @@ -3019,9 +3008,9 @@ Your cooperation is appreciated. 2 = /dev/3270/tub2 Second 3270 terminal ... 
-229 char IBM iSeries/pSeries virtual console - 0 = /dev/hvc0 First console port - 1 = /dev/hvc1 Second console port +229 char IBM iSeries virtual console + 0 = /dev/iseries/vtty0 First console port + 1 = /dev/iseries/vtty1 Second console port ... 230 char IBM iSeries virtual tape @@ -3094,14 +3083,12 @@ Your cooperation is appreciated. 234-239 UNASSIGNED 240-254 char LOCAL/EXPERIMENTAL USE - 240-254 block LOCAL/EXPERIMENTAL USE Allocated for local/experimental use. For devices not assigned official numbers, these ranges should be used in order to avoid conflicting with future assignments. 255 char RESERVED - 255 block RESERVED This major is reserved to assist the expansion to a @@ -3128,20 +3115,7 @@ Your cooperation is appreciated. 257 char Phoenix Technologies Cryptographic Services Driver 0 = /dev/ptlsec Crypto Services Driver -257 block SSFDC Flash Translation Layer filesystem - 0 = /dev/ssfdca First SSFDC layer - 8 = /dev/ssfdcb Second SSFDC layer - 16 = /dev/ssfdcc Third SSFDC layer - 24 = /dev/ssfdcd 4th SSFDC layer - 32 = /dev/ssfdce 5th SSFDC layer - 40 = /dev/ssfdcf 6th SSFDC layer - 48 = /dev/ssfdcg 7th SSFDC layer - 56 = /dev/ssfdch 8th SSFDC layer - -258 block ROM/Flash read-only translation layer - 0 = /dev/blockrom0 First ROM card's translation layer interface - 1 = /dev/blockrom1 Second ROM card's translation layer interface - ... + **** ADDITIONAL /dev DIRECTORY ENTRIES diff --git a/trunk/Documentation/filesystems/Locking b/trunk/Documentation/filesystems/Locking index 790ef6fbe495..eb1a6cad21e6 100644 --- a/trunk/Documentation/filesystems/Locking +++ b/trunk/Documentation/filesystems/Locking @@ -124,7 +124,7 @@ sync_fs: no no read write_super_lockfs: ? unlockfs: ? statfs: no no no -remount_fs: yes yes maybe (see below) +remount_fs: no yes maybe (see below) clear_inode: no umount_begin: yes no no show_options: no (vfsmount->sem) diff --git a/trunk/Documentation/filesystems/fuse.txt b/trunk/Documentation/filesystems/fuse.txt index 345392c4caeb..3d7447738958 100644 --- a/trunk/Documentation/filesystems/fuse.txt +++ b/trunk/Documentation/filesystems/fuse.txt @@ -51,22 +51,6 @@ homepage: http://fuse.sourceforge.net/ -Filesystem type -~~~~~~~~~~~~~~~ - -The filesystem type given to mount(2) can be one of the following: - -'fuse' - - This is the usual way to mount a FUSE filesystem. The first - argument of the mount system call may contain an arbitrary string, - which is not interpreted by the kernel. - -'fuseblk' - - The filesystem is block device based. The first argument of the - mount system call is interpreted as the name of the device. - Mount options ~~~~~~~~~~~~~ @@ -110,11 +94,6 @@ Mount options The default is infinite. Note that the size of read requests is limited anyway to 32 pages (which is 128kbyte on i386). -'blksize=N' - - Set the block size for the filesystem. The default is 512. This - option is only valid for 'fuseblk' type mounts. - Control filesystem ~~~~~~~~~~~~~~~~~~ diff --git a/trunk/Documentation/filesystems/sysv-fs.txt b/trunk/Documentation/filesystems/sysv-fs.txt index 253b50d1328e..d81722418010 100644 --- a/trunk/Documentation/filesystems/sysv-fs.txt +++ b/trunk/Documentation/filesystems/sysv-fs.txt @@ -1,8 +1,11 @@ +This is the implementation of the SystemV/Coherent filesystem for Linux. It implements all of - Xenix FS, - SystemV/386 FS, - Coherent FS. +This is version beta 4. + To install: * Answer the 'System V and Coherent filesystem support' question with 'y' when configuring the kernel. 
@@ -25,173 +28,11 @@ Bugs in the present implementation: for this FS on hard disk yet. -These filesystems are rather similar. Here is a comparison with Minix FS: - -* Linux fdisk reports on partitions - - Minix FS 0x81 Linux/Minix - - Xenix FS ?? - - SystemV FS ?? - - Coherent FS 0x08 AIX bootable - -* Size of a block or zone (data allocation unit on disk) - - Minix FS 1024 - - Xenix FS 1024 (also 512 ??) - - SystemV FS 1024 (also 512 and 2048) - - Coherent FS 512 - -* General layout: all have one boot block, one super block and - separate areas for inodes and for directories/data. - On SystemV Release 2 FS (e.g. Microport) the first track is reserved and - all the block numbers (including the super block) are offset by one track. - -* Byte ordering of "short" (16 bit entities) on disk: - - Minix FS little endian 0 1 - - Xenix FS little endian 0 1 - - SystemV FS little endian 0 1 - - Coherent FS little endian 0 1 - Of course, this affects only the file system, not the data of files on it! - -* Byte ordering of "long" (32 bit entities) on disk: - - Minix FS little endian 0 1 2 3 - - Xenix FS little endian 0 1 2 3 - - SystemV FS little endian 0 1 2 3 - - Coherent FS PDP-11 2 3 0 1 - Of course, this affects only the file system, not the data of files on it! - -* Inode on disk: "short", 0 means non-existent, the root dir ino is: - - Minix FS 1 - - Xenix FS, SystemV FS, Coherent FS 2 - -* Maximum number of hard links to a file: - - Minix FS 250 - - Xenix FS ?? - - SystemV FS ?? - - Coherent FS >=10000 - -* Free inode management: - - Minix FS a bitmap - - Xenix FS, SystemV FS, Coherent FS - There is a cache of a certain number of free inodes in the super-block. - When it is exhausted, new free inodes are found using a linear search. - -* Free block management: - - Minix FS a bitmap - - Xenix FS, SystemV FS, Coherent FS - Free blocks are organized in a "free list". Maybe a misleading term, - since it is not true that every free block contains a pointer to - the next free block. Rather, the free blocks are organized in chunks - of limited size, and every now and then a free block contains pointers - to the free blocks pertaining to the next chunk; the first of these - contains pointers and so on. The list terminates with a "block number" - 0 on Xenix FS and SystemV FS, with a block zeroed out on Coherent FS. 
- -* Super-block location: - - Minix FS block 1 = bytes 1024..2047 - - Xenix FS block 1 = bytes 1024..2047 - - SystemV FS bytes 512..1023 - - Coherent FS block 1 = bytes 512..1023 - -* Super-block layout: - - Minix FS - unsigned short s_ninodes; - unsigned short s_nzones; - unsigned short s_imap_blocks; - unsigned short s_zmap_blocks; - unsigned short s_firstdatazone; - unsigned short s_log_zone_size; - unsigned long s_max_size; - unsigned short s_magic; - - Xenix FS, SystemV FS, Coherent FS - unsigned short s_firstdatazone; - unsigned long s_nzones; - unsigned short s_fzone_count; - unsigned long s_fzones[NICFREE]; - unsigned short s_finode_count; - unsigned short s_finodes[NICINOD]; - char s_flock; - char s_ilock; - char s_modified; - char s_rdonly; - unsigned long s_time; - short s_dinfo[4]; -- SystemV FS only - unsigned long s_free_zones; - unsigned short s_free_inodes; - short s_dinfo[4]; -- Xenix FS only - unsigned short s_interleave_m,s_interleave_n; -- Coherent FS only - char s_fname[6]; - char s_fpack[6]; - then they differ considerably: - Xenix FS - char s_clean; - char s_fill[371]; - long s_magic; - long s_type; - SystemV FS - long s_fill[12 or 14]; - long s_state; - long s_magic; - long s_type; - Coherent FS - unsigned long s_unique; - Note that Coherent FS has no magic. - -* Inode layout: - - Minix FS - unsigned short i_mode; - unsigned short i_uid; - unsigned long i_size; - unsigned long i_time; - unsigned char i_gid; - unsigned char i_nlinks; - unsigned short i_zone[7+1+1]; - - Xenix FS, SystemV FS, Coherent FS - unsigned short i_mode; - unsigned short i_nlink; - unsigned short i_uid; - unsigned short i_gid; - unsigned long i_size; - unsigned char i_zone[3*(10+1+1+1)]; - unsigned long i_atime; - unsigned long i_mtime; - unsigned long i_ctime; - -* Regular file data blocks are organized as - - Minix FS - 7 direct blocks - 1 indirect block (pointers to blocks) - 1 double-indirect block (pointer to pointers to blocks) - - Xenix FS, SystemV FS, Coherent FS - 10 direct blocks - 1 indirect block (pointers to blocks) - 1 double-indirect block (pointer to pointers to blocks) - 1 triple-indirect block (pointer to pointers to pointers to blocks) - -* Inode size, inodes per block - - Minix FS 32 32 - - Xenix FS 64 16 - - SystemV FS 64 16 - - Coherent FS 64 8 - -* Directory entry on disk - - Minix FS - unsigned short inode; - char name[14/30]; - - Xenix FS, SystemV FS, Coherent FS - unsigned short inode; - char name[14]; - -* Dir entry size, dir entries per block - - Minix FS 16/32 64/32 - - Xenix FS 16 64 - - SystemV FS 16 64 - - Coherent FS 16 32 - -* How to implement symbolic links such that the host fsck doesn't scream: - - Minix FS normal - - Xenix FS kludge: as regular files with chmod 1000 - - SystemV FS ?? - - Coherent FS kludge: as regular files with chmod 1000 +Please report any bugs and suggestions to + Bruno Haible + Pascal Haible + Krzysztof G. Baranowski +Bruno Haible + -Notation: We often speak of a "block" but mean a zone (the allocation unit) -and not the disk driver's notion of "block". diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt index 6b3c3e37a277..2e1898e4e8fd 100644 --- a/trunk/Documentation/kernel-parameters.txt +++ b/trunk/Documentation/kernel-parameters.txt @@ -650,10 +650,6 @@ and is between 256 and 4096 characters. It is defined in the file idle= [HW] Format: idle=poll or idle=halt - ignore_loglevel [KNL] - Ignore loglevel setting - this will print /all/ - kernel messages to the console. 
Useful for debugging. - ihash_entries= [KNL] Set number of hash buckets for inode cache. @@ -718,12 +714,7 @@ and is between 256 and 4096 characters. It is defined in the file Format: ,,, isolcpus= [KNL,SMP] Isolate CPUs from the general scheduler. - Format: - ,..., - or - - (must be a positive range in ascending order) - or a mixture - ,...,- + Format: ,..., This option can be used to specify one or more CPUs to isolate from the general SMP balancing and scheduling algorithms. The only way to move a process onto or off @@ -1021,10 +1012,6 @@ and is between 256 and 4096 characters. It is defined in the file emulation library even if a 387 maths coprocessor is present. - noaliencache [MM, NUMA] Disables the allcoation of alien caches in - the slab allocator. Saves per-node memory, but will - impact performance on real NUMA hardware. - noalign [KNL,ARM] noapic [SMP,APIC] Tells the kernel to not make use of any @@ -1298,7 +1285,6 @@ and is between 256 and 4096 characters. It is defined in the file Param: "schedule" - profile schedule points. Param: - step/bucket size as a power of 2 for statistical time based profiling. - Param: "sleep" - profile D-state sleeping (millisecs) processor.max_cstate= [HW,ACPI] Limit processor to maximum C-state @@ -1380,12 +1366,6 @@ and is between 256 and 4096 characters. It is defined in the file resume= [SWSUSP] Specify the partition device for software suspend - resume_offset= [SWSUSP] - Specify the offset from the beginning of the partition - given by "resume=" at which the swap header is located, - in units (needed only for swap files). - See Documentation/power/swsusp-and-swap-files.txt - rhash_entries= [KNL,NET] Set number of hash buckets for route cache diff --git a/trunk/Documentation/power/s2ram.txt b/trunk/Documentation/power/s2ram.txt deleted file mode 100644 index b05f512130ea..000000000000 --- a/trunk/Documentation/power/s2ram.txt +++ /dev/null @@ -1,56 +0,0 @@ - How to get s2ram working - ~~~~~~~~~~~~~~~~~~~~~~~~ - 2006 Linus Torvalds - 2006 Pavel Machek - -1) Check suspend.sf.net, program s2ram there has long whitelist of - "known ok" machines, along with tricks to use on each one. - -2) If that does not help, try reading tricks.txt and - video.txt. Perhaps problem is as simple as broken module, and - simple module unload can fix it. - -3) You can use Linus' TRACE_RESUME infrastructure, described below. - - Using TRACE_RESUME - ~~~~~~~~~~~~~~~~~~ - -I've been working at making the machines I have able to STR, and almost -always it's a driver that is buggy. Thank God for the suspend/resume -debugging - the thing that Chuck tried to disable. That's often the _only_ -way to debug these things, and it's actually pretty powerful (but -time-consuming - having to insert TRACE_RESUME() markers into the device -driver that doesn't resume and recompile and reboot). - -Anyway, the way to debug this for people who are interested (have a -machine that doesn't boot) is: - - - enable PM_DEBUG, and PM_TRACE - - - use a script like this: - - #!/bin/sh - sync - echo 1 > /sys/power/pm_trace - echo mem > /sys/power/state - - to suspend - - - if it doesn't come back up (which is usually the problem), reboot by - holding the power button down, and look at the dmesg output for things - like - - Magic number: 4:156:725 - hash matches drivers/base/power/resume.c:28 - hash matches device 0000:01:00.0 - - which means that the last trace event was just before trying to resume - device 0000:01:00.0. 
Then figure out what driver is controlling that - device (lspci and /sys/devices/pci* is your friend), and see if you can - fix it, disable it, or trace into its resume function. - -For example, the above happens to be the VGA device on my EVO, which I -used to run with "radeonfb" (it's an ATI Radeon mobility). It turns out -that "radeonfb" simply cannot resume that device - it tries to set the -PLL's, and it just _hangs_. Using the regular VGA console and letting X -resume it instead works fine. diff --git a/trunk/Documentation/power/swsusp-and-swap-files.txt b/trunk/Documentation/power/swsusp-and-swap-files.txt deleted file mode 100644 index 06f911a5f885..000000000000 --- a/trunk/Documentation/power/swsusp-and-swap-files.txt +++ /dev/null @@ -1,60 +0,0 @@ -Using swap files with software suspend (swsusp) - (C) 2006 Rafael J. Wysocki - -The Linux kernel handles swap files almost in the same way as it handles swap -partitions and there are only two differences between these two types of swap -areas: -(1) swap files need not be contiguous, -(2) the header of a swap file is not in the first block of the partition that -holds it. From the swsusp's point of view (1) is not a problem, because it is -already taken care of by the swap-handling code, but (2) has to be taken into -consideration. - -In principle the location of a swap file's header may be determined with the -help of appropriate filesystem driver. Unfortunately, however, it requires the -filesystem holding the swap file to be mounted, and if this filesystem is -journaled, it cannot be mounted during resume from disk. For this reason to -identify a swap file swsusp uses the name of the partition that holds the file -and the offset from the beginning of the partition at which the swap file's -header is located. For convenience, this offset is expressed in -units. - -In order to use a swap file with swsusp, you need to: - -1) Create the swap file and make it active, eg. - -# dd if=/dev/zero of= bs=1024 count= -# mkswap -# swapon - -2) Use an application that will bmap the swap file with the help of the -FIBMAP ioctl and determine the location of the file's swap header, as the -offset, in units, from the beginning of the partition which -holds the swap file. - -3) Add the following parameters to the kernel command line: - -resume= resume_offset= - -where is the partition on which the swap file is located -and is the offset of the swap header determined by the -application in 2) (of course, this step may be carried out automatically -by the same application that determies the swap file's header offset using the -FIBMAP ioctl) - -OR - -Use a userland suspend application that will set the partition and offset -with the help of the SNAPSHOT_SET_SWAP_AREA ioctl described in -Documentation/power/userland-swsusp.txt (this is the only method to suspend -to a swap file allowing the resume to be initiated from an initrd or initramfs -image). - -Now, swsusp will use the swap file in the same way in which it would use a swap -partition. In particular, the swap file has to be active (ie. be present in -/proc/swaps) so that it can be used for suspending. - -Note that if the swap file used for suspending is deleted and recreated, -the location of its header need not be the same as before. Thus every time -this happens the value of the "resume_offset=" kernel command line parameter -has to be updated. 
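The FIBMAP step described in the deleted swsusp-and-swap-files.txt above can be carried out by a small userspace helper. A sketch, assuming the swap file was created with mkswap so that its first block is page-aligned and that the program runs with root privileges (FIBMAP requires them), might be:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>           /* FIBMAP, FIGETBSZ */

    int main(int argc, char **argv)
    {
            int fd, blksize, block = 0;
            long pagesize = sysconf(_SC_PAGESIZE);

            if (argc < 2)
                    return 1;
            fd = open(argv[1], O_RDONLY);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (ioctl(fd, FIGETBSZ, &blksize) < 0 ||  /* filesystem block size */
                ioctl(fd, FIBMAP, &block) < 0) {      /* physical block of file block 0 */
                    perror("ioctl");
                    return 1;
            }
            /* Offset of the swap header, expressed in PAGE_SIZE units. */
            printf("resume_offset=%ld\n", (long)block * blksize / pagesize);
            close(fd);
            return 0;
    }

The printed value is what step 3) of the deleted text passes as "resume_offset=" on the kernel command line, alongside "resume=".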
diff --git a/trunk/Documentation/power/swsusp.txt b/trunk/Documentation/power/swsusp.txt index 0761ff6c57ed..e635e6f1e316 100644 --- a/trunk/Documentation/power/swsusp.txt +++ b/trunk/Documentation/power/swsusp.txt @@ -297,12 +297,20 @@ system is shut down or suspended. Additionally use the encrypted suspend image to prevent sensitive data from being stolen after resume. -Q: Can I suspend to a swap file? +Q: Why can't we suspend to a swap file? -A: Generally, yes, you can. However, it requires you to use the "resume=" and -"resume_offset=" kernel command line parameters, so the resume from a swap file -cannot be initiated from an initrd or initramfs image. See -swsusp-and-swap-files.txt for details. +A: Because accessing swap file needs the filesystem mounted, and +filesystem might do something wrong (like replaying the journal) +during mount. + +There are few ways to get that fixed: + +1) Probably could be solved by modifying every filesystem to support +some kind of "really read-only!" option. Patches welcome. + +2) suspend2 gets around that by storing absolute positions in on-disk +image (and blocksize), with resume parameter pointing directly to +suspend header. Q: Is there a maximum system RAM size that is supported by swsusp? diff --git a/trunk/Documentation/power/userland-swsusp.txt b/trunk/Documentation/power/userland-swsusp.txt index 000556c932e9..64755e9285db 100644 --- a/trunk/Documentation/power/userland-swsusp.txt +++ b/trunk/Documentation/power/userland-swsusp.txt @@ -9,8 +9,9 @@ done it already. Now, to use the userland interface for software suspend you need special utilities that will read/write the system memory snapshot from/to the kernel. Such utilities are available, for example, from -. You may want to have a look at them if you -are going to develop your own suspend/resume utilities. +. You may want to have +a look at them if you are going to develop your own suspend/resume +utilities. The interface consists of a character device providing the open(), release(), read(), and write() operations as well as several ioctl() @@ -20,9 +21,9 @@ be read from /sys/class/misc/snapshot/dev. The device can be open either for reading or for writing. If open for reading, it is considered to be in the suspend mode. Otherwise it is -assumed to be in the resume mode. The device cannot be open for simultaneous -reading and writing. It is also impossible to have the device open more than -once at a time. +assumed to be in the resume mode. The device cannot be open for reading +and writing. It is also impossible to have the device open more than once +at a time. The ioctl() commands recognized by the device are: @@ -68,46 +69,9 @@ SNAPSHOT_FREE_SWAP_PAGES - free all swap pages allocated with SNAPSHOT_SET_SWAP_FILE - set the resume partition (the last ioctl() argument should specify the device's major and minor numbers in the old two-byte format, as returned by the stat() function in the .st_rdev - member of the stat structure) - -SNAPSHOT_SET_SWAP_AREA - set the resume partition and the offset (in - units) from the beginning of the partition at which the swap header is - located (the last ioctl() argument should point to a struct - resume_swap_area, as defined in kernel/power/power.h, containing the - resume device specification, as for the SNAPSHOT_SET_SWAP_FILE ioctl(), - and the offset); for swap partitions the offset is always 0, but it is - different to zero for swap files (please see - Documentation/swsusp-and-swap-files.txt for details). 
- The SNAPSHOT_SET_SWAP_AREA ioctl() is considered as a replacement for - SNAPSHOT_SET_SWAP_FILE which is regarded as obsolete. It is - recommended to always use this call, because the code to set the resume - partition may be removed from future kernels - -SNAPSHOT_S2RAM - suspend to RAM; using this call causes the kernel to - immediately enter the suspend-to-RAM state, so this call must always - be preceded by the SNAPSHOT_FREEZE call and it is also necessary - to use the SNAPSHOT_UNFREEZE call after the system wakes up. This call - is needed to implement the suspend-to-both mechanism in which the - suspend image is first created, as though the system had been suspended - to disk, and then the system is suspended to RAM (this makes it possible - to resume the system from RAM if there's enough battery power or restore - its state on the basis of the saved suspend image otherwise) - -SNAPSHOT_PMOPS - enable the usage of the pmops->prepare, pmops->enter and - pmops->finish methods (the in-kernel swsusp knows these as the "platform - method") which are needed on many machines to (among others) speed up - the resume by letting the BIOS skip some steps or to let the system - recognise the correct state of the hardware after the resume (in - particular on many machines this ensures that unplugged AC - adapters get correctly detected and that kacpid does not run wild after - the resume). The last ioctl() argument can take one of the three - values, defined in kernel/power/power.h: - PMOPS_PREPARE - make the kernel carry out the - pm_ops->prepare(PM_SUSPEND_DISK) operation - PMOPS_ENTER - make the kernel power off the system by calling - pm_ops->enter(PM_SUSPEND_DISK) - PMOPS_FINISH - make the kernel carry out the - pm_ops->finish(PM_SUSPEND_DISK) operation + member of the stat structure); it is recommended to always use this + call, because the code to set the resume partition could be removed from + future kernels The device's read() operation can be used to transfer the snapshot image from the kernel. It has the following limitations: @@ -127,12 +91,10 @@ unfreeze user space processes frozen by SNAPSHOT_UNFREEZE if they are still frozen when the device is being closed). Currently it is assumed that the userland utilities reading/writing the -snapshot image from/to the kernel will use a swap parition, called the resume -partition, or a swap file as storage space (if a swap file is used, the resume -partition is the partition that holds this file). However, this is not really -required, as they can use, for example, a special (blank) suspend partition or -a file on a partition that is unmounted before SNAPSHOT_ATOMIC_SNAPSHOT and -mounted afterwards. +snapshot image from/to the kernel will use a swap partition, called the resume +partition, as storage space. However, this is not really required, as they +can use, for example, a special (blank) suspend partition or a file on a partition +that is unmounted before SNAPSHOT_ATOMIC_SNAPSHOT and mounted afterwards. 
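For orientation, the suspend-side flow that userland-swsusp.txt describes above might be driven from userspace roughly as follows. This is only a sketch: the SNAPSHOT_* ioctl macros are assumed to be copied from kernel/power/power.h as the text notes, the third argument of SNAPSHOT_ATOMIC_SNAPSHOT is taken to indicate whether the call returned after creating the image, and error handling and the actual image storage are omitted:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include "snapshot_ioctls.h"    /* hypothetical header carrying the SNAPSHOT_* macros */

    int main(void)
    {
            char page[4096];
            ssize_t n;
            int in_suspend = 0;
            int dev = open("/dev/snapshot", O_RDONLY);  /* O_RDONLY == suspend mode */

            if (dev < 0)
                    return 1;
            if (ioctl(dev, SNAPSHOT_FREEZE, 0) < 0)
                    goto out;
            if (ioctl(dev, SNAPSHOT_ATOMIC_SNAPSHOT, &in_suspend) < 0)
                    goto thaw;
            if (in_suspend) {
                    /* Image was created: transfer it to storage, page by page. */
                    while ((n = read(dev, page, sizeof(page))) > 0) {
                            /* ... write 'page' out to the resume swap area ... */
                    }
                    ioctl(dev, SNAPSHOT_FREE, 0);
                    /* ... then power the machine off (e.g. via SNAPSHOT_PMOPS) ... */
            }
    thaw:
            ioctl(dev, SNAPSHOT_UNFREEZE, 0);
    out:
            close(dev);
            return 0;
    }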
These utilities SHOULD NOT make any assumptions regarding the ordering of data within the snapshot image, except for the image header that MAY be diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index cf24400213f8..5dff26814a06 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -1091,19 +1091,13 @@ M: miku@iki.fi S: Maintained EXT2 FILE SYSTEM -L: linux-ext4@vger.kernel.org +L: ext2-devel@lists.sourceforge.net S: Maintained EXT3 FILE SYSTEM P: Stephen Tweedie, Andrew Morton M: sct@redhat.com, akpm@osdl.org, adilger@clusterfs.com -L: linux-ext4@vger.kernel.org -S: Maintained - -EXT4 FILE SYSTEM -P: Stephen Tweedie, Andrew Morton -M: sct@redhat.com, akpm@osdl.org, adilger@clusterfs.com -L: linux-ext4@vger.kernel.org +L: ext2-devel@lists.sourceforge.net S: Maintained F71805F HARDWARE MONITORING DRIVER @@ -1220,8 +1214,7 @@ HARDWARE MONITORING P: Jean Delvare M: khali@linux-fr.org L: lm-sensors@lm-sensors.org -W: http://www.lm-sensors.org/ -T: quilt http://khali.linux-fr.org/devel/linux-2.6/jdelvare-hwmon/ +W: http://www.lm-sensors.nu/ S: Maintained HARDWARE RANDOM NUMBER GENERATOR CORE @@ -1347,7 +1340,8 @@ I2C SUBSYSTEM P: Jean Delvare M: khali@linux-fr.org L: i2c@lm-sensors.org -T: quilt http://khali.linux-fr.org/devel/linux-2.6/jdelvare-i2c/ +W: http://www.lm-sensors.nu/ +T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/ S: Maintained I2O @@ -1679,7 +1673,7 @@ S: Supported JOURNALLING LAYER FOR BLOCK DEVICES (JBD) P: Stephen Tweedie, Andrew Morton M: sct@redhat.com, akpm@osdl.org -L: linux-ext4@vger.kernel.org +L: ext2-devel@lists.sourceforge.net S: Maintained K8TEMP HARDWARE MONITORING DRIVER @@ -2919,6 +2913,7 @@ S: Maintained SUN3/3X P: Sam Creasey M: sammy@sammy.net +L: sun3-list@redhat.com W: http://sammy.net/sun3/ S: Maintained @@ -3459,12 +3454,6 @@ W: http://oss.sgi.com/projects/xfs T: git git://oss.sgi.com:8090/xfs/xfs-2.6 S: Supported -XILINX UARTLITE SERIAL DRIVER -P: Peter Korsgaard -M: jacmet@sunsite.dk -L: linux-serial@vger.kernel.org -S: Maintained - X86 3-LEVEL PAGING (PAE) SUPPORT P: Ingo Molnar M: mingo@redhat.com diff --git a/trunk/README b/trunk/README index c05561523029..3e264723b863 100644 --- a/trunk/README +++ b/trunk/README @@ -1,4 +1,4 @@ - Linux kernel release 2.6.xx + Linux kernel release 2.6.xx These are the release notes for Linux version 2.6. Read them carefully, as they tell you what this is all about, explain how to install the @@ -22,17 +22,15 @@ ON WHAT HARDWARE DOES IT RUN? Although originally developed first for 32-bit x86-based PCs (386 or higher), today Linux also runs on (at least) the Compaq Alpha AXP, Sun SPARC and - UltraSPARC, Motorola 68000, PowerPC, PowerPC64, ARM, Hitachi SuperH, Cell, + UltraSPARC, Motorola 68000, PowerPC, PowerPC64, ARM, Hitachi SuperH, IBM S/390, MIPS, HP PA-RISC, Intel IA-64, DEC VAX, AMD x86-64, AXIS CRIS, - Cris, Xtensa, AVR32 and Renesas M32R architectures. + and Renesas M32R architectures. Linux is easily portable to most general-purpose 32- or 64-bit architectures as long as they have a paged memory management unit (PMMU) and a port of the GNU C compiler (gcc) (part of The GNU Compiler Collection, GCC). Linux has also been ported to a number of architectures without a PMMU, although functionality is then obviously somewhat limited. - Linux has also been ported to itself. You can now run the kernel as a - userspace application - this is called UserMode Linux (UML). 
DOCUMENTATION: @@ -115,7 +113,6 @@ INSTALLING the kernel: version 2.6.12.2 and want to jump to 2.6.12.3, you must first reverse the 2.6.12.2 patch (that is, patch -R) _before_ applying the 2.6.12.3 patch. - You can read more on this in Documentation/applying-patches.txt - Make sure you have no stale .o files and dependencies lying around: @@ -164,7 +161,6 @@ CONFIGURING the kernel: only ask you for the answers to new questions. - Alternate configuration commands are: - "make config" Plain text interface. "make menuconfig" Text based color menus, radiolists & dialogs. "make xconfig" X windows (Qt) based configuration tool. "make gconfig" X windows (Gtk) based configuration tool. @@ -307,9 +303,8 @@ IF SOMETHING GOES WRONG: - If you compiled the kernel with CONFIG_KALLSYMS you can send the dump as is, otherwise you will have to use the "ksymoops" program to make - sense of the dump (but compiling with CONFIG_KALLSYMS is usually preferred). - This utility can be downloaded from - ftp://ftp..kernel.org/pub/linux/utils/kernel/ksymoops/ . + sense of the dump. This utility can be downloaded from + ftp://ftp..kernel.org/pub/linux/utils/kernel/ksymoops. Alternately you can do the dump lookup by hand: - In debugging dumps like the above, it helps enormously if you can @@ -341,7 +336,7 @@ IF SOMETHING GOES WRONG: If you for some reason cannot do the above (you have a pre-compiled kernel image or similar), telling me as much about your setup as - possible will help. Please read the REPORTING-BUGS document for details. + possible will help. - Alternately, you can use gdb on a running kernel. (read-only; i.e. you cannot change values or set break points.) To do this, first compile the diff --git a/trunk/REPORTING-BUGS b/trunk/REPORTING-BUGS index ac02e42a2627..f9da827a0c18 100644 --- a/trunk/REPORTING-BUGS +++ b/trunk/REPORTING-BUGS @@ -40,9 +40,7 @@ summary from [1.]>" for easy identification by the developers. [1.] One line summary of the problem: [2.] Full description of the problem/report: [3.] Keywords (i.e., modules, networking, kernel): -[4.] Kernel information -[4.1.] Kernel version (from /proc/version): -[4.2.] Kernel .config file: +[4.] Kernel version (from /proc/version): [5.] Most recent kernel version which did not have the bug: [6.] Output of Oops.. 
message (if applicable) with symbolic information resolved (see Documentation/oops-tracing.txt) diff --git a/trunk/arch/alpha/kernel/pci.c b/trunk/arch/alpha/kernel/pci.c index 3c10b9a1ddf5..ffb7d5423cc0 100644 --- a/trunk/arch/alpha/kernel/pci.c +++ b/trunk/arch/alpha/kernel/pci.c @@ -516,11 +516,10 @@ sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn) if (bus == 0 && dfn == 0) { hose = pci_isa_hose; } else { - dev = pci_get_bus_and_slot(bus, dfn); + dev = pci_find_slot(bus, dfn); if (!dev) return -ENODEV; hose = dev->sysdata; - pci_dev_put(dev); } } diff --git a/trunk/arch/alpha/kernel/sys_miata.c b/trunk/arch/alpha/kernel/sys_miata.c index 910b43cd63e8..b8b817feb1ee 100644 --- a/trunk/arch/alpha/kernel/sys_miata.c +++ b/trunk/arch/alpha/kernel/sys_miata.c @@ -183,15 +183,11 @@ miata_map_irq(struct pci_dev *dev, u8 slot, u8 pin) if((slot == 7) && (PCI_FUNC(dev->devfn) == 3)) { u8 irq=0; - struct pci_dev *pdev = pci_get_slot(dev->bus, dev->devfn & ~7); - if(pdev == NULL || pci_read_config_byte(pdev, 0x40,&irq) != PCIBIOS_SUCCESSFUL) { - pci_dev_put(pdev); + + if(pci_read_config_byte(pci_find_slot(dev->bus->number, dev->devfn & ~(7)), 0x40,&irq)!=PCIBIOS_SUCCESSFUL) return -1; - } - else { - pci_dev_put(pdev); + else return irq; - } } return COMMON_TABLE_LOOKUP; diff --git a/trunk/arch/alpha/kernel/sys_nautilus.c b/trunk/arch/alpha/kernel/sys_nautilus.c index e7594a7cf585..93744bab73fb 100644 --- a/trunk/arch/alpha/kernel/sys_nautilus.c +++ b/trunk/arch/alpha/kernel/sys_nautilus.c @@ -200,7 +200,7 @@ nautilus_init_pci(void) bus = pci_scan_bus(0, alpha_mv.pci_ops, hose); hose->bus = bus; - irongate = pci_get_bus_and_slot(0, 0); + irongate = pci_find_slot(0, 0); bus->self = irongate; bus->resource[1] = &irongate_mem; diff --git a/trunk/arch/alpha/mm/fault.c b/trunk/arch/alpha/mm/fault.c index 8aa9db834c11..8871529a34e2 100644 --- a/trunk/arch/alpha/mm/fault.c +++ b/trunk/arch/alpha/mm/fault.c @@ -108,7 +108,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr, /* If we're in an interrupt context, or have no user context, we must not take the fault. 
*/ - if (!mm || in_atomic()) + if (!mm || in_interrupt()) goto no_context; #ifdef CONFIG_ALPHA_LARGE_VMALLOC diff --git a/trunk/arch/arm/common/sharpsl_pm.c b/trunk/arch/arm/common/sharpsl_pm.c index b3599743093b..605dedf96790 100644 --- a/trunk/arch/arm/common/sharpsl_pm.c +++ b/trunk/arch/arm/common/sharpsl_pm.c @@ -60,16 +60,16 @@ static int sharpsl_ac_check(void); static int sharpsl_fatal_check(void); static int sharpsl_average_value(int ad); static void sharpsl_average_clear(void); -static void sharpsl_charge_toggle(struct work_struct *private_); -static void sharpsl_battery_thread(struct work_struct *private_); +static void sharpsl_charge_toggle(void *private_); +static void sharpsl_battery_thread(void *private_); /* * Variables */ struct sharpsl_pm_status sharpsl_pm; -DECLARE_DELAYED_WORK(toggle_charger, sharpsl_charge_toggle); -DECLARE_DELAYED_WORK(sharpsl_bat, sharpsl_battery_thread); +DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL); +DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL); DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger); @@ -116,7 +116,7 @@ void sharpsl_battery_kick(void) EXPORT_SYMBOL(sharpsl_battery_kick); -static void sharpsl_battery_thread(struct work_struct *private_) +static void sharpsl_battery_thread(void *private_) { int voltage, percent, apm_status, i = 0; @@ -128,7 +128,7 @@ static void sharpsl_battery_thread(struct work_struct *private_) /* Corgi cannot confirm when battery fully charged so periodically kick! */ if (!sharpsl_pm.machinfo->batfull_irq && (sharpsl_pm.charge_mode == CHRG_ON) && time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_ON_TIME_INTERVAL)) - schedule_delayed_work(&toggle_charger, 0); + schedule_work(&toggle_charger); while(1) { voltage = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT); @@ -212,7 +212,7 @@ static void sharpsl_charge_off(void) sharpsl_pm_led(SHARPSL_LED_OFF); sharpsl_pm.charge_mode = CHRG_OFF; - schedule_delayed_work(&sharpsl_bat, 0); + schedule_work(&sharpsl_bat); } static void sharpsl_charge_error(void) @@ -222,7 +222,7 @@ static void sharpsl_charge_error(void) sharpsl_pm.charge_mode = CHRG_ERROR; } -static void sharpsl_charge_toggle(struct work_struct *private_) +static void sharpsl_charge_toggle(void *private_) { dev_dbg(sharpsl_pm.dev, "Toogling Charger at time: %lx\n", jiffies); @@ -254,7 +254,7 @@ static void sharpsl_ac_timer(unsigned long data) else if (sharpsl_pm.charge_mode == CHRG_ON) sharpsl_charge_off(); - schedule_delayed_work(&sharpsl_bat, 0); + schedule_work(&sharpsl_bat); } @@ -279,10 +279,10 @@ static void sharpsl_chrg_full_timer(unsigned long data) sharpsl_charge_off(); } else if (sharpsl_pm.full_count < 2) { dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n"); - schedule_delayed_work(&toggle_charger, 0); + schedule_work(&toggle_charger); } else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) { dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n"); - schedule_delayed_work(&toggle_charger, 0); + schedule_work(&toggle_charger); } else { sharpsl_charge_off(); sharpsl_pm.charge_mode = CHRG_DONE; diff --git a/trunk/arch/arm/kernel/signal.c b/trunk/arch/arm/kernel/signal.c index f38a60a03b8c..48cf7fffddf2 100644 --- a/trunk/arch/arm/kernel/signal.c +++ b/trunk/arch/arm/kernel/signal.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include diff --git a/trunk/arch/arm/mach-omap1/board-h3.c b/trunk/arch/arm/mach-omap1/board-h3.c index 9d2346fb68f4..f225a083dee1 100644 --- 
a/trunk/arch/arm/mach-omap1/board-h3.c +++ b/trunk/arch/arm/mach-omap1/board-h3.c @@ -323,8 +323,7 @@ static int h3_transceiver_mode(struct device *dev, int mode) cancel_delayed_work(&irda_config->gpio_expa); PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode); -#error this is not permitted - mode is an argument variable - schedule_delayed_work(&irda_config->gpio_expa, 0); + schedule_work(&irda_config->gpio_expa); return 0; } diff --git a/trunk/arch/arm/mach-omap1/board-nokia770.c b/trunk/arch/arm/mach-omap1/board-nokia770.c index cbe909bad79b..dbc555d209ff 100644 --- a/trunk/arch/arm/mach-omap1/board-nokia770.c +++ b/trunk/arch/arm/mach-omap1/board-nokia770.c @@ -74,7 +74,7 @@ static struct omap_kp_platform_data nokia770_kp_data = { .rows = 8, .cols = 8, .keymap = nokia770_keymap, - .keymapsize = ARRAY_SIZE(nokia770_keymap), + .keymapsize = ARRAY_SIZE(nokia770_keymap) .delay = 4, }; @@ -191,7 +191,7 @@ static void nokia770_audio_pwr_up(void) printk("HP connected\n"); } -static void codec_delayed_power_down(struct work_struct *work) +static void codec_delayed_power_down(void *arg) { down(&audio_pwr_sem); if (audio_pwr_state == -1) @@ -200,7 +200,7 @@ static void codec_delayed_power_down(struct work_struct *work) up(&audio_pwr_sem); } -static DECLARE_DELAYED_WORK(codec_power_down_work, codec_delayed_power_down); +static DECLARE_WORK(codec_power_down_work, codec_delayed_power_down, NULL); static void nokia770_audio_pwr_down(void) { diff --git a/trunk/arch/arm/mach-omap1/devices.c b/trunk/arch/arm/mach-omap1/devices.c index 6dcd10ab4496..a611c3b63954 100644 --- a/trunk/arch/arm/mach-omap1/devices.c +++ b/trunk/arch/arm/mach-omap1/devices.c @@ -55,7 +55,7 @@ static inline void omap_init_irda(void) {} /*-------------------------------------------------------------------------*/ -#if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE) +#if defined(CONFIG_OMAP_RTC) || defined(CONFIG_OMAP_RTC) #define OMAP_RTC_BASE 0xfffb4800 diff --git a/trunk/arch/arm/mach-omap1/leds-osk.c b/trunk/arch/arm/mach-omap1/leds-osk.c index 0cbf1b0071f8..3b29e59b0e6f 100644 --- a/trunk/arch/arm/mach-omap1/leds-osk.c +++ b/trunk/arch/arm/mach-omap1/leds-osk.c @@ -35,7 +35,7 @@ static u8 hw_led_state; static u8 tps_leds_change; -static void tps_work(struct work_struct *unused) +static void tps_work(void *unused) { for (;;) { u8 leds; @@ -61,7 +61,7 @@ static void tps_work(struct work_struct *unused) } } -static DECLARE_WORK(work, tps_work); +static DECLARE_WORK(work, tps_work, NULL); #ifdef CONFIG_OMAP_OSK_MISTRAL diff --git a/trunk/arch/arm/mach-omap2/board-h4.c b/trunk/arch/arm/mach-omap2/board-h4.c index 3b1ad1d981a3..26a95a642ad7 100644 --- a/trunk/arch/arm/mach-omap2/board-h4.c +++ b/trunk/arch/arm/mach-omap2/board-h4.c @@ -206,8 +206,7 @@ static int h4_transceiver_mode(struct device *dev, int mode) cancel_delayed_work(&irda_config->gpio_expa); PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode); -#error this is not permitted - mode is an argument variable - schedule_delayed_work(&irda_config->gpio_expa, 0); + schedule_work(&irda_config->gpio_expa); return 0; } diff --git a/trunk/arch/arm/mach-pxa/akita-ioexp.c b/trunk/arch/arm/mach-pxa/akita-ioexp.c index 12d2fe0ceff6..1b398742ab56 100644 --- a/trunk/arch/arm/mach-pxa/akita-ioexp.c +++ b/trunk/arch/arm/mach-pxa/akita-ioexp.c @@ -36,11 +36,11 @@ I2C_CLIENT_INSMOD; static int max7310_write(struct i2c_client *client, int address, int data); static struct i2c_client max7310_template; -static void akita_ioexp_work(struct work_struct 
*private_); +static void akita_ioexp_work(void *private_); static struct device *akita_ioexp_device; static unsigned char ioexp_output_value = AKITA_IOEXP_IO_OUT; -DECLARE_WORK(akita_ioexp, akita_ioexp_work); +DECLARE_WORK(akita_ioexp, akita_ioexp_work, NULL); /* @@ -158,7 +158,7 @@ void akita_reset_ioexp(struct device *dev, unsigned char bit) EXPORT_SYMBOL(akita_set_ioexp); EXPORT_SYMBOL(akita_reset_ioexp); -static void akita_ioexp_work(struct work_struct *private_) +static void akita_ioexp_work(void *private_) { if (akita_ioexp_device) max7310_set_ouputs(akita_ioexp_device, ioexp_output_value); diff --git a/trunk/arch/arm/mach-s3c2410/dma.c b/trunk/arch/arm/mach-s3c2410/dma.c index 01abb0ace234..3d211dc2f2f9 100644 --- a/trunk/arch/arm/mach-s3c2410/dma.c +++ b/trunk/arch/arm/mach-s3c2410/dma.c @@ -40,7 +40,7 @@ /* io map for dma */ static void __iomem *dma_base; -static struct kmem_cache *dma_kmem; +static kmem_cache_t *dma_kmem; struct s3c24xx_dma_selection dma_sel; @@ -1271,7 +1271,7 @@ struct sysdev_class dma_sysclass = { /* kmem cache implementation */ -static void s3c2410_dma_cache_ctor(void *p, struct kmem_cache *c, unsigned long f) +static void s3c2410_dma_cache_ctor(void *p, kmem_cache_t *c, unsigned long f) { memset(p, 0, sizeof(struct s3c2410_dma_buf)); } diff --git a/trunk/arch/arm/mm/fault.c b/trunk/arch/arm/mm/fault.c index 9fd6d2eafb40..5e658a874498 100644 --- a/trunk/arch/arm/mm/fault.c +++ b/trunk/arch/arm/mm/fault.c @@ -230,7 +230,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (in_atomic() || !mm) + if (in_interrupt() || !mm) goto no_context; /* diff --git a/trunk/arch/arm26/kernel/ecard.c b/trunk/arch/arm26/kernel/ecard.c index 43dd41be71fb..047d0a408b9d 100644 --- a/trunk/arch/arm26/kernel/ecard.c +++ b/trunk/arch/arm26/kernel/ecard.c @@ -620,10 +620,12 @@ ecard_probe(int slot, card_type_t type) struct ex_ecid cid; int i, rc = -ENOMEM; - ec = kzalloc(sizeof(ecard_t), GFP_KERNEL); + ec = kmalloc(sizeof(ecard_t), GFP_KERNEL); if (!ec) goto nomem; + memset(ec, 0, sizeof(ecard_t)); + ec->slot_no = slot; ec->type = type; ec->irq = NO_IRQ; diff --git a/trunk/arch/arm26/mm/fault.c b/trunk/arch/arm26/mm/fault.c index 93c0cee0fb5e..a1f6d8a9cc32 100644 --- a/trunk/arch/arm26/mm/fault.c +++ b/trunk/arch/arm26/mm/fault.c @@ -215,7 +215,7 @@ int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) * If we're in an interrupt or have no user * context, we must not take the fault.. 
*/ - if (in_atomic() || !mm) + if (in_interrupt() || !mm) goto no_context; down_read(&mm->mmap_sem); diff --git a/trunk/arch/arm26/mm/memc.c b/trunk/arch/arm26/mm/memc.c index f2901581d4da..34def6397c3c 100644 --- a/trunk/arch/arm26/mm/memc.c +++ b/trunk/arch/arm26/mm/memc.c @@ -24,7 +24,7 @@ #define MEMC_TABLE_SIZE (256*sizeof(unsigned long)) -struct kmem_cache *pte_cache, *pgd_cache; +kmem_cache_t *pte_cache, *pgd_cache; int page_nr; /* @@ -162,12 +162,12 @@ void __init create_memmap_holes(struct meminfo *mi) { } -static void pte_cache_ctor(void *pte, struct kmem_cache *cache, unsigned long flags) +static void pte_cache_ctor(void *pte, kmem_cache_t *cache, unsigned long flags) { memzero(pte, sizeof(pte_t) * PTRS_PER_PTE); } -static void pgd_cache_ctor(void *pgd, struct kmem_cache *cache, unsigned long flags) +static void pgd_cache_ctor(void *pgd, kmem_cache_t *cache, unsigned long flags) { memzero(pgd + MEMC_TABLE_SIZE, USER_PTRS_PER_PGD * sizeof(pgd_t)); } diff --git a/trunk/arch/avr32/kernel/kprobes.c b/trunk/arch/avr32/kernel/kprobes.c index d0abbcaf1c1e..ca41fc1edbe1 100644 --- a/trunk/arch/avr32/kernel/kprobes.c +++ b/trunk/arch/avr32/kernel/kprobes.c @@ -154,7 +154,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) return 1; no_kprobe: - preempt_enable_no_resched(); return ret; } diff --git a/trunk/arch/avr32/kernel/signal.c b/trunk/arch/avr32/kernel/signal.c index 0ec14854a200..33096651c24f 100644 --- a/trunk/arch/avr32/kernel/signal.c +++ b/trunk/arch/avr32/kernel/signal.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include diff --git a/trunk/arch/avr32/mm/dma-coherent.c b/trunk/arch/avr32/mm/dma-coherent.c index b68d669f823d..44ab8a7bdae2 100644 --- a/trunk/arch/avr32/mm/dma-coherent.c +++ b/trunk/arch/avr32/mm/dma-coherent.c @@ -11,7 +11,7 @@ #include #include -void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction) +void dma_cache_sync(void *vaddr, size_t size, int direction) { /* * No need to sync an uncached area diff --git a/trunk/arch/cris/mm/fault.c b/trunk/arch/cris/mm/fault.c index c73e91f1299a..934c51078cce 100644 --- a/trunk/arch/cris/mm/fault.c +++ b/trunk/arch/cris/mm/fault.c @@ -232,7 +232,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs, * context, we must not take the fault.. 
*/ - if (in_atomic() || !mm) + if (in_interrupt() || !mm) goto no_context; down_read(&mm->mmap_sem); diff --git a/trunk/arch/frv/kernel/futex.c b/trunk/arch/frv/kernel/futex.c index 14f64b054c7e..eae874a970c6 100644 --- a/trunk/arch/frv/kernel/futex.c +++ b/trunk/arch/frv/kernel/futex.c @@ -10,9 +10,9 @@ */ #include -#include #include #include +#include /* * the various futex operations; MMU fault checking is ignored under no-MMU @@ -200,7 +200,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - pagefault_disable(); + inc_preempt_count(); switch (op) { case FUTEX_OP_SET: @@ -223,7 +223,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) break; } - pagefault_enable(); + dec_preempt_count(); if (!ret) { switch (cmp) { diff --git a/trunk/arch/frv/kernel/setup.c b/trunk/arch/frv/kernel/setup.c index 1a5eb6c301c9..a8c61dac1cee 100644 --- a/trunk/arch/frv/kernel/setup.c +++ b/trunk/arch/frv/kernel/setup.c @@ -947,7 +947,7 @@ static void __init setup_linux_memory(void) if (LOADER_TYPE && INITRD_START) { if (INITRD_START + INITRD_SIZE <= (low_top_pfn << PAGE_SHIFT)) { reserve_bootmem(INITRD_START, INITRD_SIZE); - initrd_start = INITRD_START + PAGE_OFFSET; + initrd_start = INITRD_START ? INITRD_START + PAGE_OFFSET : 0; initrd_end = initrd_start + INITRD_SIZE; } else { diff --git a/trunk/arch/frv/kernel/signal.c b/trunk/arch/frv/kernel/signal.c index 85baeae9666a..b8a5882b8625 100644 --- a/trunk/arch/frv/kernel/signal.c +++ b/trunk/arch/frv/kernel/signal.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/trunk/arch/frv/mm/fault.c b/trunk/arch/frv/mm/fault.c index 3f12296c3688..8b3eb50c5105 100644 --- a/trunk/arch/frv/mm/fault.c +++ b/trunk/arch/frv/mm/fault.c @@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear * If we're in an interrupt or have no user * context, we must not take the fault.. 
*/ - if (in_atomic() || !mm) + if (in_interrupt() || !mm) goto no_context; down_read(&mm->mmap_sem); diff --git a/trunk/arch/frv/mm/pgalloc.c b/trunk/arch/frv/mm/pgalloc.c index 19b13be114a2..f76dd03ddd99 100644 --- a/trunk/arch/frv/mm/pgalloc.c +++ b/trunk/arch/frv/mm/pgalloc.c @@ -18,7 +18,7 @@ #include pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE))); -struct kmem_cache *pgd_cache; +kmem_cache_t *pgd_cache; pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { @@ -100,7 +100,7 @@ static inline void pgd_list_del(pgd_t *pgd) set_page_private(next, (unsigned long) pprev); } -void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused) +void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused) { unsigned long flags; @@ -120,7 +120,7 @@ void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused) } /* never called when PTRS_PER_PMD > 1 */ -void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused) +void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused) { unsigned long flags; /* can be called from interrupt context */ diff --git a/trunk/arch/h8300/kernel/setup.c b/trunk/arch/h8300/kernel/setup.c index 6adf8f41d2a1..1077b71d5226 100644 --- a/trunk/arch/h8300/kernel/setup.c +++ b/trunk/arch/h8300/kernel/setup.c @@ -116,7 +116,7 @@ void __init setup_arch(char **cmdline_p) #endif #else if ((memory_end < CONFIG_BLKDEV_RESERVE_ADDRESS) && - (memory_end > CONFIG_BLKDEV_RESERVE_ADDRESS)) + (memory_end > CONFIG_BLKDEV_RESERVE_ADDRESS) /* overlap userarea */ memory_end = CONFIG_BLKDEV_RESERVE_ADDRESS; #endif diff --git a/trunk/arch/h8300/kernel/signal.c b/trunk/arch/h8300/kernel/signal.c index 02955604d760..7787f70a05bb 100644 --- a/trunk/arch/h8300/kernel/signal.c +++ b/trunk/arch/h8300/kernel/signal.c @@ -38,7 +38,7 @@ #include #include #include -#include +#include #include #include diff --git a/trunk/arch/i386/kernel/acpi/cstate.c b/trunk/arch/i386/kernel/acpi/cstate.c index 12e937c1ce4b..4664b55f623e 100644 --- a/trunk/arch/i386/kernel/acpi/cstate.c +++ b/trunk/arch/i386/kernel/acpi/cstate.c @@ -156,8 +156,10 @@ static int __init ffh_cstate_init(void) static void __exit ffh_cstate_exit(void) { - free_percpu(cpu_cstate_entry); - cpu_cstate_entry = NULL; + if (cpu_cstate_entry) { + free_percpu(cpu_cstate_entry); + cpu_cstate_entry = NULL; + } } arch_initcall(ffh_cstate_init); diff --git a/trunk/arch/i386/kernel/cpu/mcheck/non-fatal.c b/trunk/arch/i386/kernel/cpu/mcheck/non-fatal.c index 6b5d3518a1c0..1f9153ae5b03 100644 --- a/trunk/arch/i386/kernel/cpu/mcheck/non-fatal.c +++ b/trunk/arch/i386/kernel/cpu/mcheck/non-fatal.c @@ -51,10 +51,10 @@ static void mce_checkregs (void *info) } } -static void mce_work_fn(struct work_struct *work); -static DECLARE_DELAYED_WORK(mce_work, mce_work_fn); +static void mce_work_fn(void *data); +static DECLARE_WORK(mce_work, mce_work_fn, NULL); -static void mce_work_fn(struct work_struct *work) +static void mce_work_fn(void *data) { on_each_cpu(mce_checkregs, NULL, 1, 1); schedule_delayed_work(&mce_work, MCE_RATE); diff --git a/trunk/arch/i386/kernel/cpu/mcheck/therm_throt.c b/trunk/arch/i386/kernel/cpu/mcheck/therm_throt.c index 065005c3f168..bad8b4420709 100644 --- a/trunk/arch/i386/kernel/cpu/mcheck/therm_throt.c +++ b/trunk/arch/i386/kernel/cpu/mcheck/therm_throt.c @@ -116,6 +116,7 @@ static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev) return sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group); } +#ifdef CONFIG_HOTPLUG_CPU static 
__cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev) { return sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group); @@ -152,6 +153,7 @@ static struct notifier_block thermal_throttle_cpu_notifier = { .notifier_call = thermal_throttle_cpu_callback, }; +#endif /* CONFIG_HOTPLUG_CPU */ static __init int thermal_throttle_init_device(void) { diff --git a/trunk/arch/i386/kernel/cpuid.c b/trunk/arch/i386/kernel/cpuid.c index 23b2cc748d4e..ab0c327e79dc 100644 --- a/trunk/arch/i386/kernel/cpuid.c +++ b/trunk/arch/i386/kernel/cpuid.c @@ -167,6 +167,7 @@ static int cpuid_device_create(int i) return err; } +#ifdef CONFIG_HOTPLUG_CPU static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; @@ -186,6 +187,7 @@ static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier = { .notifier_call = cpuid_class_cpu_callback, }; +#endif /* !CONFIG_HOTPLUG_CPU */ static int __init cpuid_init(void) { diff --git a/trunk/arch/i386/kernel/crash.c b/trunk/arch/i386/kernel/crash.c index a5e0e990ea95..144b43288965 100644 --- a/trunk/arch/i386/kernel/crash.c +++ b/trunk/arch/i386/kernel/crash.c @@ -31,6 +31,68 @@ /* This keeps a track of which one is crashing cpu. */ static int crashing_cpu; +static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data, + size_t data_len) +{ + struct elf_note note; + + note.n_namesz = strlen(name) + 1; + note.n_descsz = data_len; + note.n_type = type; + memcpy(buf, &note, sizeof(note)); + buf += (sizeof(note) +3)/4; + memcpy(buf, name, note.n_namesz); + buf += (note.n_namesz + 3)/4; + memcpy(buf, data, note.n_descsz); + buf += (note.n_descsz + 3)/4; + + return buf; +} + +static void final_note(u32 *buf) +{ + struct elf_note note; + + note.n_namesz = 0; + note.n_descsz = 0; + note.n_type = 0; + memcpy(buf, &note, sizeof(note)); +} + +static void crash_save_this_cpu(struct pt_regs *regs, int cpu) +{ + struct elf_prstatus prstatus; + u32 *buf; + + if ((cpu < 0) || (cpu >= NR_CPUS)) + return; + + /* Using ELF notes here is opportunistic. + * I need a well defined structure format + * for the data I pass, and I need tags + * on the data to indicate what information I have + * squirrelled away. ELF notes happen to provide + * all of that, so there is no need to invent something new. 
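For orientation: the buffer built by append_elf_note() is a chain of ordinary ELF notes, a fixed elf_note header followed by the name and then the payload, each padded out to a 4-byte boundary, which is what the (len + 3)/4 arithmetic accounts for. A hypothetical walker over such a buffer, shown only to illustrate the layout:

#include <linux/elf.h>
#include <linux/types.h>

/* not part of the patch: step from one note to the next, returning NULL
 * at the all-zero terminator written by final_note() */
static u32 *next_elf_note(u32 *buf)
{
	struct elf_note *note = (struct elf_note *)buf;

	if (!note->n_namesz && !note->n_descsz && !note->n_type)
		return NULL;			/* final_note() terminator */
	buf += (sizeof(*note) + 3) / 4;		/* fixed header */
	buf += (note->n_namesz + 3) / 4;	/* name, 4-byte padded */
	buf += (note->n_descsz + 3) / 4;	/* payload, 4-byte padded */
	return buf;
}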
+ */ + buf = (u32*)per_cpu_ptr(crash_notes, cpu); + if (!buf) + return; + memset(&prstatus, 0, sizeof(prstatus)); + prstatus.pr_pid = current->pid; + elf_core_copy_regs(&prstatus.pr_reg, regs); + buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus, + sizeof(prstatus)); + final_note(buf); +} + +static void crash_save_self(struct pt_regs *regs) +{ + int cpu; + + cpu = safe_smp_processor_id(); + crash_save_this_cpu(regs, cpu); +} + #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) static atomic_t waiting_for_crash_ipi; @@ -59,7 +121,7 @@ static int crash_nmi_callback(struct notifier_block *self, crash_fixup_ss_esp(&fixed_regs, regs); regs = &fixed_regs; } - crash_save_cpu(regs, cpu); + crash_save_this_cpu(regs, cpu); disable_local_APIC(); atomic_dec(&waiting_for_crash_ipi); /* Assume hlt works */ @@ -133,5 +195,5 @@ void machine_crash_shutdown(struct pt_regs *regs) #if defined(CONFIG_X86_IO_APIC) disable_IO_APIC(); #endif - crash_save_cpu(regs, safe_smp_processor_id()); + crash_save_self(regs); } diff --git a/trunk/arch/i386/kernel/io_apic.c b/trunk/arch/i386/kernel/io_apic.c index 44c5a3206b2a..3b7a63e0ed1a 100644 --- a/trunk/arch/i386/kernel/io_apic.c +++ b/trunk/arch/i386/kernel/io_apic.c @@ -34,7 +34,6 @@ #include #include #include -#include #include #include diff --git a/trunk/arch/i386/kernel/kprobes.c b/trunk/arch/i386/kernel/kprobes.c index af1d53344993..fc79e1e859c4 100644 --- a/trunk/arch/i386/kernel/kprobes.c +++ b/trunk/arch/i386/kernel/kprobes.c @@ -184,7 +184,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { mutex_lock(&kprobe_mutex); - free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1)); + free_insn_slot(p->ainsn.insn); mutex_unlock(&kprobe_mutex); } @@ -333,7 +333,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) return 1; ss_probe: -#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM) +#ifndef CONFIG_PREEMPT if (p->ainsn.boostable == 1 && !p->post_handler){ /* Boost up -- we can execute copied instructions directly */ reset_current_kprobe(); diff --git a/trunk/arch/i386/kernel/microcode.c b/trunk/arch/i386/kernel/microcode.c index 972346604f9d..23f5984d0654 100644 --- a/trunk/arch/i386/kernel/microcode.c +++ b/trunk/arch/i386/kernel/microcode.c @@ -703,6 +703,7 @@ static struct sysdev_driver mc_sysdev_driver = { .resume = mc_sysdev_resume, }; +#ifdef CONFIG_HOTPLUG_CPU static __cpuinit int mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) { @@ -725,6 +726,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) static struct notifier_block mc_cpu_notifier = { .notifier_call = mc_cpu_callback, }; +#endif static int __init microcode_init (void) { diff --git a/trunk/arch/i386/kernel/msr.c b/trunk/arch/i386/kernel/msr.c index 7763c67ca282..a773f776c9ea 100644 --- a/trunk/arch/i386/kernel/msr.c +++ b/trunk/arch/i386/kernel/msr.c @@ -250,6 +250,7 @@ static int msr_device_create(int i) return err; } +#ifdef CONFIG_HOTPLUG_CPU static int msr_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { @@ -270,6 +271,7 @@ static struct notifier_block __cpuinitdata msr_class_cpu_notifier = { .notifier_call = msr_class_cpu_callback, }; +#endif static int __init msr_init(void) { diff --git a/trunk/arch/i386/kernel/reboot.c b/trunk/arch/i386/kernel/reboot.c index 3514b4153f7f..84278e0093a2 100644 --- a/trunk/arch/i386/kernel/reboot.c +++ b/trunk/arch/i386/kernel/reboot.c @@ -12,7 +12,6 @@ #include #include #include -#include 
#include #include #include diff --git a/trunk/arch/i386/kernel/setup.c b/trunk/arch/i386/kernel/setup.c index 97bb869307bc..141041dde74d 100644 --- a/trunk/arch/i386/kernel/setup.c +++ b/trunk/arch/i386/kernel/setup.c @@ -1162,7 +1162,8 @@ void __init setup_bootmem_allocator(void) if (LOADER_TYPE && INITRD_START) { if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) { reserve_bootmem(INITRD_START, INITRD_SIZE); - initrd_start = INITRD_START + PAGE_OFFSET; + initrd_start = + INITRD_START ? INITRD_START + PAGE_OFFSET : 0; initrd_end = initrd_start+INITRD_SIZE; } else { diff --git a/trunk/arch/i386/kernel/smp.c b/trunk/arch/i386/kernel/smp.c index 9827cf927ecb..31e5c6573aae 100644 --- a/trunk/arch/i386/kernel/smp.c +++ b/trunk/arch/i386/kernel/smp.c @@ -699,10 +699,6 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, put_cpu(); return -EBUSY; } - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - spin_lock_bh(&call_lock); __smp_call_function_single(cpu, func, info, nonatomic, wait); spin_unlock_bh(&call_lock); diff --git a/trunk/arch/i386/kernel/smpboot.c b/trunk/arch/i386/kernel/smpboot.c index 02a9b66b6ac3..4bb8b77cd65b 100644 --- a/trunk/arch/i386/kernel/smpboot.c +++ b/trunk/arch/i386/kernel/smpboot.c @@ -1049,15 +1049,13 @@ void cpu_exit_clear(void) struct warm_boot_cpu_info { struct completion *complete; - struct work_struct task; int apicid; int cpu; }; -static void __cpuinit do_warm_boot_cpu(struct work_struct *work) +static void __cpuinit do_warm_boot_cpu(void *p) { - struct warm_boot_cpu_info *info = - container_of(work, struct warm_boot_cpu_info, task); + struct warm_boot_cpu_info *info = p; do_boot_cpu(info->apicid, info->cpu); complete(info->complete); } @@ -1066,6 +1064,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu) { DECLARE_COMPLETION_ONSTACK(done); struct warm_boot_cpu_info info; + struct work_struct task; int apicid, ret; struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu); @@ -1090,7 +1089,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu) info.complete = &done; info.apicid = apicid; info.cpu = cpu; - INIT_WORK(&info.task, do_warm_boot_cpu); + INIT_WORK(&task, do_warm_boot_cpu, &info); tsc_sync_disabled = 1; @@ -1098,7 +1097,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu) clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS, KERNEL_PGD_PTRS); flush_tlb_all(); - schedule_work(&info.task); + schedule_work(&task); wait_for_completion(&done); tsc_sync_disabled = 0; diff --git a/trunk/arch/i386/kernel/sysenter.c b/trunk/arch/i386/kernel/sysenter.c index 0bbacd0ec175..713ba39d32c6 100644 --- a/trunk/arch/i386/kernel/sysenter.c +++ b/trunk/arch/i386/kernel/sysenter.c @@ -132,7 +132,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack) goto up_fail; } - vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL); if (!vma) { ret = -ENOMEM; goto up_fail; diff --git a/trunk/arch/i386/kernel/traps.c b/trunk/arch/i386/kernel/traps.c index 3124f1b04d67..fe9c5e8e7e6f 100644 --- a/trunk/arch/i386/kernel/traps.c +++ b/trunk/arch/i386/kernel/traps.c @@ -452,7 +452,7 @@ void die(const char * str, struct pt_regs * regs, long err) u32 lock_owner; int lock_owner_depth; } die = { - .lock = __SPIN_LOCK_UNLOCKED(die.lock), + .lock = SPIN_LOCK_UNLOCKED, .lock_owner = -1, .lock_owner_depth = 0 }; diff --git a/trunk/arch/i386/kernel/tsc.c b/trunk/arch/i386/kernel/tsc.c index 9810c8c90750..fbc95828cd74 100644 --- 
a/trunk/arch/i386/kernel/tsc.c +++ b/trunk/arch/i386/kernel/tsc.c @@ -217,7 +217,7 @@ static unsigned int cpufreq_delayed_issched = 0; static unsigned int cpufreq_init = 0; static struct work_struct cpufreq_delayed_get_work; -static void handle_cpufreq_delayed_get(struct work_struct *work) +static void handle_cpufreq_delayed_get(void *v) { unsigned int cpu; @@ -306,7 +306,7 @@ static int __init cpufreq_tsc(void) { int ret; - INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get); + INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL); ret = cpufreq_register_notifier(&time_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); if (!ret) diff --git a/trunk/arch/i386/mm/highmem.c b/trunk/arch/i386/mm/highmem.c index e0fa6cb655a8..f9f647cdbc7b 100644 --- a/trunk/arch/i386/mm/highmem.c +++ b/trunk/arch/i386/mm/highmem.c @@ -32,7 +32,7 @@ void *kmap_atomic(struct page *page, enum km_type type) unsigned long vaddr; /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ - pagefault_disable(); + inc_preempt_count(); if (!PageHighMem(page)) return page_address(page); @@ -50,22 +50,26 @@ void kunmap_atomic(void *kvaddr, enum km_type type) unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); +#ifdef CONFIG_DEBUG_HIGHMEM + if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) { + dec_preempt_count(); + preempt_check_resched(); + return; + } + + if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx)) + BUG(); +#endif /* * Force other mappings to Oops if they'll try to access this pte * without first remap it. Keeping stale mappings around is a bad idea * also, in case the page changes cacheability attributes or becomes * a protected page in a hypervisor. */ - if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) - kpte_clear_flush(kmap_pte-idx, vaddr); - else { -#ifdef CONFIG_DEBUG_HIGHMEM - BUG_ON(vaddr < PAGE_OFFSET); - BUG_ON(vaddr >= (unsigned long)high_memory); -#endif - } + kpte_clear_flush(kmap_pte-idx, vaddr); - pagefault_enable(); + dec_preempt_count(); + preempt_check_resched(); } /* This is the same as kmap_atomic() but can map memory that doesn't @@ -76,7 +80,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) enum fixed_addresses idx; unsigned long vaddr; - pagefault_disable(); + inc_preempt_count(); idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); diff --git a/trunk/arch/i386/mm/hugetlbpage.c b/trunk/arch/i386/mm/hugetlbpage.c index 34728e4afe48..1719a8141f81 100644 --- a/trunk/arch/i386/mm/hugetlbpage.c +++ b/trunk/arch/i386/mm/hugetlbpage.c @@ -17,113 +17,6 @@ #include #include -static unsigned long page_table_shareable(struct vm_area_struct *svma, - struct vm_area_struct *vma, - unsigned long addr, pgoff_t idx) -{ - unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + - svma->vm_start; - unsigned long sbase = saddr & PUD_MASK; - unsigned long s_end = sbase + PUD_SIZE; - - /* - * match the virtual addresses, permission and the alignment of the - * page table page. 
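The kmap_atomic()/kunmap_atomic() and futex hunks in this patch swap pagefault_disable()/pagefault_enable() for open-coded preempt-count operations. The two are interchangeable here because the helpers are thin wrappers over the preempt count; the sketch below restates them from memory of the 2.6.19-era include/linux/uaccess.h, so treat the exact bodies as an assumption:

/* pagefault_disable(): bump the preempt count so the fault handler sees
 * in_atomic() and refuses to sleep while fixing up a faulting access */
static inline void pagefault_disable(void)
{
	inc_preempt_count();
	barrier();	/* order the count bump before the risky access */
}

static inline void pagefault_enable(void)
{
	barrier();
	dec_preempt_count();
	barrier();
	preempt_check_resched();	/* pick up any preemption we deferred */
}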
- */ - if (pmd_index(addr) != pmd_index(saddr) || - vma->vm_flags != svma->vm_flags || - sbase < svma->vm_start || svma->vm_end < s_end) - return 0; - - return saddr; -} - -static int vma_shareable(struct vm_area_struct *vma, unsigned long addr) -{ - unsigned long base = addr & PUD_MASK; - unsigned long end = base + PUD_SIZE; - - /* - * check on proper vm_flags and page table alignment - */ - if (vma->vm_flags & VM_MAYSHARE && - vma->vm_start <= base && end <= vma->vm_end) - return 1; - return 0; -} - -/* - * search for a shareable pmd page for hugetlb. - */ -static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) -{ - struct vm_area_struct *vma = find_vma(mm, addr); - struct address_space *mapping = vma->vm_file->f_mapping; - pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + - vma->vm_pgoff; - struct prio_tree_iter iter; - struct vm_area_struct *svma; - unsigned long saddr; - pte_t *spte = NULL; - - if (!vma_shareable(vma, addr)) - return; - - spin_lock(&mapping->i_mmap_lock); - vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) { - if (svma == vma) - continue; - - saddr = page_table_shareable(svma, vma, addr, idx); - if (saddr) { - spte = huge_pte_offset(svma->vm_mm, saddr); - if (spte) { - get_page(virt_to_page(spte)); - break; - } - } - } - - if (!spte) - goto out; - - spin_lock(&mm->page_table_lock); - if (pud_none(*pud)) - pud_populate(mm, pud, (unsigned long) spte & PAGE_MASK); - else - put_page(virt_to_page(spte)); - spin_unlock(&mm->page_table_lock); -out: - spin_unlock(&mapping->i_mmap_lock); -} - -/* - * unmap huge page backed by shared pte. - * - * Hugetlb pte page is ref counted at the time of mapping. If pte is shared - * indicated by page_count > 1, unmap is achieved by clearing pud and - * decrementing the ref count. If count == 1, the pte page is not shared. - * - * called with vma->vm_mm->page_table_lock held. 
- * - * returns: 1 successfully unmapped a shared pte page - * 0 the underlying pte page is not shared, or it is the last user - */ -int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) -{ - pgd_t *pgd = pgd_offset(mm, *addr); - pud_t *pud = pud_offset(pgd, *addr); - - BUG_ON(page_count(virt_to_page(ptep)) == 0); - if (page_count(virt_to_page(ptep)) == 1) - return 0; - - pud_clear(pud); - put_page(virt_to_page(ptep)); - *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE; - return 1; -} - pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; @@ -132,11 +25,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) pgd = pgd_offset(mm, addr); pud = pud_alloc(mm, pgd, addr); - if (pud) { - if (pud_none(*pud)) - huge_pmd_share(mm, addr, pud); + if (pud) pte = (pte_t *) pmd_alloc(mm, pud, addr); - } BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); return pte; diff --git a/trunk/arch/i386/mm/init.c b/trunk/arch/i386/mm/init.c index a6a8e397dd88..167416155ee4 100644 --- a/trunk/arch/i386/mm/init.c +++ b/trunk/arch/i386/mm/init.c @@ -699,8 +699,8 @@ int remove_memory(u64 start, u64 size) #endif #endif -struct kmem_cache *pgd_cache; -struct kmem_cache *pmd_cache; +kmem_cache_t *pgd_cache; +kmem_cache_t *pmd_cache; void __init pgtable_cache_init(void) { diff --git a/trunk/arch/i386/mm/pgtable.c b/trunk/arch/i386/mm/pgtable.c index 33be236fc6af..10126e3f8174 100644 --- a/trunk/arch/i386/mm/pgtable.c +++ b/trunk/arch/i386/mm/pgtable.c @@ -193,7 +193,7 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) return pte; } -void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags) +void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags) { memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t)); } @@ -233,7 +233,7 @@ static inline void pgd_list_del(pgd_t *pgd) set_page_private(next, (unsigned long)pprev); } -void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused) +void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused) { unsigned long flags; @@ -253,7 +253,7 @@ void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused) } /* never called when PTRS_PER_PMD > 1 */ -void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused) +void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused) { unsigned long flags; /* can be called from interrupt context */ diff --git a/trunk/arch/i386/power/Makefile b/trunk/arch/i386/power/Makefile index 2de7bbf03cd7..8cfa4e8a719d 100644 --- a/trunk/arch/i386/power/Makefile +++ b/trunk/arch/i386/power/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_PM) += cpu.o -obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o suspend.o +obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o diff --git a/trunk/arch/i386/power/suspend.c b/trunk/arch/i386/power/suspend.c deleted file mode 100644 index db5e98d2eb73..000000000000 --- a/trunk/arch/i386/power/suspend.c +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Suspend support specific for i386 - temporary page tables - * - * Distribute under GPLv2 - * - * Copyright (c) 2006 Rafael J. Wysocki - */ - -#include -#include - -#include -#include -#include - -/* Defined in arch/i386/power/swsusp.S */ -extern int restore_image(void); - -/* Pointer to the temporary resume page tables */ -pgd_t *resume_pg_dir; - -/* The following three functions are based on the analogous code in - * arch/i386/mm/init.c - */ - -/* - * Create a middle page table on a resume-safe page and put a pointer to it in - * the given global directory entry. 
This only returns the gd entry - * in non-PAE compilation mode, since the middle layer is folded. - */ -static pmd_t *resume_one_md_table_init(pgd_t *pgd) -{ - pud_t *pud; - pmd_t *pmd_table; - -#ifdef CONFIG_X86_PAE - pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC); - if (!pmd_table) - return NULL; - - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); - pud = pud_offset(pgd, 0); - - BUG_ON(pmd_table != pmd_offset(pud, 0)); -#else - pud = pud_offset(pgd, 0); - pmd_table = pmd_offset(pud, 0); -#endif - - return pmd_table; -} - -/* - * Create a page table on a resume-safe page and place a pointer to it in - * a middle page directory entry. - */ -static pte_t *resume_one_page_table_init(pmd_t *pmd) -{ - if (pmd_none(*pmd)) { - pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC); - if (!page_table) - return NULL; - - set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); - - BUG_ON(page_table != pte_offset_kernel(pmd, 0)); - - return page_table; - } - - return pte_offset_kernel(pmd, 0); -} - -/* - * This maps the physical memory to kernel virtual address space, a total - * of max_low_pfn pages, by creating page tables starting from address - * PAGE_OFFSET. The page tables are allocated out of resume-safe pages. - */ -static int resume_physical_mapping_init(pgd_t *pgd_base) -{ - unsigned long pfn; - pgd_t *pgd; - pmd_t *pmd; - pte_t *pte; - int pgd_idx, pmd_idx; - - pgd_idx = pgd_index(PAGE_OFFSET); - pgd = pgd_base + pgd_idx; - pfn = 0; - - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) { - pmd = resume_one_md_table_init(pgd); - if (!pmd) - return -ENOMEM; - - if (pfn >= max_low_pfn) - continue; - - for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) { - if (pfn >= max_low_pfn) - break; - - /* Map with big pages if possible, otherwise create - * normal page tables. 
- * NOTE: We can mark everything as executable here - */ - if (cpu_has_pse) { - set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC)); - pfn += PTRS_PER_PTE; - } else { - pte_t *max_pte; - - pte = resume_one_page_table_init(pmd); - if (!pte) - return -ENOMEM; - - max_pte = pte + PTRS_PER_PTE; - for (; pte < max_pte; pte++, pfn++) { - if (pfn >= max_low_pfn) - break; - - set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC)); - } - } - } - } - return 0; -} - -static inline void resume_init_first_level_page_table(pgd_t *pg_dir) -{ -#ifdef CONFIG_X86_PAE - int i; - - /* Init entries of the first-level page table to the zero page */ - for (i = 0; i < PTRS_PER_PGD; i++) - set_pgd(pg_dir + i, - __pgd(__pa(empty_zero_page) | _PAGE_PRESENT)); -#endif -} - -int swsusp_arch_resume(void) -{ - int error; - - resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC); - if (!resume_pg_dir) - return -ENOMEM; - - resume_init_first_level_page_table(resume_pg_dir); - error = resume_physical_mapping_init(resume_pg_dir); - if (error) - return error; - - /* We have got enough memory and from now on we cannot recover */ - restore_image(); - return 0; -} diff --git a/trunk/arch/i386/power/swsusp.S b/trunk/arch/i386/power/swsusp.S index 53662e05b393..8a2b50a0aaad 100644 --- a/trunk/arch/i386/power/swsusp.S +++ b/trunk/arch/i386/power/swsusp.S @@ -28,9 +28,8 @@ ENTRY(swsusp_arch_suspend) call swsusp_save ret -ENTRY(restore_image) - movl resume_pg_dir, %ecx - subl $__PAGE_OFFSET, %ecx +ENTRY(swsusp_arch_resume) + movl $swsusp_pg_dir-__PAGE_OFFSET, %ecx movl %ecx, %cr3 movl restore_pblist, %edx @@ -52,10 +51,6 @@ copy_loop: .p2align 4,,7 done: - /* go back to the original page tables */ - movl $swapper_pg_dir, %ecx - subl $__PAGE_OFFSET, %ecx - movl %ecx, %cr3 /* Flush TLB, including "global" things (vmalloc) */ movl mmu_cr4_features, %eax movl %eax, %edx diff --git a/trunk/arch/ia64/hp/sim/simserial.c b/trunk/arch/ia64/hp/sim/simserial.c index b62f0c4d2c7c..caab986af70c 100644 --- a/trunk/arch/ia64/hp/sim/simserial.c +++ b/trunk/arch/ia64/hp/sim/simserial.c @@ -209,7 +209,7 @@ static void do_serial_bh(void) } #endif -static void do_softint(struct work_struct *private_) +static void do_softint(void *private_) { printk(KERN_ERR "simserial: do_softint called\n"); } @@ -698,7 +698,7 @@ static int get_async_struct(int line, struct async_struct **ret_info) info->flags = sstate->flags; info->xmit_fifo_size = sstate->xmit_fifo_size; info->line = line; - INIT_WORK(&info->work, do_softint); + INIT_WORK(&info->work, do_softint, info); info->state = sstate; if (sstate->info) { kfree(info); diff --git a/trunk/arch/ia64/ia32/binfmt_elf32.c b/trunk/arch/ia64/ia32/binfmt_elf32.c index 578737ec7629..daa6b91bc921 100644 --- a/trunk/arch/ia64/ia32/binfmt_elf32.c +++ b/trunk/arch/ia64/ia32/binfmt_elf32.c @@ -91,7 +91,7 @@ ia64_elf32_init (struct pt_regs *regs) * it with privilege level 3 because the IVE uses non-privileged accesses to these * tables. IA-32 segmentation is used to protect against IA-32 accesses to them. 
*/ - vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); if (vma) { memset(vma, 0, sizeof(*vma)); vma->vm_mm = current->mm; @@ -117,7 +117,7 @@ ia64_elf32_init (struct pt_regs *regs) * code is locked in specific gate page, which is pointed by pretcode * when setup_frame_ia32 */ - vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); if (vma) { memset(vma, 0, sizeof(*vma)); vma->vm_mm = current->mm; @@ -142,7 +142,7 @@ ia64_elf32_init (struct pt_regs *regs) * Install LDT as anonymous memory. This gives us all-zero segment descriptors * until a task modifies them via modify_ldt(). */ - vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); if (vma) { memset(vma, 0, sizeof(*vma)); vma->vm_mm = current->mm; @@ -214,7 +214,7 @@ ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack) bprm->loader += stack_base; bprm->exec += stack_base; - mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); if (!mpnt) return -ENOMEM; diff --git a/trunk/arch/ia64/ia32/ia32_support.c b/trunk/arch/ia64/ia32/ia32_support.c index 6af400a12ca1..c187743965a0 100644 --- a/trunk/arch/ia64/ia32/ia32_support.c +++ b/trunk/arch/ia64/ia32/ia32_support.c @@ -249,7 +249,7 @@ ia32_init (void) #if PAGE_SHIFT > IA32_PAGE_SHIFT { - extern struct kmem_cache *partial_page_cachep; + extern kmem_cache_t *partial_page_cachep; partial_page_cachep = kmem_cache_create("partial_page_cache", sizeof(struct partial_page), 0, 0, diff --git a/trunk/arch/ia64/ia32/ia32priv.h b/trunk/arch/ia64/ia32/ia32priv.h index cfa0bc0026b5..703a67c934f8 100644 --- a/trunk/arch/ia64/ia32/ia32priv.h +++ b/trunk/arch/ia64/ia32/ia32priv.h @@ -330,6 +330,8 @@ struct old_linux32_dirent { void ia64_elf32_init(struct pt_regs *regs); #define ELF_PLAT_INIT(_r, load_addr) ia64_elf32_init(_r) +#define elf_addr_t u32 + /* This macro yields a bitmask that programs can use to figure out what instruction set this CPU supports. */ #define ELF_HWCAP 0 diff --git a/trunk/arch/ia64/ia32/sys_ia32.c b/trunk/arch/ia64/ia32/sys_ia32.c index a4a6e1463af8..9d6a3f210148 100644 --- a/trunk/arch/ia64/ia32/sys_ia32.c +++ b/trunk/arch/ia64/ia32/sys_ia32.c @@ -254,7 +254,7 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro } /* SLAB cache for partial_page structures */ -struct kmem_cache *partial_page_cachep; +kmem_cache_t *partial_page_cachep; /* * init partial_page_list. diff --git a/trunk/arch/ia64/kernel/kprobes.c b/trunk/arch/ia64/kernel/kprobes.c index 4d592ee9300b..51217d63285e 100644 --- a/trunk/arch/ia64/kernel/kprobes.c +++ b/trunk/arch/ia64/kernel/kprobes.c @@ -481,7 +481,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { mutex_lock(&kprobe_mutex); - free_insn_slot(p->ainsn.insn, 0); + free_insn_slot(p->ainsn.insn); mutex_unlock(&kprobe_mutex); } /* diff --git a/trunk/arch/ia64/kernel/mca.c b/trunk/arch/ia64/kernel/mca.c index 6bedd97570ca..7cfa63a98cb3 100644 --- a/trunk/arch/ia64/kernel/mca.c +++ b/trunk/arch/ia64/kernel/mca.c @@ -678,7 +678,7 @@ ia64_mca_cmc_vector_enable (void *dummy) * disable the cmc interrupt vector. 
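The ia64_mca_cmc_vector_*_keventd() changes just below, like the sharpsl_pm, OMAP, mce and tsc hunks earlier, all revert the same workqueue interface change: handlers go back to receiving the opaque void * registered with DECLARE_WORK()/INIT_WORK() rather than a struct work_struct * they unwrap themselves. A minimal sketch of the restored three-argument convention, with made-up names:

#include <linux/workqueue.h>

struct frob_ctx {
	int hits;
};

static struct frob_ctx frob_ctx;

/* old-style handler: the registered context arrives as an opaque pointer */
static void frob_handler(void *data)
{
	struct frob_ctx *ctx = data;

	ctx->hits++;
}

static DECLARE_WORK(frob_work, frob_handler, &frob_ctx);

/* queued with schedule_work(&frob_work); under the API being reverted,
 * frob_ctx would instead embed a struct work_struct and frob_handler()
 * would recover it with container_of() from its work_struct * argument */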
*/ static void -ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused) +ia64_mca_cmc_vector_disable_keventd(void *unused) { on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0); } @@ -690,7 +690,7 @@ ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused) * enable the cmc interrupt vector. */ static void -ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused) +ia64_mca_cmc_vector_enable_keventd(void *unused) { on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0); } @@ -1247,8 +1247,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, monarch_cpu = -1; } -static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd); -static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd); +static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL); +static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL); /* * ia64_mca_cmc_int_handler diff --git a/trunk/arch/ia64/kernel/palinfo.c b/trunk/arch/ia64/kernel/palinfo.c index c4c10a0b99d9..0b546e2b36ac 100644 --- a/trunk/arch/ia64/kernel/palinfo.c +++ b/trunk/arch/ia64/kernel/palinfo.c @@ -952,6 +952,7 @@ remove_palinfo_proc_entries(unsigned int hcpu) } } +#ifdef CONFIG_HOTPLUG_CPU static int palinfo_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { @@ -973,6 +974,7 @@ static struct notifier_block palinfo_cpu_notifier = .notifier_call = palinfo_cpu_callback, .priority = 0, }; +#endif static int __init palinfo_init(void) diff --git a/trunk/arch/ia64/kernel/perfmon.c b/trunk/arch/ia64/kernel/perfmon.c index e2321536ee4c..3aaede0d6981 100644 --- a/trunk/arch/ia64/kernel/perfmon.c +++ b/trunk/arch/ia64/kernel/perfmon.c @@ -2302,7 +2302,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon DPRINT(("smpl_buf @%p\n", smpl_buf)); /* allocate vma */ - vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); if (!vma) { DPRINT(("Cannot allocate vma\n")); goto error_kmem; diff --git a/trunk/arch/ia64/kernel/salinfo.c b/trunk/arch/ia64/kernel/salinfo.c index fd607ca51a8d..e63b8ca5344a 100644 --- a/trunk/arch/ia64/kernel/salinfo.c +++ b/trunk/arch/ia64/kernel/salinfo.c @@ -575,6 +575,7 @@ static struct file_operations salinfo_data_fops = { .write = salinfo_log_write, }; +#ifdef CONFIG_HOTPLUG_CPU static int __devinit salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) { @@ -619,6 +620,7 @@ static struct notifier_block salinfo_cpu_notifier = .notifier_call = salinfo_cpu_callback, .priority = 0, }; +#endif /* CONFIG_HOTPLUG_CPU */ static int __init salinfo_init(void) diff --git a/trunk/arch/ia64/kernel/smpboot.c b/trunk/arch/ia64/kernel/smpboot.c index b21ddecea943..f7d7f5668144 100644 --- a/trunk/arch/ia64/kernel/smpboot.c +++ b/trunk/arch/ia64/kernel/smpboot.c @@ -463,17 +463,15 @@ struct pt_regs * __devinit idle_regs(struct pt_regs *regs) } struct create_idle { - struct work_struct work; struct task_struct *idle; struct completion done; int cpu; }; void -do_fork_idle(struct work_struct *work) +do_fork_idle(void *_c_idle) { - struct create_idle *c_idle = - container_of(work, struct create_idle, work); + struct create_idle *c_idle = _c_idle; c_idle->idle = fork_idle(c_idle->cpu); complete(&c_idle->done); @@ -484,10 +482,10 @@ do_boot_cpu (int sapicid, int cpu) { int timeout; struct create_idle c_idle = { - .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle), .cpu = cpu, .done = COMPLETION_INITIALIZER(c_idle.done), }; + 
DECLARE_WORK(work, do_fork_idle, &c_idle); c_idle.idle = get_idle_for_cpu(cpu); if (c_idle.idle) { @@ -499,9 +497,9 @@ do_boot_cpu (int sapicid, int cpu) * We can't use kernel_thread since we must avoid to reschedule the child. */ if (!keventd_up() || current_is_keventd()) - c_idle.work.func(&c_idle.work); + work.func(work.data); else { - schedule_work(&c_idle.work); + schedule_work(&work); wait_for_completion(&c_idle.done); } diff --git a/trunk/arch/ia64/mm/hugetlbpage.c b/trunk/arch/ia64/mm/hugetlbpage.c index 0c7e94edc20e..f3a9585e98a8 100644 --- a/trunk/arch/ia64/mm/hugetlbpage.c +++ b/trunk/arch/ia64/mm/hugetlbpage.c @@ -64,11 +64,6 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr) return pte; } -int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) -{ - return 0; -} - #define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; } /* diff --git a/trunk/arch/ia64/mm/init.c b/trunk/arch/ia64/mm/init.c index 56dc2024220e..ff87a5cba399 100644 --- a/trunk/arch/ia64/mm/init.c +++ b/trunk/arch/ia64/mm/init.c @@ -156,7 +156,7 @@ ia64_init_addr_space (void) * the problem. When the process attempts to write to the register backing store * for the first time, it will get a SEGFAULT in this case. */ - vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); if (vma) { memset(vma, 0, sizeof(*vma)); vma->vm_mm = current->mm; @@ -175,7 +175,7 @@ ia64_init_addr_space (void) /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */ if (!(current->personality & MMAP_PAGE_ZERO)) { - vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); if (vma) { memset(vma, 0, sizeof(*vma)); vma->vm_mm = current->mm; diff --git a/trunk/arch/ia64/pci/pci.c b/trunk/arch/ia64/pci/pci.c index eb92cef9cd0d..f4edfbf27134 100644 --- a/trunk/arch/ia64/pci/pci.c +++ b/trunk/arch/ia64/pci/pci.c @@ -564,8 +564,8 @@ pcibios_enable_device (struct pci_dev *dev, int mask) void pcibios_disable_device (struct pci_dev *dev) { - BUG_ON(atomic_read(&dev->enable_cnt)); - acpi_pci_irq_disable(dev); + if (dev->is_enabled) + acpi_pci_irq_disable(dev); } void diff --git a/trunk/arch/m32r/kernel/setup.c b/trunk/arch/m32r/kernel/setup.c index 936205f7aba0..0e7778be33cc 100644 --- a/trunk/arch/m32r/kernel/setup.c +++ b/trunk/arch/m32r/kernel/setup.c @@ -196,7 +196,9 @@ static unsigned long __init setup_memory(void) if (LOADER_TYPE && INITRD_START) { if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) { reserve_bootmem(INITRD_START, INITRD_SIZE); - initrd_start = INITRD_START + PAGE_OFFSET; + initrd_start = INITRD_START ? 
+ INITRD_START + PAGE_OFFSET : 0; + initrd_end = initrd_start + INITRD_SIZE; printk("initrd:start[%08lx],size[%08lx]\n", initrd_start, INITRD_SIZE); diff --git a/trunk/arch/m32r/kernel/signal.c b/trunk/arch/m32r/kernel/signal.c index 092ea86bb079..b60cea4aebaa 100644 --- a/trunk/arch/m32r/kernel/signal.c +++ b/trunk/arch/m32r/kernel/signal.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/trunk/arch/m32r/mm/discontig.c b/trunk/arch/m32r/mm/discontig.c index c7efdb0aefc5..abb34ccd5986 100644 --- a/trunk/arch/m32r/mm/discontig.c +++ b/trunk/arch/m32r/mm/discontig.c @@ -105,7 +105,9 @@ unsigned long __init setup_memory(void) if (INITRD_START + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) { reserve_bootmem_node(NODE_DATA(0), INITRD_START, INITRD_SIZE); - initrd_start = INITRD_START + PAGE_OFFSET; + initrd_start = INITRD_START ? + INITRD_START + PAGE_OFFSET : 0; + initrd_end = initrd_start + INITRD_SIZE; printk("initrd:start[%08lx],size[%08lx]\n", initrd_start, INITRD_SIZE); diff --git a/trunk/arch/m68k/amiga/chipram.c b/trunk/arch/m68k/amiga/chipram.c index fa015d801617..de1304c91112 100644 --- a/trunk/arch/m68k/amiga/chipram.c +++ b/trunk/arch/m68k/amiga/chipram.c @@ -52,9 +52,10 @@ void *amiga_chip_alloc(unsigned long size, const char *name) #ifdef DEBUG printk("amiga_chip_alloc: allocate %ld bytes\n", size); #endif - res = kzalloc(sizeof(struct resource), GFP_KERNEL); + res = kmalloc(sizeof(struct resource), GFP_KERNEL); if (!res) return NULL; + memset(res, 0, sizeof(struct resource)); res->name = name; if (allocate_resource(&chipram_res, res, size, 0, UINT_MAX, PAGE_SIZE, NULL, NULL) < 0) { diff --git a/trunk/arch/m68k/atari/hades-pci.c b/trunk/arch/m68k/atari/hades-pci.c index bee2b1443e36..6ca57b6564da 100644 --- a/trunk/arch/m68k/atari/hades-pci.c +++ b/trunk/arch/m68k/atari/hades-pci.c @@ -375,9 +375,10 @@ struct pci_bus_info * __init init_hades_pci(void) * Allocate memory for bus info structure. */ - bus = kzalloc(sizeof(struct pci_bus_info), GFP_KERNEL); + bus = kmalloc(sizeof(struct pci_bus_info), GFP_KERNEL); if (!bus) return NULL; + memset(bus, 0, sizeof(struct pci_bus_info)); /* * Claim resources. The m68k has no separate I/O space, both diff --git a/trunk/arch/m68k/mm/fault.c b/trunk/arch/m68k/mm/fault.c index 2adbeb16e1b8..911f2ce3f53e 100644 --- a/trunk/arch/m68k/mm/fault.c +++ b/trunk/arch/m68k/mm/fault.c @@ -99,7 +99,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (in_atomic() || !mm) + if (in_interrupt() || !mm) goto no_context; down_read(&mm->mmap_sem); diff --git a/trunk/arch/m68knommu/platform/5307/timers.c b/trunk/arch/m68knommu/platform/5307/timers.c index e5668af19789..24781f009337 100644 --- a/trunk/arch/m68knommu/platform/5307/timers.c +++ b/trunk/arch/m68knommu/platform/5307/timers.c @@ -3,7 +3,7 @@ /* * timers.c -- generic ColdFire hardware timer support. 
* - * Copyright (C) 1999-2006, Greg Ungerer (gerg@snapgear.com) + * Copyright (C) 1999-2003, Greg Ungerer (gerg@snapgear.com) */ /***************************************************************************/ @@ -44,14 +44,6 @@ unsigned int mcf_timerlevel = 5; extern void mcf_settimericr(int timer, int level); extern int mcf_timerirqpending(int timer); -#if defined(CONFIG_M532x) -#define __raw_readtrr __raw_readl -#define __raw_writetrr __raw_writel -#else -#define __raw_readtrr __raw_readw -#define __raw_writetrr __raw_writew -#endif - /***************************************************************************/ void coldfire_tick(void) @@ -65,7 +57,7 @@ void coldfire_tick(void) void coldfire_timer_init(irqreturn_t (*handler)(int, void *, struct pt_regs *)) { __raw_writew(MCFTIMER_TMR_DISABLE, TA(MCFTIMER_TMR)); - __raw_writetrr(((MCF_BUSCLK / 16) / HZ), TA(MCFTIMER_TRR)); + __raw_writew(((MCF_BUSCLK / 16) / HZ), TA(MCFTIMER_TRR)); __raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 | MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, TA(MCFTIMER_TMR)); @@ -84,7 +76,7 @@ unsigned long coldfire_timer_offset(void) unsigned long trr, tcn, offset; tcn = __raw_readw(TA(MCFTIMER_TCN)); - trr = __raw_readtrr(TA(MCFTIMER_TRR)); + trr = __raw_readw(TA(MCFTIMER_TRR)); offset = (tcn * (1000000 / HZ)) / trr; /* Check if we just wrapped the counters and maybe missed a tick */ @@ -128,7 +120,7 @@ void coldfire_profile_init(void) /* Set up TIMER 2 as high speed profile clock */ __raw_writew(MCFTIMER_TMR_DISABLE, PA(MCFTIMER_TMR)); - __raw_writetrr(((MCF_CLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR)); + __raw_writew(((MCF_CLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR)); __raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 | MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, PA(MCFTIMER_TMR)); diff --git a/trunk/arch/m68knommu/platform/68360/config.c b/trunk/arch/m68knommu/platform/68360/config.c index 1b36f6261764..c5482e3622eb 100644 --- a/trunk/arch/m68knommu/platform/68360/config.c +++ b/trunk/arch/m68knommu/platform/68360/config.c @@ -114,7 +114,7 @@ void BSP_gettod (int *yearp, int *monp, int *dayp, { } -int BSP_hwclk(int op, struct rtc_time *t) +int BSP_hwclk(int op, struct hwclk_time *t) { if (!op) { /* read */ diff --git a/trunk/arch/mips/Kconfig b/trunk/arch/mips/Kconfig index d8af858fe3f5..4d64960be035 100644 --- a/trunk/arch/mips/Kconfig +++ b/trunk/arch/mips/Kconfig @@ -242,7 +242,6 @@ config LASAT select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL select SYS_SUPPORTS_LITTLE_ENDIAN - select GENERIC_HARDIRQS_NO__DO_IRQ config MIPS_ATLAS bool "MIPS Atlas board" @@ -266,7 +265,6 @@ config MIPS_ATLAS select SYS_SUPPORTS_BIG_ENDIAN select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_SUPPORTS_MULTITHREADING if EXPERIMENTAL - select GENERIC_HARDIRQS_NO__DO_IRQ help This enables support for the MIPS Technologies Atlas evaluation board. @@ -421,7 +419,6 @@ config MOMENCO_OCELOT_C select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_BIG_ENDIAN - select GENERIC_HARDIRQS_NO__DO_IRQ help The Ocelot is a MIPS-based Single Board Computer (SBC) made by Momentum Computer . @@ -572,7 +569,6 @@ config SGI_IP27 select SYS_SUPPORTS_BIG_ENDIAN select SYS_SUPPORTS_NUMA select SYS_SUPPORTS_SMP - select GENERIC_HARDIRQS_NO__DO_IRQ help This are the SGI Origin 200, Origin 2000 and Onyx 2 Graphics workstations. 
To compile a Linux kernel that runs on these, say Y @@ -839,10 +835,6 @@ config SCHED_NO_NO_OMIT_FRAME_POINTER bool default y -config GENERIC_HARDIRQS_NO__DO_IRQ - bool - default n - # # Select some configuration options automatically based on user selections. # @@ -1004,7 +996,6 @@ config SOC_PNX8550 select HW_HAS_PCI select SYS_HAS_CPU_MIPS32_R1 select SYS_SUPPORTS_32BIT_KERNEL - select GENERIC_HARDIRQS_NO__DO_IRQ config SWAP_IO_SPACE bool diff --git a/trunk/arch/mips/dec/ecc-berr.c b/trunk/arch/mips/dec/ecc-berr.c index 6d55e8aab668..c8430c07355e 100644 --- a/trunk/arch/mips/dec/ecc-berr.c +++ b/trunk/arch/mips/dec/ecc-berr.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include diff --git a/trunk/arch/mips/dec/ioasic-irq.c b/trunk/arch/mips/dec/ioasic-irq.c index 4c7cb4048d35..269b22b34313 100644 --- a/trunk/arch/mips/dec/ioasic-irq.c +++ b/trunk/arch/mips/dec/ioasic-irq.c @@ -67,6 +67,7 @@ static struct irq_chip ioasic_irq_type = { .mask = mask_ioasic_irq, .mask_ack = ack_ioasic_irq, .unmask = unmask_ioasic_irq, + .end = end_ioasic_irq, }; @@ -105,7 +106,8 @@ void __init init_ioasic_irqs(int base) set_irq_chip_and_handler(i, &ioasic_irq_type, handle_level_irq); for (; i < base + IO_IRQ_LINES; i++) - set_irq_chip(i, &ioasic_dma_irq_type); + set_irq_chip_and_handler(i, &ioasic_dma_irq_type, + handle_level_irq); ioasic_irq_base = base; } diff --git a/trunk/arch/mips/dec/kn01-berr.c b/trunk/arch/mips/dec/kn01-berr.c index d3b8002bf1e7..f19b4617a0a6 100644 --- a/trunk/arch/mips/dec/kn01-berr.c +++ b/trunk/arch/mips/dec/kn01-berr.c @@ -20,10 +20,8 @@ #include #include -#include #include #include -#include #include #include #include diff --git a/trunk/arch/mips/dec/kn02-irq.c b/trunk/arch/mips/dec/kn02-irq.c index 916e46b8ccd8..5a9be4c93584 100644 --- a/trunk/arch/mips/dec/kn02-irq.c +++ b/trunk/arch/mips/dec/kn02-irq.c @@ -57,12 +57,19 @@ static void ack_kn02_irq(unsigned int irq) iob(); } +static void end_kn02_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) + unmask_kn02_irq(irq); +} + static struct irq_chip kn02_irq_type = { .typename = "KN02-CSR", .ack = ack_kn02_irq, .mask = mask_kn02_irq, .mask_ack = ack_kn02_irq, .unmask = unmask_kn02_irq, + .end = end_kn02_irq, }; diff --git a/trunk/arch/mips/emma2rh/common/irq_emma2rh.c b/trunk/arch/mips/emma2rh/common/irq_emma2rh.c index 8d880f0b06ec..59b98299c896 100644 --- a/trunk/arch/mips/emma2rh/common/irq_emma2rh.c +++ b/trunk/arch/mips/emma2rh/common/irq_emma2rh.c @@ -56,12 +56,19 @@ static void emma2rh_irq_disable(unsigned int irq) ll_emma2rh_irq_disable(irq - emma2rh_irq_base); } +static void emma2rh_irq_end(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) + ll_emma2rh_irq_enable(irq - emma2rh_irq_base); +} + struct irq_chip emma2rh_irq_controller = { .typename = "emma2rh_irq", .ack = emma2rh_irq_disable, .mask = emma2rh_irq_disable, .mask_ack = emma2rh_irq_disable, .unmask = emma2rh_irq_enable, + .end = emma2rh_irq_end, }; void emma2rh_irq_init(u32 irq_base) diff --git a/trunk/arch/mips/emma2rh/markeins/irq_markeins.c b/trunk/arch/mips/emma2rh/markeins/irq_markeins.c index 2116d9be5fa9..3ac4e405ecdc 100644 --- a/trunk/arch/mips/emma2rh/markeins/irq_markeins.c +++ b/trunk/arch/mips/emma2rh/markeins/irq_markeins.c @@ -48,12 +48,19 @@ static void emma2rh_sw_irq_disable(unsigned int irq) ll_emma2rh_sw_irq_disable(irq - emma2rh_sw_irq_base); } +static void emma2rh_sw_irq_end(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED | 
IRQ_INPROGRESS))) + ll_emma2rh_sw_irq_enable(irq - emma2rh_sw_irq_base); +} + struct irq_chip emma2rh_sw_irq_controller = { .typename = "emma2rh_sw_irq", .ack = emma2rh_sw_irq_disable, .mask = emma2rh_sw_irq_disable, .mask_ack = emma2rh_sw_irq_disable, .unmask = emma2rh_sw_irq_enable, + .end = emma2rh_sw_irq_end, }; void emma2rh_sw_irq_init(u32 irq_base) diff --git a/trunk/arch/mips/jazz/irq.c b/trunk/arch/mips/jazz/irq.c index f8d417b5c2bb..5c4f50cdf157 100644 --- a/trunk/arch/mips/jazz/irq.c +++ b/trunk/arch/mips/jazz/irq.c @@ -39,12 +39,19 @@ void disable_r4030_irq(unsigned int irq) spin_unlock_irqrestore(&r4030_lock, flags); } +static void end_r4030_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + enable_r4030_irq(irq); +} + static struct irq_chip r4030_irq_type = { .typename = "R4030", .ack = disable_r4030_irq, .mask = disable_r4030_irq, .mask_ack = disable_r4030_irq, .unmask = enable_r4030_irq, + .end = end_r4030_irq, }; void __init init_r4030_ints(void) diff --git a/trunk/arch/mips/kernel/binfmt_elfn32.c b/trunk/arch/mips/kernel/binfmt_elfn32.c index 9b34238d41c0..4a9f1ecefaf2 100644 --- a/trunk/arch/mips/kernel/binfmt_elfn32.c +++ b/trunk/arch/mips/kernel/binfmt_elfn32.c @@ -90,6 +90,7 @@ struct elf_prpsinfo32 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ }; +#define elf_addr_t u32 #define elf_caddr_t u32 #define init_elf_binfmt init_elfn32_binfmt diff --git a/trunk/arch/mips/kernel/binfmt_elfo32.c b/trunk/arch/mips/kernel/binfmt_elfo32.c index 993f7ec70f35..e31813779895 100644 --- a/trunk/arch/mips/kernel/binfmt_elfo32.c +++ b/trunk/arch/mips/kernel/binfmt_elfo32.c @@ -92,6 +92,7 @@ struct elf_prpsinfo32 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ }; +#define elf_addr_t u32 #define elf_caddr_t u32 #define init_elf_binfmt init_elf32_binfmt diff --git a/trunk/arch/mips/kernel/i8259.c b/trunk/arch/mips/kernel/i8259.c index b59a676c6d0e..2526c0ca4d81 100644 --- a/trunk/arch/mips/kernel/i8259.c +++ b/trunk/arch/mips/kernel/i8259.c @@ -19,6 +19,9 @@ #include #include +void enable_8259A_irq(unsigned int irq); +void disable_8259A_irq(unsigned int irq); + /* * This is the 'legacy' 8259A Programmable Interrupt Controller, * present in the majority of PC/AT boxes. @@ -28,16 +31,23 @@ * moves to arch independent land */ -static int i8259A_auto_eoi; DEFINE_SPINLOCK(i8259A_lock); -/* some platforms call this... 
*/ + +static void end_8259A_irq (unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) && + irq_desc[irq].action) + enable_8259A_irq(irq); +} + void mask_and_ack_8259A(unsigned int); -static struct irq_chip i8259A_chip = { - .name = "XT-PIC", - .mask = disable_8259A_irq, - .unmask = enable_8259A_irq, - .mask_ack = mask_and_ack_8259A, +static struct irq_chip i8259A_irq_type = { + .typename = "XT-PIC", + .enable = enable_8259A_irq, + .disable = disable_8259A_irq, + .ack = mask_and_ack_8259A, + .end = end_8259A_irq, }; /* @@ -49,8 +59,8 @@ static struct irq_chip i8259A_chip = { */ static unsigned int cached_irq_mask = 0xffff; -#define cached_master_mask (cached_irq_mask) -#define cached_slave_mask (cached_irq_mask >> 8) +#define cached_21 (cached_irq_mask) +#define cached_A1 (cached_irq_mask >> 8) void disable_8259A_irq(unsigned int irq) { @@ -60,9 +70,9 @@ void disable_8259A_irq(unsigned int irq) spin_lock_irqsave(&i8259A_lock, flags); cached_irq_mask |= mask; if (irq & 8) - outb(cached_slave_mask, PIC_SLAVE_IMR); + outb(cached_A1,0xA1); else - outb(cached_master_mask, PIC_MASTER_IMR); + outb(cached_21,0x21); spin_unlock_irqrestore(&i8259A_lock, flags); } @@ -74,9 +84,9 @@ void enable_8259A_irq(unsigned int irq) spin_lock_irqsave(&i8259A_lock, flags); cached_irq_mask &= mask; if (irq & 8) - outb(cached_slave_mask, PIC_SLAVE_IMR); + outb(cached_A1,0xA1); else - outb(cached_master_mask, PIC_MASTER_IMR); + outb(cached_21,0x21); spin_unlock_irqrestore(&i8259A_lock, flags); } @@ -88,9 +98,9 @@ int i8259A_irq_pending(unsigned int irq) spin_lock_irqsave(&i8259A_lock, flags); if (irq < 8) - ret = inb(PIC_MASTER_CMD) & mask; + ret = inb(0x20) & mask; else - ret = inb(PIC_SLAVE_CMD) & (mask >> 8); + ret = inb(0xA0) & (mask >> 8); spin_unlock_irqrestore(&i8259A_lock, flags); return ret; @@ -99,7 +109,7 @@ int i8259A_irq_pending(unsigned int irq) void make_8259A_irq(unsigned int irq) { disable_irq_nosync(irq); - set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq); + set_irq_chip(irq, &i8259A_irq_type); enable_irq(irq); } @@ -115,14 +125,14 @@ static inline int i8259A_irq_real(unsigned int irq) int irqmask = 1 << irq; if (irq < 8) { - outb(0x0B,PIC_MASTER_CMD); /* ISR register */ - value = inb(PIC_MASTER_CMD) & irqmask; - outb(0x0A,PIC_MASTER_CMD); /* back to the IRR register */ + outb(0x0B,0x20); /* ISR register */ + value = inb(0x20) & irqmask; + outb(0x0A,0x20); /* back to the IRR register */ return value; } - outb(0x0B,PIC_SLAVE_CMD); /* ISR register */ - value = inb(PIC_SLAVE_CMD) & (irqmask >> 8); - outb(0x0A,PIC_SLAVE_CMD); /* back to the IRR register */ + outb(0x0B,0xA0); /* ISR register */ + value = inb(0xA0) & (irqmask >> 8); + outb(0x0A,0xA0); /* back to the IRR register */ return value; } @@ -139,19 +149,17 @@ void mask_and_ack_8259A(unsigned int irq) spin_lock_irqsave(&i8259A_lock, flags); /* - * Lightweight spurious IRQ detection. We do not want - * to overdo spurious IRQ handling - it's usually a sign - * of hardware problems, so we only do the checks we can - * do without slowing down good hardware unnecessarily. + * Lightweight spurious IRQ detection. We do not want to overdo + * spurious IRQ handling - it's usually a sign of hardware problems, so + * we only do the checks we can do without slowing down good hardware + * nnecesserily. * - * Note that IRQ7 and IRQ15 (the two spurious IRQs - * usually resulting from the 8259A-1|2 PICs) occur - * even if the IRQ is masked in the 8259A. 
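The cached_irq_mask handling in the i8259.c hunks above is compact enough to misread: one 16-bit shadow covers both PICs, and only the relevant byte is pushed out to the master (0x21) or slave (0xA1) interrupt-mask register. What follows is a stand-alone model of just that arithmetic, assuming nothing beyond what the hunk shows; the port writes are replaced by a two-byte shadow array, and the helper names (write_imr, enable_irq_model, disable_irq_model) are invented for the illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Shadow of the two 8259A interrupt mask registers: imr[0] models port
 * 0x21 (master), imr[1] models port 0xA1 (slave).  A set bit means the
 * line is masked, exactly like cached_irq_mask in the hunk above. */
static uint16_t cached_irq_mask = 0xffff;
static uint8_t imr[2];

static void write_imr(unsigned int irq)
{
	if (irq & 8)
		imr[1] = cached_irq_mask >> 8;		/* outb(cached_A1, 0xA1) */
	else
		imr[0] = cached_irq_mask & 0xff;	/* outb(cached_21, 0x21) */
}

static void disable_irq_model(unsigned int irq)		/* cf. disable_8259A_irq() */
{
	cached_irq_mask |= 1u << irq;
	write_imr(irq);
}

static void enable_irq_model(unsigned int irq)		/* cf. enable_8259A_irq() */
{
	cached_irq_mask &= ~(1u << irq);
	write_imr(irq);
}

int main(void)
{
	enable_irq_model(3);		/* master line 3           */
	enable_irq_model(12);		/* IRQ 12 = slave line 4   */
	assert(imr[0] == 0xf7);		/* bit 3 cleared on master */
	assert(imr[1] == 0xef);		/* bit 4 cleared on slave  */
	disable_irq_model(3);
	assert(imr[0] == 0xff);
	printf("mask model: master=%02x slave=%02x\n", imr[0], imr[1]);
	return 0;
}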
Thus we - * can check spurious 8259A IRQs without doing the - * quite slow i8259A_irq_real() call for every IRQ. - * This does not cover 100% of spurious interrupts, - * but should be enough to warn the user that there - * is something bad going on ... + * Note that IRQ7 and IRQ15 (the two spurious IRQs usually resulting + * rom the 8259A-1|2 PICs) occur even if the IRQ is masked in the 8259A. + * Thus we can check spurious 8259A IRQs without doing the quite slow + * i8259A_irq_real() call for every IRQ. This does not cover 100% of + * spurious interrupts, but should be enough to warn the user that + * there is something bad going on ... */ if (cached_irq_mask & irqmask) goto spurious_8259A_irq; @@ -159,14 +167,14 @@ void mask_and_ack_8259A(unsigned int irq) handle_real_irq: if (irq & 8) { - inb(PIC_SLAVE_IMR); /* DUMMY - (do we need this?) */ - outb(cached_slave_mask, PIC_SLAVE_IMR); - outb(0x60+(irq&7),PIC_SLAVE_CMD);/* 'Specific EOI' to slave */ - outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */ + inb(0xA1); /* DUMMY - (do we need this?) */ + outb(cached_A1,0xA1); + outb(0x60+(irq&7),0xA0);/* 'Specific EOI' to slave */ + outb(0x62,0x20); /* 'Specific EOI' to master-IRQ2 */ } else { - inb(PIC_MASTER_IMR); /* DUMMY - (do we need this?) */ - outb(cached_master_mask, PIC_MASTER_IMR); - outb(0x60+irq,PIC_MASTER_CMD); /* 'Specific EOI to master */ + inb(0x21); /* DUMMY - (do we need this?) */ + outb(cached_21,0x21); + outb(0x60+irq,0x20); /* 'Specific EOI' to master */ } #ifdef CONFIG_MIPS_MT_SMTC if (irq_hwmask[irq] & ST0_IM) @@ -187,7 +195,7 @@ void mask_and_ack_8259A(unsigned int irq) goto handle_real_irq; { - static int spurious_irq_mask; + static int spurious_irq_mask = 0; /* * At this point we can be sure the IRQ is spurious, * lets ACK and report it. [once per IRQ] @@ -208,25 +216,13 @@ void mask_and_ack_8259A(unsigned int irq) static int i8259A_resume(struct sys_device *dev) { - init_8259A(i8259A_auto_eoi); - return 0; -} - -static int i8259A_shutdown(struct sys_device *dev) -{ - /* Put the i8259A into a quiescent state that - * the kernel initialization code can get it - * out of. - */ - outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ - outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */ + init_8259A(0); return 0; } static struct sysdev_class i8259_sysdev_class = { set_kset_name("i8259"), .resume = i8259A_resume, - .shutdown = i8259A_shutdown, }; static struct sys_device device_i8259A = { @@ -248,41 +244,41 @@ void __init init_8259A(int auto_eoi) { unsigned long flags; - i8259A_auto_eoi = auto_eoi; - spin_lock_irqsave(&i8259A_lock, flags); - outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ - outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ + outb(0xff, 0x21); /* mask all of 8259A-1 */ + outb(0xff, 0xA1); /* mask all of 8259A-2 */ /* * outb_p - this has to work on a wide range of PC hardware. 
*/ - outb_p(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */ - outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR); /* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */ - outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR); /* 8259A-1 (the master) has a slave on IR2 */ - if (auto_eoi) /* master does Auto EOI */ - outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR); - else /* master expects normal EOI */ - outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR); - - outb_p(0x11, PIC_SLAVE_CMD); /* ICW1: select 8259A-2 init */ - outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR); /* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */ - outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR); /* 8259A-2 is a slave on master's IR2 */ - outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */ + outb_p(0x11, 0x20); /* ICW1: select 8259A-1 init */ + outb_p(0x00, 0x21); /* ICW2: 8259A-1 IR0-7 mapped to 0x00-0x07 */ + outb_p(0x04, 0x21); /* 8259A-1 (the master) has a slave on IR2 */ + if (auto_eoi) + outb_p(0x03, 0x21); /* master does Auto EOI */ + else + outb_p(0x01, 0x21); /* master expects normal EOI */ + + outb_p(0x11, 0xA0); /* ICW1: select 8259A-2 init */ + outb_p(0x08, 0xA1); /* ICW2: 8259A-2 IR0-7 mapped to 0x08-0x0f */ + outb_p(0x02, 0xA1); /* 8259A-2 is a slave on master's IR2 */ + outb_p(0x01, 0xA1); /* (slave's support for AEOI in flat mode + is to be investigated) */ + if (auto_eoi) /* - * In AEOI mode we just have to mask the interrupt + * in AEOI mode we just have to mask the interrupt * when acking. */ - i8259A_chip.mask_ack = disable_8259A_irq; + i8259A_irq_type.ack = disable_8259A_irq; else - i8259A_chip.mask_ack = mask_and_ack_8259A; + i8259A_irq_type.ack = mask_and_ack_8259A; udelay(100); /* wait for 8259A to initialize */ - outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */ - outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */ + outb(cached_21, 0x21); /* restore master IRQ mask */ + outb(cached_A1, 0xA1); /* restore slave IRQ mask */ spin_unlock_irqrestore(&i8259A_lock, flags); } @@ -295,17 +291,11 @@ static struct irqaction irq2 = { }; static struct resource pic1_io_resource = { - .name = "pic1", - .start = PIC_MASTER_CMD, - .end = PIC_MASTER_IMR, - .flags = IORESOURCE_BUSY + .name = "pic1", .start = 0x20, .end = 0x21, .flags = IORESOURCE_BUSY }; static struct resource pic2_io_resource = { - .name = "pic2", - .start = PIC_SLAVE_CMD, - .end = PIC_SLAVE_IMR, - .flags = IORESOURCE_BUSY + .name = "pic2", .start = 0xa0, .end = 0xa1, .flags = IORESOURCE_BUSY }; /* @@ -323,7 +313,7 @@ void __init init_i8259_irqs (void) init_8259A(0); for (i = 0; i < 16; i++) - set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq); + set_irq_chip(i, &i8259A_irq_type); - setup_irq(PIC_CASCADE_IR, &irq2); + setup_irq(2, &irq2); } diff --git a/trunk/arch/mips/kernel/irixelf.c b/trunk/arch/mips/kernel/irixelf.c index 1bbefbf43373..ab12c8f01518 100644 --- a/trunk/arch/mips/kernel/irixelf.c +++ b/trunk/arch/mips/kernel/irixelf.c @@ -52,6 +52,10 @@ static struct linux_binfmt irix_format = { irix_core_dump, PAGE_SIZE }; +#ifndef elf_addr_t +#define elf_addr_t unsigned long +#endif + #ifdef DEBUG /* Debugging routines. 
*/ static char *get_elf_p_type(Elf32_Word p_type) @@ -1009,7 +1013,7 @@ static int notesize(struct memelfnote *en) int sz; sz = sizeof(struct elf_note); - sz += roundup(strlen(en->name) + 1, 4); + sz += roundup(strlen(en->name), 4); sz += roundup(en->datasz, 4); return sz; @@ -1028,7 +1032,7 @@ static int writenote(struct memelfnote *men, struct file *file) { struct elf_note en; - en.n_namesz = strlen(men->name) + 1; + en.n_namesz = strlen(men->name); en.n_descsz = men->datasz; en.n_type = men->type; diff --git a/trunk/arch/mips/kernel/irq-mv6434x.c b/trunk/arch/mips/kernel/irq-mv6434x.c index efbd219845b5..6cfb31cafde2 100644 --- a/trunk/arch/mips/kernel/irq-mv6434x.c +++ b/trunk/arch/mips/kernel/irq-mv6434x.c @@ -66,6 +66,15 @@ static inline void unmask_mv64340_irq(unsigned int irq) } } +/* + * End IRQ processing + */ +static void end_mv64340_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + unmask_mv64340_irq(irq); +} + /* * Interrupt handler for interrupts coming from the Marvell chip. * It could be built in ethernet ports etc... @@ -97,6 +106,7 @@ struct irq_chip mv64340_irq_type = { .mask = mask_mv64340_irq, .mask_ack = mask_mv64340_irq, .unmask = unmask_mv64340_irq, + .end = end_mv64340_irq, }; void __init mv64340_irq_init(unsigned int base) diff --git a/trunk/arch/mips/kernel/irq-rm7000.c b/trunk/arch/mips/kernel/irq-rm7000.c index 123324ba8c14..ddcc2a5f8a06 100644 --- a/trunk/arch/mips/kernel/irq-rm7000.c +++ b/trunk/arch/mips/kernel/irq-rm7000.c @@ -29,12 +29,19 @@ static inline void mask_rm7k_irq(unsigned int irq) clear_c0_intcontrol(0x100 << (irq - irq_base)); } +static void rm7k_cpu_irq_end(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) + unmask_rm7k_irq(irq); +} + static struct irq_chip rm7k_irq_controller = { .typename = "RM7000", .ack = mask_rm7k_irq, .mask = mask_rm7k_irq, .mask_ack = mask_rm7k_irq, .unmask = unmask_rm7k_irq, + .end = rm7k_cpu_irq_end, }; void __init rm7k_cpu_irq_init(int base) diff --git a/trunk/arch/mips/kernel/irq-rm9000.c b/trunk/arch/mips/kernel/irq-rm9000.c index 0e6f4c5349d2..ba6440c88abd 100644 --- a/trunk/arch/mips/kernel/irq-rm9000.c +++ b/trunk/arch/mips/kernel/irq-rm9000.c @@ -80,12 +80,19 @@ static void rm9k_perfcounter_irq_shutdown(unsigned int irq) on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1); } +static void rm9k_cpu_irq_end(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) + unmask_rm9k_irq(irq); +} + static struct irq_chip rm9k_irq_controller = { .typename = "RM9000", .ack = mask_rm9k_irq, .mask = mask_rm9k_irq, .mask_ack = mask_rm9k_irq, .unmask = unmask_rm9k_irq, + .end = rm9k_cpu_irq_end, }; static struct irq_chip rm9k_perfcounter_irq = { @@ -96,6 +103,7 @@ static struct irq_chip rm9k_perfcounter_irq = { .mask = mask_rm9k_irq, .mask_ack = mask_rm9k_irq, .unmask = unmask_rm9k_irq, + .end = rm9k_cpu_irq_end, }; unsigned int rm9000_perfcount_irq; diff --git a/trunk/arch/mips/kernel/irq.c b/trunk/arch/mips/kernel/irq.c index 2fe4c868a801..b339798b3172 100644 --- a/trunk/arch/mips/kernel/irq.c +++ b/trunk/arch/mips/kernel/irq.c @@ -117,7 +117,7 @@ int show_interrupts(struct seq_file *p, void *v) for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); #endif - seq_printf(p, " %14s", irq_desc[i].chip->name); + seq_printf(p, " %14s", irq_desc[i].chip->typename); seq_printf(p, " %s", action->name); for (action=action->next; action; action = action->next) diff --git 
a/trunk/arch/mips/kernel/irq_cpu.c b/trunk/arch/mips/kernel/irq_cpu.c index fcc86b96ccf6..be5ac23d3812 100644 --- a/trunk/arch/mips/kernel/irq_cpu.c +++ b/trunk/arch/mips/kernel/irq_cpu.c @@ -50,6 +50,12 @@ static inline void mask_mips_irq(unsigned int irq) irq_disable_hazard(); } +static void mips_cpu_irq_end(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) + unmask_mips_irq(irq); +} + static struct irq_chip mips_cpu_irq_controller = { .typename = "MIPS", .ack = mask_mips_irq, @@ -57,6 +63,7 @@ static struct irq_chip mips_cpu_irq_controller = { .mask_ack = mask_mips_irq, .unmask = unmask_mips_irq, .eoi = unmask_mips_irq, + .end = mips_cpu_irq_end, }; /* @@ -89,6 +96,8 @@ static void mips_mt_cpu_irq_ack(unsigned int irq) mask_mips_mt_irq(irq); } +#define mips_mt_cpu_irq_end mips_cpu_irq_end + static struct irq_chip mips_mt_cpu_irq_controller = { .typename = "MIPS", .startup = mips_mt_cpu_irq_startup, @@ -97,6 +106,7 @@ static struct irq_chip mips_mt_cpu_irq_controller = { .mask_ack = mips_mt_cpu_irq_ack, .unmask = unmask_mips_mt_irq, .eoi = unmask_mips_mt_irq, + .end = mips_mt_cpu_irq_end, }; void __init mips_cpu_irq_init(int irq_base) diff --git a/trunk/arch/mips/kernel/kspd.c b/trunk/arch/mips/kernel/kspd.c index 2c82412b9efe..f06a144c7881 100644 --- a/trunk/arch/mips/kernel/kspd.c +++ b/trunk/arch/mips/kernel/kspd.c @@ -319,7 +319,7 @@ static void sp_cleanup(void) static int channel_open = 0; /* the work handler */ -static void sp_work(struct work_struct *unused) +static void sp_work(void *data) { if (!channel_open) { if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) { @@ -354,7 +354,7 @@ static void startwork(int vpe) return; } - INIT_WORK(&work, sp_work); + INIT_WORK(&work, sp_work, NULL); queue_work(workqueue, &work); } else queue_work(workqueue, &work); diff --git a/trunk/arch/mips/lasat/interrupt.c b/trunk/arch/mips/lasat/interrupt.c index 2affa5ff171c..4a84a7beac53 100644 --- a/trunk/arch/mips/lasat/interrupt.c +++ b/trunk/arch/mips/lasat/interrupt.c @@ -44,12 +44,19 @@ void enable_lasat_irq(unsigned int irq_nr) *lasat_int_mask |= (1 << irq_nr) << lasat_int_mask_shift; } +static void end_lasat_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + enable_lasat_irq(irq); +} + static struct irq_chip lasat_irq_type = { .typename = "Lasat", .ack = disable_lasat_irq, .mask = disable_lasat_irq, .mask_ack = disable_lasat_irq, .unmask = enable_lasat_irq, + .end = end_lasat_irq, }; static inline int ls1bit32(unsigned int x) diff --git a/trunk/arch/mips/mm/dma-coherent.c b/trunk/arch/mips/mm/dma-coherent.c index 5697c6e250a3..7fa5fd16e46b 100644 --- a/trunk/arch/mips/mm/dma-coherent.c +++ b/trunk/arch/mips/mm/dma-coherent.c @@ -190,14 +190,14 @@ int dma_supported(struct device *dev, u64 mask) EXPORT_SYMBOL(dma_supported); -int dma_is_consistent(struct device *dev, dma_addr_t dma_addr) +int dma_is_consistent(dma_addr_t dma_addr) { return 1; } EXPORT_SYMBOL(dma_is_consistent); -void dma_cache_sync(struct device *dev, void *vaddr, size_t size, +void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction) { BUG_ON(direction == DMA_NONE); diff --git a/trunk/arch/mips/mm/dma-ip27.c b/trunk/arch/mips/mm/dma-ip27.c index f088344db465..8da19fd22ac6 100644 --- a/trunk/arch/mips/mm/dma-ip27.c +++ b/trunk/arch/mips/mm/dma-ip27.c @@ -197,14 +197,14 @@ int dma_supported(struct device *dev, u64 mask) EXPORT_SYMBOL(dma_supported); -int dma_is_consistent(struct device *dev, dma_addr_t dma_addr) +int 
dma_is_consistent(dma_addr_t dma_addr) { return 1; } EXPORT_SYMBOL(dma_is_consistent); -void dma_cache_sync(struct device *dev, void *vaddr, size_t size, +void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction) { BUG_ON(direction == DMA_NONE); diff --git a/trunk/arch/mips/mm/dma-ip32.c b/trunk/arch/mips/mm/dma-ip32.c index b42b6f7456e6..ec54ed0d26ff 100644 --- a/trunk/arch/mips/mm/dma-ip32.c +++ b/trunk/arch/mips/mm/dma-ip32.c @@ -363,15 +363,14 @@ int dma_supported(struct device *dev, u64 mask) EXPORT_SYMBOL(dma_supported); -int dma_is_consistent(struct device *dev, dma_addr_t dma_addr) +int dma_is_consistent(dma_addr_t dma_addr) { return 1; } EXPORT_SYMBOL(dma_is_consistent); -void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) +void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction) { if (direction == DMA_NONE) return; diff --git a/trunk/arch/mips/mm/dma-noncoherent.c b/trunk/arch/mips/mm/dma-noncoherent.c index 8cecef0957c3..2eeffe5c2a3a 100644 --- a/trunk/arch/mips/mm/dma-noncoherent.c +++ b/trunk/arch/mips/mm/dma-noncoherent.c @@ -299,15 +299,14 @@ int dma_supported(struct device *dev, u64 mask) EXPORT_SYMBOL(dma_supported); -int dma_is_consistent(struct device *dev, dma_addr_t dma_addr) +int dma_is_consistent(dma_addr_t dma_addr) { return 1; } EXPORT_SYMBOL(dma_is_consistent); -void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) +void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction) { if (direction == DMA_NONE) return; diff --git a/trunk/arch/mips/mm/highmem.c b/trunk/arch/mips/mm/highmem.c index 675502ada5a2..99ebf3ccc222 100644 --- a/trunk/arch/mips/mm/highmem.c +++ b/trunk/arch/mips/mm/highmem.c @@ -39,7 +39,7 @@ void *__kmap_atomic(struct page *page, enum km_type type) unsigned long vaddr; /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ - pagefault_disable(); + inc_preempt_count(); if (!PageHighMem(page)) return page_address(page); @@ -62,7 +62,8 @@ void __kunmap_atomic(void *kvaddr, enum km_type type) enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); if (vaddr < FIXADDR_START) { // FIXME - pagefault_enable(); + dec_preempt_count(); + preempt_check_resched(); return; } @@ -77,7 +78,8 @@ void __kunmap_atomic(void *kvaddr, enum km_type type) local_flush_tlb_one(vaddr); #endif - pagefault_enable(); + dec_preempt_count(); + preempt_check_resched(); } #ifndef CONFIG_LIMITED_DMA @@ -90,7 +92,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) enum fixed_addresses idx; unsigned long vaddr; - pagefault_disable(); + inc_preempt_count(); idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); diff --git a/trunk/arch/mips/momentum/ocelot_c/cpci-irq.c b/trunk/arch/mips/momentum/ocelot_c/cpci-irq.c index bb11fef08472..e5a4a0a8a7f0 100644 --- a/trunk/arch/mips/momentum/ocelot_c/cpci-irq.c +++ b/trunk/arch/mips/momentum/ocelot_c/cpci-irq.c @@ -65,6 +65,15 @@ static inline void unmask_cpci_irq(unsigned int irq) value = OCELOT_FPGA_READ(INTMASK); } +/* + * End IRQ processing + */ +static void end_cpci_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + unmask_cpci_irq(irq); +} + /* * Interrupt handler for interrupts coming from the FPGA chip. * It could be built in ethernet ports etc... 
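Almost every irq_chip touched in this series grows the same .end method back: re-enable the line on exit from the handler unless it has been disabled in the meantime or is still marked in progress on another CPU. That only makes sense in the old __do_IRQ() flow, where the chip's .ack (here doubling as the mask operation) runs before the handler and .end runs after it. The fragment below restates the pattern once, as a kernel-context sketch rather than a buildable program; mask_foo_irq/unmask_foo_irq and the "FOO" chip are placeholders for whatever the individual board files actually use.

static void end_foo_irq(unsigned int irq)
{
	/* Re-enable only if nobody disabled the line from the handler
	 * and no other CPU is still inside it. */
	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
		unmask_foo_irq(irq);
}

static struct irq_chip foo_irq_type = {
	.typename = "FOO",
	.ack      = mask_foo_irq,	/* masks the line before the handler runs */
	.mask     = mask_foo_irq,
	.mask_ack = mask_foo_irq,
	.unmask   = unmask_foo_irq,
	.end      = end_foo_irq,	/* __do_IRQ() calls this on the way out */
};

The variants that additionally test irq_desc[irq].action (end_8259A_irq, end_bridge_irq above) skip the unmask when no handler is registered on the line at all.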
@@ -89,6 +98,7 @@ struct irq_chip cpci_irq_type = { .mask = mask_cpci_irq, .mask_ack = mask_cpci_irq, .unmask = unmask_cpci_irq, + .end = end_cpci_irq, }; void cpci_irq_init(void) diff --git a/trunk/arch/mips/momentum/ocelot_c/uart-irq.c b/trunk/arch/mips/momentum/ocelot_c/uart-irq.c index a7a80c0da569..0029f0008dea 100644 --- a/trunk/arch/mips/momentum/ocelot_c/uart-irq.c +++ b/trunk/arch/mips/momentum/ocelot_c/uart-irq.c @@ -59,6 +59,15 @@ static inline void unmask_uart_irq(unsigned int irq) value = OCELOT_FPGA_READ(UART_INTMASK); } +/* + * End IRQ processing + */ +static void end_uart_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + unmask_uart_irq(irq); +} + /* * Interrupt handler for interrupts coming from the FPGA chip. */ @@ -82,6 +91,7 @@ struct irq_chip uart_irq_type = { .mask = mask_uart_irq, .mask_ack = mask_uart_irq, .unmask = unmask_uart_irq, + .end = end_uart_irq, }; void uart_irq_init(void) diff --git a/trunk/arch/mips/philips/pnx8550/common/int.c b/trunk/arch/mips/philips/pnx8550/common/int.c index 2c36c108c4d6..0dc23930edbd 100644 --- a/trunk/arch/mips/philips/pnx8550/common/int.c +++ b/trunk/arch/mips/philips/pnx8550/common/int.c @@ -158,12 +158,20 @@ int pnx8550_set_gic_priority(int irq, int priority) return prev_priority; } +static void end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) { + unmask_irq(irq); + } +} + static struct irq_chip level_irq_type = { .typename = "PNX Level IRQ", .ack = mask_irq, .mask = mask_irq, .mask_ack = mask_irq, .unmask = unmask_irq, + .end = end_irq, }; static struct irqaction gic_action = { diff --git a/trunk/arch/mips/sgi-ip22/ip22-int.c b/trunk/arch/mips/sgi-ip22/ip22-int.c index c44f8be0644f..c7b138053159 100644 --- a/trunk/arch/mips/sgi-ip22/ip22-int.c +++ b/trunk/arch/mips/sgi-ip22/ip22-int.c @@ -51,12 +51,19 @@ static void disable_local0_irq(unsigned int irq) sgint->imask0 &= ~(1 << (irq - SGINT_LOCAL0)); } +static void end_local0_irq (unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + enable_local0_irq(irq); +} + static struct irq_chip ip22_local0_irq_type = { .typename = "IP22 local 0", .ack = disable_local0_irq, .mask = disable_local0_irq, .mask_ack = disable_local0_irq, .unmask = enable_local0_irq, + .end = end_local0_irq, }; static void enable_local1_irq(unsigned int irq) @@ -72,12 +79,19 @@ void disable_local1_irq(unsigned int irq) sgint->imask1 &= ~(1 << (irq - SGINT_LOCAL1)); } +static void end_local1_irq (unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + enable_local1_irq(irq); +} + static struct irq_chip ip22_local1_irq_type = { .typename = "IP22 local 1", .ack = disable_local1_irq, .mask = disable_local1_irq, .mask_ack = disable_local1_irq, .unmask = enable_local1_irq, + .end = end_local1_irq, }; static void enable_local2_irq(unsigned int irq) @@ -93,12 +107,19 @@ void disable_local2_irq(unsigned int irq) sgint->imask0 &= ~(1 << (SGI_MAP_0_IRQ - SGINT_LOCAL0)); } +static void end_local2_irq (unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + enable_local2_irq(irq); +} + static struct irq_chip ip22_local2_irq_type = { .typename = "IP22 local 2", .ack = disable_local2_irq, .mask = disable_local2_irq, .mask_ack = disable_local2_irq, .unmask = enable_local2_irq, + .end = end_local2_irq, }; static void enable_local3_irq(unsigned int irq) @@ -114,12 +135,19 @@ void disable_local3_irq(unsigned int irq) sgint->imask1 &= ~(1 << (SGI_MAP_1_IRQ - 
SGINT_LOCAL1)); } +static void end_local3_irq (unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + enable_local3_irq(irq); +} + static struct irq_chip ip22_local3_irq_type = { .typename = "IP22 local 3", .ack = disable_local3_irq, .mask = disable_local3_irq, .mask_ack = disable_local3_irq, .unmask = enable_local3_irq, + .end = end_local3_irq, }; static void indy_local0_irqdispatch(void) diff --git a/trunk/arch/mips/sgi-ip27/ip27-irq.c b/trunk/arch/mips/sgi-ip27/ip27-irq.c index 319f8803ef6f..5f8835b4e84a 100644 --- a/trunk/arch/mips/sgi-ip27/ip27-irq.c +++ b/trunk/arch/mips/sgi-ip27/ip27-irq.c @@ -332,6 +332,13 @@ static inline void disable_bridge_irq(unsigned int irq) intr_disconnect_level(cpu, swlevel); } +static void end_bridge_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) && + irq_desc[irq].action) + enable_bridge_irq(irq); +} + static struct irq_chip bridge_irq_type = { .typename = "bridge", .startup = startup_bridge_irq, @@ -340,6 +347,7 @@ static struct irq_chip bridge_irq_type = { .mask = disable_bridge_irq, .mask_ack = disable_bridge_irq, .unmask = enable_bridge_irq, + .end = end_bridge_irq, }; void __devinit register_bridge_irq(unsigned int irq) diff --git a/trunk/arch/mips/sgi-ip27/ip27-timer.c b/trunk/arch/mips/sgi-ip27/ip27-timer.c index c20e9899b34b..7d361726bbfb 100644 --- a/trunk/arch/mips/sgi-ip27/ip27-timer.c +++ b/trunk/arch/mips/sgi-ip27/ip27-timer.c @@ -180,6 +180,10 @@ static void disable_rt_irq(unsigned int irq) { } +static void end_rt_irq(unsigned int irq) +{ +} + static struct irq_chip rt_irq_type = { .typename = "SN HUB RT timer", .ack = disable_rt_irq, @@ -187,6 +191,7 @@ static struct irq_chip rt_irq_type = { .mask_ack = disable_rt_irq, .unmask = enable_rt_irq, .eoi = enable_rt_irq, + .end = end_rt_irq, }; static struct irqaction rt_irqaction = { diff --git a/trunk/arch/mips/tx4927/common/tx4927_irq.c b/trunk/arch/mips/tx4927/common/tx4927_irq.c index ed4a19adf361..21873de49aa8 100644 --- a/trunk/arch/mips/tx4927/common/tx4927_irq.c +++ b/trunk/arch/mips/tx4927/common/tx4927_irq.c @@ -66,10 +66,12 @@ #define TX4927_IRQ_CP0_INIT ( 1 << 10 ) #define TX4927_IRQ_CP0_ENABLE ( 1 << 13 ) #define TX4927_IRQ_CP0_DISABLE ( 1 << 14 ) +#define TX4927_IRQ_CP0_ENDIRQ ( 1 << 16 ) #define TX4927_IRQ_PIC_INIT ( 1 << 20 ) #define TX4927_IRQ_PIC_ENABLE ( 1 << 23 ) #define TX4927_IRQ_PIC_DISABLE ( 1 << 24 ) +#define TX4927_IRQ_PIC_ENDIRQ ( 1 << 26 ) #define TX4927_IRQ_ALL 0xffffffff #endif @@ -80,10 +82,12 @@ static const u32 tx4927_irq_debug_flag = (TX4927_IRQ_NONE | TX4927_IRQ_WARN | TX4927_IRQ_EROR // | TX4927_IRQ_CP0_INIT // | TX4927_IRQ_CP0_ENABLE +// | TX4927_IRQ_CP0_DISABLE // | TX4927_IRQ_CP0_ENDIRQ // | TX4927_IRQ_PIC_INIT // | TX4927_IRQ_PIC_ENABLE // | TX4927_IRQ_PIC_DISABLE +// | TX4927_IRQ_PIC_ENDIRQ // | TX4927_IRQ_INIT // | TX4927_IRQ_NEST1 // | TX4927_IRQ_NEST2 @@ -110,9 +114,11 @@ static const u32 tx4927_irq_debug_flag = (TX4927_IRQ_NONE static void tx4927_irq_cp0_enable(unsigned int irq); static void tx4927_irq_cp0_disable(unsigned int irq); +static void tx4927_irq_cp0_end(unsigned int irq); static void tx4927_irq_pic_enable(unsigned int irq); static void tx4927_irq_pic_disable(unsigned int irq); +static void tx4927_irq_pic_end(unsigned int irq); /* * Kernel structs for all pic's @@ -125,6 +131,7 @@ static struct irq_chip tx4927_irq_cp0_type = { .mask = tx4927_irq_cp0_disable, .mask_ack = tx4927_irq_cp0_disable, .unmask = tx4927_irq_cp0_enable, + .end = tx4927_irq_cp0_end, }; #define 
TX4927_PIC_NAME "TX4927-PIC" @@ -134,6 +141,7 @@ static struct irq_chip tx4927_irq_pic_type = { .mask = tx4927_irq_pic_disable, .mask_ack = tx4927_irq_pic_disable, .unmask = tx4927_irq_pic_enable, + .end = tx4927_irq_pic_end, }; #define TX4927_PIC_ACTION(s) { no_action, 0, CPU_MASK_NONE, s, NULL, NULL } @@ -206,6 +214,15 @@ static void tx4927_irq_cp0_disable(unsigned int irq) tx4927_irq_cp0_modify(CCP0_STATUS, tx4927_irq_cp0_mask(irq), 0); } +static void tx4927_irq_cp0_end(unsigned int irq) +{ + TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_ENDIRQ, "irq=%d \n", irq); + + if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) { + tx4927_irq_cp0_enable(irq); + } +} + /* * Functions for pic */ @@ -359,6 +376,15 @@ static void tx4927_irq_pic_disable(unsigned int irq) tx4927_irq_pic_mask(irq), 0); } +static void tx4927_irq_pic_end(unsigned int irq) +{ + TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_ENDIRQ, "irq=%d\n", irq); + + if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) { + tx4927_irq_pic_enable(irq); + } +} + /* * Main init functions */ diff --git a/trunk/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c b/trunk/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c index 5a5ea6c0b9f6..34cdb2a240e9 100644 --- a/trunk/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c +++ b/trunk/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c @@ -153,6 +153,7 @@ JP7 is not bus master -- do NOT use -- only 4 pci bus master's allowed -- SouthB #define TOSHIBA_RBTX4927_IRQ_IOC_INIT ( 1 << 10 ) #define TOSHIBA_RBTX4927_IRQ_IOC_ENABLE ( 1 << 13 ) #define TOSHIBA_RBTX4927_IRQ_IOC_DISABLE ( 1 << 14 ) +#define TOSHIBA_RBTX4927_IRQ_IOC_ENDIRQ ( 1 << 16 ) #define TOSHIBA_RBTX4927_IRQ_ISA_INIT ( 1 << 20 ) #define TOSHIBA_RBTX4927_IRQ_ISA_ENABLE ( 1 << 23 ) @@ -171,6 +172,7 @@ static const u32 toshiba_rbtx4927_irq_debug_flag = // | TOSHIBA_RBTX4927_IRQ_IOC_INIT // | TOSHIBA_RBTX4927_IRQ_IOC_ENABLE // | TOSHIBA_RBTX4927_IRQ_IOC_DISABLE +// | TOSHIBA_RBTX4927_IRQ_IOC_ENDIRQ // | TOSHIBA_RBTX4927_IRQ_ISA_INIT // | TOSHIBA_RBTX4927_IRQ_ISA_ENABLE // | TOSHIBA_RBTX4927_IRQ_ISA_DISABLE @@ -221,6 +223,7 @@ extern void mask_and_ack_8259A(unsigned int irq); static void toshiba_rbtx4927_irq_ioc_enable(unsigned int irq); static void toshiba_rbtx4927_irq_ioc_disable(unsigned int irq); +static void toshiba_rbtx4927_irq_ioc_end(unsigned int irq); #ifdef CONFIG_TOSHIBA_FPCIB0 static void toshiba_rbtx4927_irq_isa_enable(unsigned int irq); @@ -236,6 +239,7 @@ static struct irq_chip toshiba_rbtx4927_irq_ioc_type = { .mask = toshiba_rbtx4927_irq_ioc_disable, .mask_ack = toshiba_rbtx4927_irq_ioc_disable, .unmask = toshiba_rbtx4927_irq_ioc_enable, + .end = toshiba_rbtx4927_irq_ioc_end, }; #define TOSHIBA_RBTX4927_IOC_INTR_ENAB 0xbc002000 #define TOSHIBA_RBTX4927_IOC_INTR_STAT 0xbc002006 @@ -384,6 +388,23 @@ static void toshiba_rbtx4927_irq_ioc_disable(unsigned int irq) TOSHIBA_RBTX4927_WR08(TOSHIBA_RBTX4927_IOC_INTR_ENAB, v); } +static void toshiba_rbtx4927_irq_ioc_end(unsigned int irq) +{ + TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_ENDIRQ, + "irq=%d\n", irq); + + if (irq < TOSHIBA_RBTX4927_IRQ_IOC_BEG + || irq > TOSHIBA_RBTX4927_IRQ_IOC_END) { + TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_EROR, + "bad irq=%d\n", irq); + panic("\n"); + } + + if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) { + toshiba_rbtx4927_irq_ioc_enable(irq); + } +} + /**********************************************************************************/ /* Functions for isa */ diff --git 
a/trunk/arch/mips/tx4938/common/irq.c b/trunk/arch/mips/tx4938/common/irq.c index a347b424d91c..42e127683ae9 100644 --- a/trunk/arch/mips/tx4938/common/irq.c +++ b/trunk/arch/mips/tx4938/common/irq.c @@ -39,9 +39,11 @@ static void tx4938_irq_cp0_enable(unsigned int irq); static void tx4938_irq_cp0_disable(unsigned int irq); +static void tx4938_irq_cp0_end(unsigned int irq); static void tx4938_irq_pic_enable(unsigned int irq); static void tx4938_irq_pic_disable(unsigned int irq); +static void tx4938_irq_pic_end(unsigned int irq); /**********************************************************************************/ /* Kernel structs for all pic's */ @@ -54,6 +56,7 @@ static struct irq_chip tx4938_irq_cp0_type = { .mask = tx4938_irq_cp0_disable, .mask_ack = tx4938_irq_cp0_disable, .unmask = tx4938_irq_cp0_enable, + .end = tx4938_irq_cp0_end, }; #define TX4938_PIC_NAME "TX4938-PIC" @@ -63,6 +66,7 @@ static struct irq_chip tx4938_irq_pic_type = { .mask = tx4938_irq_pic_disable, .mask_ack = tx4938_irq_pic_disable, .unmask = tx4938_irq_pic_enable, + .end = tx4938_irq_pic_end, }; static struct irqaction tx4938_irq_pic_action = { @@ -100,6 +104,14 @@ tx4938_irq_cp0_disable(unsigned int irq) clear_c0_status(tx4938_irq_cp0_mask(irq)); } +static void +tx4938_irq_cp0_end(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) { + tx4938_irq_cp0_enable(irq); + } +} + /**********************************************************************************/ /* Functions for pic */ /**********************************************************************************/ @@ -257,6 +269,14 @@ tx4938_irq_pic_disable(unsigned int irq) tx4938_irq_pic_mask(irq), 0); } +static void +tx4938_irq_pic_end(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) { + tx4938_irq_pic_enable(irq); + } +} + /**********************************************************************************/ /* Main init functions */ /**********************************************************************************/ diff --git a/trunk/arch/mips/tx4938/toshiba_rbtx4938/irq.c b/trunk/arch/mips/tx4938/toshiba_rbtx4938/irq.c index b6f363d08011..8c87a35f3068 100644 --- a/trunk/arch/mips/tx4938/toshiba_rbtx4938/irq.c +++ b/trunk/arch/mips/tx4938/toshiba_rbtx4938/irq.c @@ -89,6 +89,7 @@ IRQ Device static void toshiba_rbtx4938_irq_ioc_enable(unsigned int irq); static void toshiba_rbtx4938_irq_ioc_disable(unsigned int irq); +static void toshiba_rbtx4938_irq_ioc_end(unsigned int irq); #define TOSHIBA_RBTX4938_IOC_NAME "RBTX4938-IOC" static struct irq_chip toshiba_rbtx4938_irq_ioc_type = { @@ -97,6 +98,7 @@ static struct irq_chip toshiba_rbtx4938_irq_ioc_type = { .mask = toshiba_rbtx4938_irq_ioc_disable, .mask_ack = toshiba_rbtx4938_irq_ioc_disable, .unmask = toshiba_rbtx4938_irq_ioc_enable, + .end = toshiba_rbtx4938_irq_ioc_end, }; #define TOSHIBA_RBTX4938_IOC_INTR_ENAB 0xb7f02000 @@ -165,6 +167,14 @@ toshiba_rbtx4938_irq_ioc_disable(unsigned int irq) TX4938_RD08(TOSHIBA_RBTX4938_IOC_INTR_ENAB); } +static void +toshiba_rbtx4938_irq_ioc_end(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) { + toshiba_rbtx4938_irq_ioc_enable(irq); + } +} + extern void __init txx9_spi_irqinit(int irc_irq); void __init arch_init_irq(void) diff --git a/trunk/arch/mips/vr41xx/Kconfig b/trunk/arch/mips/vr41xx/Kconfig index c8dfd8092cab..92f41f6f934a 100644 --- a/trunk/arch/mips/vr41xx/Kconfig +++ b/trunk/arch/mips/vr41xx/Kconfig @@ -6,7 +6,6 @@ config CASIO_E55 select ISA select 
SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN - select GENERIC_HARDIRQS_NO__DO_IRQ config IBM_WORKPAD bool "Support for IBM WorkPad z50" @@ -16,7 +15,6 @@ config IBM_WORKPAD select ISA select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN - select GENERIC_HARDIRQS_NO__DO_IRQ config NEC_CMBVR4133 bool "Support for NEC CMB-VR4133" @@ -41,7 +39,6 @@ config TANBAC_TB022X select IRQ_CPU select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN - select GENERIC_HARDIRQS_NO__DO_IRQ help The TANBAC VR4131 multichip module(TB0225) and the TANBAC VR4131DIMM(TB0229) are MIPS-based platforms @@ -74,7 +71,6 @@ config VICTOR_MPC30X select IRQ_CPU select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN - select GENERIC_HARDIRQS_NO__DO_IRQ config ZAO_CAPCELLA bool "Support for ZAO Networks Capcella" @@ -84,7 +80,6 @@ config ZAO_CAPCELLA select IRQ_CPU select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN - select GENERIC_HARDIRQS_NO__DO_IRQ config PCI_VR41XX bool "Add PCI control unit support of NEC VR4100 series" diff --git a/trunk/arch/mips/vr41xx/common/icu.c b/trunk/arch/mips/vr41xx/common/icu.c index c075261976c5..54b92a74c7ac 100644 --- a/trunk/arch/mips/vr41xx/common/icu.c +++ b/trunk/arch/mips/vr41xx/common/icu.c @@ -427,12 +427,19 @@ static void enable_sysint1_irq(unsigned int irq) icu1_set(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq)); } +static void end_sysint1_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) + icu1_set(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq)); +} + static struct irq_chip sysint1_irq_type = { .typename = "SYSINT1", .ack = disable_sysint1_irq, .mask = disable_sysint1_irq, .mask_ack = disable_sysint1_irq, .unmask = enable_sysint1_irq, + .end = end_sysint1_irq, }; static void disable_sysint2_irq(unsigned int irq) @@ -445,12 +452,19 @@ static void enable_sysint2_irq(unsigned int irq) icu2_set(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq)); } +static void end_sysint2_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) + icu2_set(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq)); +} + static struct irq_chip sysint2_irq_type = { .typename = "SYSINT2", .ack = disable_sysint2_irq, .mask = disable_sysint2_irq, .mask_ack = disable_sysint2_irq, .unmask = enable_sysint2_irq, + .end = end_sysint2_irq, }; static inline int set_sysint1_assign(unsigned int irq, unsigned char assign) diff --git a/trunk/arch/parisc/kernel/binfmt_elf32.c b/trunk/arch/parisc/kernel/binfmt_elf32.c index ecb10a4f63c6..1e64e7b88110 100644 --- a/trunk/arch/parisc/kernel/binfmt_elf32.c +++ b/trunk/arch/parisc/kernel/binfmt_elf32.c @@ -75,6 +75,7 @@ struct elf_prpsinfo32 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ }; +#define elf_addr_t unsigned int #define init_elf_binfmt init_elf32_binfmt #define ELF_PLATFORM ("PARISC32\0") diff --git a/trunk/arch/parisc/mm/fault.c b/trunk/arch/parisc/mm/fault.c index 641f9c920eee..64785e46f93b 100644 --- a/trunk/arch/parisc/mm/fault.c +++ b/trunk/arch/parisc/mm/fault.c @@ -152,7 +152,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, const struct exception_table_entry *fix; unsigned long acc_type; - if (in_atomic() || !mm) + if (in_interrupt() || !mm) goto no_context; down_read(&mm->mmap_sem); diff --git a/trunk/arch/powerpc/kernel/crash.c b/trunk/arch/powerpc/kernel/crash.c index d3f2080d2eee..89b03c8da9d2 100644 --- a/trunk/arch/powerpc/kernel/crash.c +++ b/trunk/arch/powerpc/kernel/crash.c @@ -46,6 +46,61 @@ int crashing_cpu = 
-1; static cpumask_t cpus_in_crash = CPU_MASK_NONE; cpumask_t cpus_in_sr = CPU_MASK_NONE; +static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data, + size_t data_len) +{ + struct elf_note note; + + note.n_namesz = strlen(name) + 1; + note.n_descsz = data_len; + note.n_type = type; + memcpy(buf, ¬e, sizeof(note)); + buf += (sizeof(note) +3)/4; + memcpy(buf, name, note.n_namesz); + buf += (note.n_namesz + 3)/4; + memcpy(buf, data, note.n_descsz); + buf += (note.n_descsz + 3)/4; + + return buf; +} + +static void final_note(u32 *buf) +{ + struct elf_note note; + + note.n_namesz = 0; + note.n_descsz = 0; + note.n_type = 0; + memcpy(buf, ¬e, sizeof(note)); +} + +static void crash_save_this_cpu(struct pt_regs *regs, int cpu) +{ + struct elf_prstatus prstatus; + u32 *buf; + + if ((cpu < 0) || (cpu >= NR_CPUS)) + return; + + /* Using ELF notes here is opportunistic. + * I need a well defined structure format + * for the data I pass, and I need tags + * on the data to indicate what information I have + * squirrelled away. ELF notes happen to provide + * all of that that no need to invent something new. + */ + buf = (u32*)per_cpu_ptr(crash_notes, cpu); + if (!buf) + return; + + memset(&prstatus, 0, sizeof(prstatus)); + prstatus.pr_pid = current->pid; + elf_core_copy_regs(&prstatus.pr_reg, regs); + buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus, + sizeof(prstatus)); + final_note(buf); +} + #ifdef CONFIG_SMP static atomic_t enter_on_soft_reset = ATOMIC_INIT(0); @@ -58,7 +113,7 @@ void crash_ipi_callback(struct pt_regs *regs) hard_irq_disable(); if (!cpu_isset(cpu, cpus_in_crash)) - crash_save_cpu(regs, cpu); + crash_save_this_cpu(regs, cpu); cpu_set(cpu, cpus_in_crash); /* @@ -251,7 +306,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs) * such that another IPI will not be sent. 
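append_elf_note() and final_note() in the powerpc crash.c hunk above encode the standard ELF note layout: a three-word header, then the NUL-terminated name, then the payload, each rounded up to a 4-byte boundary. Below is a stand-alone model of that layout; struct elf_note here mirrors the kernel's (three 32-bit fields), and the "CORE"/NT_PRSTATUS/10-byte payload values are only examples.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same layout as the kernel's struct elf_note / Elf32_Nhdr. */
struct elf_note {
	uint32_t n_namesz;	/* name size, including the trailing NUL */
	uint32_t n_descsz;	/* payload size */
	uint32_t n_type;
};

/* Mirror of append_elf_note() above: header, then name, then payload,
 * each padded out to a 4-byte boundary. */
static uint32_t *append_note(uint32_t *buf, const char *name, uint32_t type,
			     const void *data, size_t data_len)
{
	struct elf_note note = {
		.n_namesz = (uint32_t)strlen(name) + 1,
		.n_descsz = (uint32_t)data_len,
		.n_type   = type,
	};

	memcpy(buf, &note, sizeof(note));
	buf += (sizeof(note) + 3) / 4;
	memcpy(buf, name, note.n_namesz);
	buf += (note.n_namesz + 3) / 4;
	memcpy(buf, data, note.n_descsz);
	buf += (note.n_descsz + 3) / 4;
	return buf;
}

int main(void)
{
	uint32_t buf[64] = { 0 };
	uint8_t payload[10] = { 0 };	/* stand-in for elf_prstatus */
	uint32_t *end;

	end = append_note(buf, "CORE", 1 /* NT_PRSTATUS */, payload,
			  sizeof(payload));
	/* 12-byte header + "CORE\0" rounded to 8 + 10 bytes rounded to 12 */
	printf("note occupies %zu bytes\n",
	       (size_t)(end - buf) * sizeof(uint32_t));
	return 0;
}

Note the n_namesz convention: this helper counts the trailing NUL (strlen(name) + 1), whereas the irixelf.c hunk earlier in this series changes notesize()/writenote() to drop the +1.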
*/ crashing_cpu = smp_processor_id(); - crash_save_cpu(regs, crashing_cpu); + crash_save_this_cpu(regs, crashing_cpu); crash_kexec_prepare_cpus(crashing_cpu); cpu_set(crashing_cpu, cpus_in_crash); if (ppc_md.kexec_cpu_down) diff --git a/trunk/arch/powerpc/kernel/kprobes.c b/trunk/arch/powerpc/kernel/kprobes.c index 4657563f8813..7b8d12b9026c 100644 --- a/trunk/arch/powerpc/kernel/kprobes.c +++ b/trunk/arch/powerpc/kernel/kprobes.c @@ -85,7 +85,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { mutex_lock(&kprobe_mutex); - free_insn_slot(p->ainsn.insn, 0); + free_insn_slot(p->ainsn.insn); mutex_unlock(&kprobe_mutex); } diff --git a/trunk/arch/powerpc/kernel/rtas_flash.c b/trunk/arch/powerpc/kernel/rtas_flash.c index 7d0f13fecc0e..b9561d300516 100644 --- a/trunk/arch/powerpc/kernel/rtas_flash.c +++ b/trunk/arch/powerpc/kernel/rtas_flash.c @@ -101,7 +101,7 @@ struct flash_block_list_header { /* just the header of flash_block_list */ static struct flash_block_list_header rtas_firmware_flash_list = {0, NULL}; /* Use slab cache to guarantee 4k alignment */ -static struct kmem_cache *flash_block_cache = NULL; +static kmem_cache_t *flash_block_cache = NULL; #define FLASH_BLOCK_LIST_VERSION (1UL) @@ -286,7 +286,7 @@ static ssize_t rtas_flash_read(struct file *file, char __user *buf, } /* constructor for flash_block_cache */ -void rtas_block_ctor(void *ptr, struct kmem_cache *cache, unsigned long flags) +void rtas_block_ctor(void *ptr, kmem_cache_t *cache, unsigned long flags) { memset(ptr, 0, RTAS_BLK_SIZE); } diff --git a/trunk/arch/powerpc/kernel/signal_32.c b/trunk/arch/powerpc/kernel/signal_32.c index e4ebe1a6228e..320353f0926f 100644 --- a/trunk/arch/powerpc/kernel/signal_32.c +++ b/trunk/arch/powerpc/kernel/signal_32.c @@ -36,7 +36,7 @@ #include #include #include -#include +#include #endif #include diff --git a/trunk/arch/powerpc/kernel/vdso.c b/trunk/arch/powerpc/kernel/vdso.c index a4b28c73bba0..c913ad5cad29 100644 --- a/trunk/arch/powerpc/kernel/vdso.c +++ b/trunk/arch/powerpc/kernel/vdso.c @@ -264,7 +264,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, /* Allocate a VMA structure and fill it up */ - vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL); if (vma == NULL) { rc = -ENOMEM; goto fail_mmapsem; diff --git a/trunk/arch/powerpc/mm/hugetlbpage.c b/trunk/arch/powerpc/mm/hugetlbpage.c index 89c836d54809..506d89768d45 100644 --- a/trunk/arch/powerpc/mm/hugetlbpage.c +++ b/trunk/arch/powerpc/mm/hugetlbpage.c @@ -146,11 +146,6 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) return hugepte_offset(hpdp, addr); } -int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) -{ - return 0; -} - static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp) { pte_t *hugepte = hugepd_page(*hpdp); @@ -1047,7 +1042,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access, return err; } -static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags) +static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags) { memset(addr, 0, kmem_cache_size(cache)); } diff --git a/trunk/arch/powerpc/mm/init_64.c b/trunk/arch/powerpc/mm/init_64.c index d12a87ec5ae9..9a178549cbcf 100644 --- a/trunk/arch/powerpc/mm/init_64.c +++ b/trunk/arch/powerpc/mm/init_64.c @@ -141,7 +141,7 @@ static int __init setup_kcore(void) } module_init(setup_kcore); -static void zero_ctor(void *addr, struct kmem_cache 
*cache, unsigned long flags) +static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags) { memset(addr, 0, kmem_cache_size(cache)); } @@ -166,9 +166,9 @@ static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = { /* Hugepages need one extra cache, initialized in hugetlbpage.c. We * can't put into the tables above, because HPAGE_SHIFT is not compile * time constant. */ -struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1]; +kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1]; #else -struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)]; +kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)]; #endif void pgtable_cache_init(void) diff --git a/trunk/arch/powerpc/platforms/cell/spufs/inode.c b/trunk/arch/powerpc/platforms/cell/spufs/inode.c index e3af9112c026..c7d010749a18 100644 --- a/trunk/arch/powerpc/platforms/cell/spufs/inode.c +++ b/trunk/arch/powerpc/platforms/cell/spufs/inode.c @@ -40,7 +40,7 @@ #include "spufs.h" -static struct kmem_cache *spufs_inode_cache; +static kmem_cache_t *spufs_inode_cache; char *isolated_loader; static struct inode * @@ -48,7 +48,7 @@ spufs_alloc_inode(struct super_block *sb) { struct spufs_inode_info *ei; - ei = kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL); + ei = kmem_cache_alloc(spufs_inode_cache, SLAB_KERNEL); if (!ei) return NULL; @@ -65,7 +65,7 @@ spufs_destroy_inode(struct inode *inode) } static void -spufs_init_once(void *p, struct kmem_cache * cachep, unsigned long flags) +spufs_init_once(void *p, kmem_cache_t * cachep, unsigned long flags) { struct spufs_inode_info *ei = p; diff --git a/trunk/arch/powerpc/platforms/embedded6xx/ls_uart.c b/trunk/arch/powerpc/platforms/embedded6xx/ls_uart.c index 0e837762cc5b..31bcdae84823 100644 --- a/trunk/arch/powerpc/platforms/embedded6xx/ls_uart.c +++ b/trunk/arch/powerpc/platforms/embedded6xx/ls_uart.c @@ -14,7 +14,7 @@ static unsigned long avr_clock; static struct work_struct wd_work; -static void wd_stop(struct work_struct *unused) +static void wd_stop(void *unused) { const char string[] = "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK"; int i = 0, rescue = 8; @@ -122,7 +122,7 @@ static int __init ls_uarts_init(void) ls_uart_init(); - INIT_WORK(&wd_work, wd_stop); + INIT_WORK(&wd_work, wd_stop, NULL); schedule_work(&wd_work); return 0; diff --git a/trunk/arch/powerpc/platforms/powermac/backlight.c b/trunk/arch/powerpc/platforms/powermac/backlight.c index c3a89414ddc0..afa593a8544a 100644 --- a/trunk/arch/powerpc/platforms/powermac/backlight.c +++ b/trunk/arch/powerpc/platforms/powermac/backlight.c @@ -18,11 +18,11 @@ #define OLD_BACKLIGHT_MAX 15 -static void pmac_backlight_key_worker(struct work_struct *work); -static void pmac_backlight_set_legacy_worker(struct work_struct *work); +static void pmac_backlight_key_worker(void *data); +static void pmac_backlight_set_legacy_worker(void *data); -static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker); -static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker); +static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker, NULL); +static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker, NULL); /* Although these variables are used in interrupt context, it makes no sense to * protect them. 
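The appldata, backlight and eeh hunks above (and the fcc_enet/fec ones that follow) all make the same conversion back to the older workqueue calling convention: DECLARE_WORK()/INIT_WORK() take a third data argument and the handler receives it as an opaque void *, instead of receiving its own struct work_struct * and recovering the containing object with container_of(). A minimal sketch of that older form, as a kernel-context fragment; struct my_dev, my_work_fn and the other names are invented for the illustration.

#include <linux/workqueue.h>

struct my_dev {
	struct work_struct work;
	int pending;
};

static void my_work_fn(void *data)
{
	struct my_dev *dev = data;	/* handed straight in, no container_of() */

	dev->pending = 0;
}

static void my_dev_init(struct my_dev *dev)
{
	INIT_WORK(&dev->work, my_work_fn, dev);	/* older 3-argument form */
}

static void my_dev_kick(struct my_dev *dev)
{
	dev->pending = 1;
	schedule_work(&dev->work);
}

Because the data pointer is carried in the work item itself, drivers no longer need a back-pointer member in their private struct, which is why the fcc_enet and fec hunks below also drop the struct net_device *dev field and pass dev directly to INIT_WORK().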
No user is able to produce enough key events per second and @@ -94,7 +94,7 @@ int pmac_backlight_curve_lookup(struct fb_info *info, int value) return level; } -static void pmac_backlight_key_worker(struct work_struct *work) +static void pmac_backlight_key_worker(void *data) { if (atomic_read(&kernel_backlight_disabled)) return; @@ -166,7 +166,7 @@ static int __pmac_backlight_set_legacy_brightness(int brightness) return error; } -static void pmac_backlight_set_legacy_worker(struct work_struct *work) +static void pmac_backlight_set_legacy_worker(void *data) { if (atomic_read(&kernel_backlight_disabled)) return; diff --git a/trunk/arch/powerpc/platforms/pseries/eeh_event.c b/trunk/arch/powerpc/platforms/pseries/eeh_event.c index 49037edf7d39..137077451316 100644 --- a/trunk/arch/powerpc/platforms/pseries/eeh_event.c +++ b/trunk/arch/powerpc/platforms/pseries/eeh_event.c @@ -37,8 +37,8 @@ /* EEH event workqueue setup. */ static DEFINE_SPINLOCK(eeh_eventlist_lock); LIST_HEAD(eeh_eventlist); -static void eeh_thread_launcher(struct work_struct *); -DECLARE_WORK(eeh_event_wq, eeh_thread_launcher); +static void eeh_thread_launcher(void *); +DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL); /* Serialize reset sequences for a given pci device */ DEFINE_MUTEX(eeh_event_mutex); @@ -103,7 +103,7 @@ static int eeh_event_handler(void * dummy) * eeh_thread_launcher * @dummy - unused */ -static void eeh_thread_launcher(struct work_struct *dummy) +static void eeh_thread_launcher(void *dummy) { if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0) printk(KERN_ERR "Failed to start EEH daemon\n"); diff --git a/trunk/arch/ppc/8260_io/fcc_enet.c b/trunk/arch/ppc/8260_io/fcc_enet.c index 709952c25f29..2e1943e27819 100644 --- a/trunk/arch/ppc/8260_io/fcc_enet.c +++ b/trunk/arch/ppc/8260_io/fcc_enet.c @@ -385,7 +385,6 @@ struct fcc_enet_private { phy_info_t *phy; struct work_struct phy_relink; struct work_struct phy_display_config; - struct net_device *dev; uint sequence_done; @@ -1392,11 +1391,10 @@ static phy_info_t *phy_info[] = { NULL }; -static void mii_display_status(struct work_struct *work) +static void mii_display_status(void *data) { - volatile struct fcc_enet_private *fep = - container_of(work, struct fcc_enet_private, phy_relink); - struct net_device *dev = fep->dev; + struct net_device *dev = data; + volatile struct fcc_enet_private *fep = dev->priv; uint s = fep->phy_status; if (!fep->link && !fep->old_link) { @@ -1430,12 +1428,10 @@ static void mii_display_status(struct work_struct *work) printk(".\n"); } -static void mii_display_config(struct work_struct *work) +static void mii_display_config(void *data) { - volatile struct fcc_enet_private *fep = - container_of(work, struct fcc_enet_private, - phy_display_config); - struct net_device *dev = fep->dev; + struct net_device *dev = data; + volatile struct fcc_enet_private *fep = dev->priv; uint s = fep->phy_status; printk("%s: config: auto-negotiation ", dev->name); @@ -1762,9 +1758,8 @@ static int __init fec_enet_init(void) cep->phy_id_done = 0; cep->phy_addr = fip->fc_phyaddr; mii_queue(dev, mk_mii_read(MII_PHYSID1), mii_discover_phy); - INIT_WORK(&cep->phy_relink, mii_display_status); - INIT_WORK(&cep->phy_display_config, mii_display_config); - cep->dev = dev; + INIT_WORK(&cep->phy_relink, mii_display_status, dev); + INIT_WORK(&cep->phy_display_config, mii_display_config, dev); #endif /* CONFIG_USE_MDIO */ fip++; diff --git a/trunk/arch/ppc/8xx_io/fec.c b/trunk/arch/ppc/8xx_io/fec.c index e6c28fb423b2..2f9fa9e3d331 100644 --- 
a/trunk/arch/ppc/8xx_io/fec.c +++ b/trunk/arch/ppc/8xx_io/fec.c @@ -173,7 +173,6 @@ struct fec_enet_private { uint phy_speed; phy_info_t *phy; struct work_struct phy_task; - struct net_device *dev; uint sequence_done; @@ -1264,11 +1263,10 @@ static void mii_display_status(struct net_device *dev) printk(".\n"); } -static void mii_display_config(struct work_struct *work) +static void mii_display_config(void *priv) { - struct fec_enet_private *fep = - container_of(work, struct fec_enet_private, phy_task); - struct net_device *dev = fep->dev; + struct net_device *dev = (struct net_device *)priv; + struct fec_enet_private *fep = dev->priv; volatile uint *s = &(fep->phy_status); printk("%s: config: auto-negotiation ", dev->name); @@ -1297,11 +1295,10 @@ static void mii_display_config(struct work_struct *work) fep->sequence_done = 1; } -static void mii_relink(struct work_struct *work) +static void mii_relink(void *priv) { - struct fec_enet_private *fep = - container_of(work, struct fec_enet_private, phy_task); - struct net_device *dev = fep->dev; + struct net_device *dev = (struct net_device *)priv; + struct fec_enet_private *fep = dev->priv; int duplex; fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0; @@ -1328,8 +1325,7 @@ static void mii_queue_relink(uint mii_reg, struct net_device *dev) { struct fec_enet_private *fep = dev->priv; - fep->dev = dev; - INIT_WORK(&fep->phy_task, mii_relink); + INIT_WORK(&fep->phy_task, mii_relink, (void *)dev); schedule_work(&fep->phy_task); } @@ -1337,8 +1333,7 @@ static void mii_queue_config(uint mii_reg, struct net_device *dev) { struct fec_enet_private *fep = dev->priv; - fep->dev = dev; - INIT_WORK(&fep->phy_task, mii_display_config); + INIT_WORK(&fep->phy_task, mii_display_config, (void *)dev); schedule_work(&fep->phy_task); } diff --git a/trunk/arch/s390/appldata/appldata_base.c b/trunk/arch/s390/appldata/appldata_base.c index b8c237290263..af1e8fc7d985 100644 --- a/trunk/arch/s390/appldata/appldata_base.c +++ b/trunk/arch/s390/appldata/appldata_base.c @@ -92,8 +92,8 @@ static int appldata_timer_active; * Work queue */ static struct workqueue_struct *appldata_wq; -static void appldata_work_fn(struct work_struct *work); -static DECLARE_WORK(appldata_work, appldata_work_fn); +static void appldata_work_fn(void *data); +static DECLARE_WORK(appldata_work, appldata_work_fn, NULL); /* @@ -125,7 +125,7 @@ static void appldata_timer_function(unsigned long data) * * call data gathering function for each (active) module */ -static void appldata_work_fn(struct work_struct *work) +static void appldata_work_fn(void *data) { struct list_head *lh; struct appldata_ops *ops; @@ -561,6 +561,7 @@ appldata_offline_cpu(int cpu) spin_unlock(&appldata_timer_lock); } +#ifdef CONFIG_HOTPLUG_CPU static int __cpuinit appldata_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) @@ -581,6 +582,7 @@ appldata_cpu_notify(struct notifier_block *self, static struct notifier_block appldata_nb = { .notifier_call = appldata_cpu_notify, }; +#endif /* * appldata_init() diff --git a/trunk/arch/s390/kernel/binfmt_elf32.c b/trunk/arch/s390/kernel/binfmt_elf32.c index 5c46054195cb..9565a2dcfadc 100644 --- a/trunk/arch/s390/kernel/binfmt_elf32.c +++ b/trunk/arch/s390/kernel/binfmt_elf32.c @@ -176,6 +176,7 @@ struct elf_prpsinfo32 #include +#define elf_addr_t u32 /* #define init_elf_binfmt init_elf32_binfmt */ diff --git a/trunk/arch/s390/kernel/kprobes.c b/trunk/arch/s390/kernel/kprobes.c index 576368c4f605..67914fe7f317 100644 --- a/trunk/arch/s390/kernel/kprobes.c +++ 
b/trunk/arch/s390/kernel/kprobes.c @@ -200,7 +200,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { mutex_lock(&kprobe_mutex); - free_insn_slot(p->ainsn.insn, 0); + free_insn_slot(p->ainsn.insn); mutex_unlock(&kprobe_mutex); } diff --git a/trunk/arch/s390/lib/uaccess_std.c b/trunk/arch/s390/lib/uaccess_std.c index bbaca66fa293..2d549ed2e113 100644 --- a/trunk/arch/s390/lib/uaccess_std.c +++ b/trunk/arch/s390/lib/uaccess_std.c @@ -11,7 +11,7 @@ #include #include -#include +#include #include #ifndef __s390x__ @@ -258,7 +258,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old) { int oldval = 0, newval, ret; - pagefault_disable(); + inc_preempt_count(); switch (op) { case FUTEX_OP_SET: @@ -284,7 +284,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old) default: ret = -ENOSYS; } - pagefault_enable(); + dec_preempt_count(); *old = oldval; return ret; } diff --git a/trunk/arch/sh/Kconfig b/trunk/arch/sh/Kconfig index d83d64af31f2..bffc7e176970 100644 --- a/trunk/arch/sh/Kconfig +++ b/trunk/arch/sh/Kconfig @@ -51,14 +51,6 @@ config GENERIC_TIME config ARCH_MAY_HAVE_PC_FDC bool -config STACKTRACE_SUPPORT - bool - default y - -config LOCKDEP_SUPPORT - bool - default y - source "init/Kconfig" menu "System type" @@ -227,20 +219,6 @@ config SH_SHMIN help Select SHMIN if configuring for the SHMIN board. -config SH_7206_SOLUTION_ENGINE - bool "SolutionEngine7206" - select CPU_SUBTYPE_SH7206 - help - Select 7206 SolutionEngine if configuring for a Hitachi SH7206 - evaluation board. - -config SH_7619_SOLUTION_ENGINE - bool "SolutionEngine7619" - select CPU_SUBTYPE_SH7619 - help - Select 7619 SolutionEngine if configuring for a Hitachi SH7619 - evaluation board. - config SH_UNKNOWN bool "BareCPU" help @@ -302,20 +280,12 @@ config CF_BASE_ADDR menu "Processor features" -choice - prompt "Endianess selection" - default CPU_LITTLE_ENDIAN - help - Some SuperH machines can be configured for either little or big - endian byte order. These modes require different kernels. - config CPU_LITTLE_ENDIAN bool "Little Endian" - -config CPU_BIG_ENDIAN - bool "Big Endian" - -endchoice + help + Some SuperH machines can be configured for either little or big + endian byte order. These modes require different kernels. Say Y if + your machine is little endian, N if it's a big endian machine. config SH_FPU bool "FPU support" @@ -375,9 +345,6 @@ config CPU_HAS_MASKREG_IRQ config CPU_HAS_INTC2_IRQ bool -config CPU_HAS_IPR_IRQ - bool - config CPU_HAS_SR_RB bool "CPU has SR.RB" depends on CPU_SH3 || CPU_SH4 @@ -390,9 +357,6 @@ config CPU_HAS_SR_RB See for further information on SR.RB and register banking in the kernel in general. -config CPU_HAS_PTEA - bool - endmenu menu "Timer support" @@ -400,25 +364,10 @@ depends on !GENERIC_TIME config SH_TMU bool "TMU timer support" - depends on CPU_SH3 || CPU_SH4 default y help This enables the use of the TMU as the system timer. -config SH_CMT - bool "CMT timer support" - depends on CPU_SH2 - default y - help - This enables the use of the CMT as the system timer. - -config SH_MTU2 - bool "MTU2 timer support" - depends on CPU_SH2A - default n - help - This enables the use of the MTU2 as the system timer. 
- endmenu source "arch/sh/boards/renesas/hs7751rvoip/Kconfig" @@ -427,52 +376,19 @@ source "arch/sh/boards/renesas/rts7751r2d/Kconfig" source "arch/sh/boards/renesas/r7780rp/Kconfig" -config SH_TIMER_IRQ - int - default "28" if CPU_SUBTYPE_SH7780 - default "86" if CPU_SUBTYPE_SH7619 - default "140" if CPU_SUBTYPE_SH7206 - default "16" - -config NO_IDLE_HZ - bool "Dynamic tick timer" - help - Select this option if you want to disable continuous timer ticks - and have them programmed to occur as required. This option saves - power as the system can remain in idle state for longer. - - By default dynamic tick is disabled during the boot, and can be - manually enabled with: - - echo 1 > /sys/devices/system/timer/timer0/dyn_tick - - Alternatively, if you want dynamic tick automatically enabled - during boot, pass "dyntick=enable" via the kernel command string. - - Please note that dynamic tick may affect the accuracy of - timekeeping on some platforms depending on the implementation. - config SH_PCLK_FREQ int "Peripheral clock frequency (in Hz)" - default "27000000" if CPU_SUBTYPE_SH73180 || CPU_SUBTYPE_SH7343 - default "31250000" if CPU_SUBTYPE_SH7619 - default "33333333" if CPU_SUBTYPE_SH7300 || CPU_SUBTYPE_SH7770 || \ - CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7705 || \ - CPU_SUBTYPE_SH7206 default "50000000" if CPU_SUBTYPE_SH7750 || CPU_SUBTYPE_SH7780 default "60000000" if CPU_SUBTYPE_SH7751 + default "33333333" if CPU_SUBTYPE_SH7300 || CPU_SUBTYPE_SH7770 || \ + CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7705 + default "27000000" if CPU_SUBTYPE_SH73180 || CPU_SUBTYPE_SH7343 default "66000000" if CPU_SUBTYPE_SH4_202 help This option is used to specify the peripheral clock frequency. This is necessary for determining the reference clock value on platforms lacking an RTC. -config SH_CLK_MD - int "CPU Mode Pin Setting" - depends on CPU_SUBTYPE_SH7619 || CPU_SUBTYPE_SH7206 - help - MD2 - MD0 Setting. - menu "CPU Frequency scaling" source "drivers/cpufreq/Kconfig" @@ -505,8 +421,6 @@ config HEARTBEAT behavior is platform-dependent, but normally the flash frequency is a hyperbolic function of the 5-minute load average. -source "arch/sh/drivers/Kconfig" - endmenu config ISA_DMA_API diff --git a/trunk/arch/sh/Kconfig.debug b/trunk/arch/sh/Kconfig.debug index 66a25ef4ef1b..48479e014dac 100644 --- a/trunk/arch/sh/Kconfig.debug +++ b/trunk/arch/sh/Kconfig.debug @@ -1,9 +1,5 @@ menu "Kernel hacking" -config TRACE_IRQFLAGS_SUPPORT - bool - default y - source "lib/Kconfig.debug" config SH_STANDARD_BIOS @@ -21,18 +17,7 @@ config SH_STANDARD_BIOS config EARLY_SCIF_CONSOLE bool "Use early SCIF console" - help - This enables an early console using a fixed SCIF port. This can - be used by platforms that are either not running the SH - standard BIOS, or do not wish to use the BIOS callbacks for the - serial I/O. - -config EARLY_SCIF_CONSOLE_PORT - hex "SCIF port for early console" - depends on EARLY_SCIF_CONSOLE - default "0xffe00000" if CPU_SUBTYPE_SH7780 - default "0xfffe9800" if CPU_SUBTYPE_SH72060 - default "0xffe80000" if CPU_SH4 + depends on CPU_SH4 || CPU_SH2A && !SH_STANDARD_BIOS config EARLY_PRINTK bool "Early printk support" @@ -45,11 +30,6 @@ config EARLY_PRINTK when the kernel may crash or hang before the serial console is initialised. If unsure, say N. - On devices that are running SH-IPL and want to keep the port - initialization consistent while not using the BIOS callbacks, - select both the EARLY_SCIF_CONSOLE and SH_STANDARD_BIOS, using - the kernel command line option to toggle back and forth. 
- config DEBUG_STACKOVERFLOW bool "Check for stack overflows" depends on DEBUG_KERNEL diff --git a/trunk/arch/sh/Makefile b/trunk/arch/sh/Makefile index d10bba5e1074..26d62ff51a64 100644 --- a/trunk/arch/sh/Makefile +++ b/trunk/arch/sh/Makefile @@ -13,6 +13,10 @@ # for "archclean" and "archdep" for cleaning up and making dependencies for # this architecture # + +cflags-y := -mb +cflags-$(CONFIG_CPU_LITTLE_ENDIAN) := -ml + isa-y := any isa-$(CONFIG_SH_DSP) := sh isa-$(CONFIG_CPU_SH2) := sh2 @@ -34,16 +38,13 @@ isa-y := $(isa-y)-nofpu endif endif -cflags-$(CONFIG_CPU_SH2) := -m2 -cflags-$(CONFIG_CPU_SH3) := -m3 -cflags-$(CONFIG_CPU_SH4) := -m4 \ - $(call cc-option,-mno-implicit-fp,-m4-nofpu) -cflags-$(CONFIG_CPU_SH4A) := -m4a $(call cc-option,-m4a-nofpu,) - -cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mb -cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -ml +cflags-y += $(call as-option,-Wa$(comma)-isa=$(isa-y),) -cflags-y += $(call as-option,-Wa$(comma)-isa=$(isa-y),) -ffreestanding +cflags-$(CONFIG_CPU_SH2) += -m2 +cflags-$(CONFIG_CPU_SH3) += -m3 +cflags-$(CONFIG_CPU_SH4) += -m4 \ + $(call cc-option,-mno-implicit-fp,-m4-nofpu) +cflags-$(CONFIG_CPU_SH4A) += $(call cc-option,-m4a-nofpu,) cflags-$(CONFIG_SH_DSP) += -Wa,-dsp cflags-$(CONFIG_SH_KGDB) += -g @@ -58,9 +59,7 @@ OBJCOPYFLAGS := -O binary -R .note -R .comment -R .stab -R .stabstr -S # never be used by anyone. Use a board-specific defconfig that has a # reasonable chance of being current instead. # -KBUILD_DEFCONFIG := r7780rp_defconfig - -KBUILD_IMAGE := arch/sh/boot/zImage +KBUILD_DEFCONFIG := rts7751r2d_defconfig # # Choosing incompatible machines durings configuration will result in @@ -110,8 +109,6 @@ machdir-$(CONFIG_SH_SH4202_MICRODEV) := superh/microdev machdir-$(CONFIG_SH_LANDISK) := landisk machdir-$(CONFIG_SH_TITAN) := titan machdir-$(CONFIG_SH_SHMIN) := shmin -machdir-$(CONFIG_SH_7206_SOLUTION_ENGINE) := se/7206 -machdir-$(CONFIG_SH_7619_SOLUTION_ENGINE) := se/7619 machdir-$(CONFIG_SH_UNKNOWN) := unknown incdir-y := $(notdir $(machdir-y)) @@ -127,7 +124,6 @@ core-$(CONFIG_HD64465) += arch/sh/cchips/hd6446x/hd64465/ core-$(CONFIG_VOYAGERGX) += arch/sh/cchips/voyagergx/ cpuincdir-$(CONFIG_CPU_SH2) := cpu-sh2 -cpuincdir-$(CONFIG_CPU_SH2A) := cpu-sh2a cpuincdir-$(CONFIG_CPU_SH3) := cpu-sh3 cpuincdir-$(CONFIG_CPU_SH4) := cpu-sh4 diff --git a/trunk/arch/sh/boards/renesas/r7780rp/Makefile b/trunk/arch/sh/boards/renesas/r7780rp/Makefile index 574b0316ed56..f1776d027978 100644 --- a/trunk/arch/sh/boards/renesas/r7780rp/Makefile +++ b/trunk/arch/sh/boards/renesas/r7780rp/Makefile @@ -3,6 +3,4 @@ # obj-y := setup.o io.o irq.o - -obj-$(CONFIG_HEARTBEAT) += led.o -obj-$(CONFIG_PUSH_SWITCH) += psw.o +obj-$(CONFIG_HEARTBEAT) += led.o diff --git a/trunk/arch/sh/boards/renesas/r7780rp/irq.c b/trunk/arch/sh/boards/renesas/r7780rp/irq.c index cc381e197783..aa15ec5bc69e 100644 --- a/trunk/arch/sh/boards/renesas/r7780rp/irq.c +++ b/trunk/arch/sh/boards/renesas/r7780rp/irq.c @@ -10,7 +10,6 @@ */ #include #include -#include #include #include diff --git a/trunk/arch/sh/boards/renesas/r7780rp/psw.c b/trunk/arch/sh/boards/renesas/r7780rp/psw.c deleted file mode 100644 index c844dfa5d58d..000000000000 --- a/trunk/arch/sh/boards/renesas/r7780rp/psw.c +++ /dev/null @@ -1,122 +0,0 @@ -/* - * arch/sh/boards/renesas/r7780rp/psw.c - * - * push switch support for RDBRP-1/RDBREVRP-1 debug boards. - * - * Copyright (C) 2006 Paul Mundt - * - * This file is subject to the terms and conditions of the GNU General Public - * License. 
See the file "COPYING" in the main directory of this archive - * for more details. - */ -#include -#include -#include -#include -#include -#include - -static irqreturn_t psw_irq_handler(int irq, void *arg) -{ - struct platform_device *pdev = arg; - struct push_switch *psw = platform_get_drvdata(pdev); - struct push_switch_platform_info *psw_info = pdev->dev.platform_data; - unsigned int l, mask; - int ret = 0; - - l = ctrl_inw(PA_DBSW); - - /* Nothing to do if there's no state change */ - if (psw->state) { - ret = 1; - goto out; - } - - mask = l & 0x70; - /* Figure out who raised it */ - if (mask & (1 << psw_info->bit)) { - psw->state = !!(mask & (1 << psw_info->bit)); - if (psw->state) /* debounce */ - mod_timer(&psw->debounce, jiffies + 50); - - ret = 1; - } - -out: - /* Clear the switch IRQs */ - l |= (0x7 << 12); - ctrl_outw(l, PA_DBSW); - - return IRQ_RETVAL(ret); -} - -static struct resource psw_resources[] = { - [0] = { - .start = IRQ_PSW, - .flags = IORESOURCE_IRQ, - }, -}; - -static struct push_switch_platform_info s2_platform_data = { - .name = "s2", - .bit = 6, - .irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | - IRQF_SHARED, - .irq_handler = psw_irq_handler, -}; - -static struct platform_device s2_switch_device = { - .name = "push-switch", - .id = 0, - .num_resources = ARRAY_SIZE(psw_resources), - .resource = psw_resources, - .dev = { - .platform_data = &s2_platform_data, - }, -}; - -static struct push_switch_platform_info s3_platform_data = { - .name = "s3", - .bit = 5, - .irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | - IRQF_SHARED, - .irq_handler = psw_irq_handler, -}; - -static struct platform_device s3_switch_device = { - .name = "push-switch", - .id = 1, - .num_resources = ARRAY_SIZE(psw_resources), - .resource = psw_resources, - .dev = { - .platform_data = &s3_platform_data, - }, -}; - -static struct push_switch_platform_info s4_platform_data = { - .name = "s4", - .bit = 4, - .irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | - IRQF_SHARED, - .irq_handler = psw_irq_handler, -}; - -static struct platform_device s4_switch_device = { - .name = "push-switch", - .id = 2, - .num_resources = ARRAY_SIZE(psw_resources), - .resource = psw_resources, - .dev = { - .platform_data = &s4_platform_data, - }, -}; - -static struct platform_device *psw_devices[] = { - &s2_switch_device, &s3_switch_device, &s4_switch_device, -}; - -static int __init psw_init(void) -{ - return platform_add_devices(psw_devices, ARRAY_SIZE(psw_devices)); -} -module_init(psw_init); diff --git a/trunk/arch/sh/boards/renesas/r7780rp/setup.c b/trunk/arch/sh/boards/renesas/r7780rp/setup.c index 9f89c8de9db9..c331caeb694b 100644 --- a/trunk/arch/sh/boards/renesas/r7780rp/setup.c +++ b/trunk/arch/sh/boards/renesas/r7780rp/setup.c @@ -44,37 +44,8 @@ static struct platform_device m66596_usb_host_device = { .resource = m66596_usb_host_resources, }; -static struct resource cf_ide_resources[] = { - [0] = { - .start = 0x1f0, - .end = 0x1f0 + 8, - .flags = IORESOURCE_IO, - }, - [1] = { - .start = 0x1f0 + 0x206, - .end = 0x1f0 + 8 + 0x206 + 8, - .flags = IORESOURCE_IO, - }, - [2] = { -#ifdef CONFIG_SH_R7780MP - .start = 1, -#else - .start = 4, -#endif - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device cf_ide_device = { - .name = "pata_platform", - .id = -1, - .num_resources = ARRAY_SIZE(cf_ide_resources), - .resource = cf_ide_resources, -}; - static struct platform_device *r7780rp_devices[] __initdata = { &m66596_usb_host_device, - &cf_ide_device, }; static int __init 
r7780rp_devices_setup(void) diff --git a/trunk/arch/sh/boards/se/7206/Makefile b/trunk/arch/sh/boards/se/7206/Makefile deleted file mode 100644 index 63950f4f2453..000000000000 --- a/trunk/arch/sh/boards/se/7206/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# -# Makefile for the 7206 SolutionEngine specific parts of the kernel -# - -obj-y := setup.o io.o irq.o -obj-$(CONFIG_HEARTBEAT) += led.o - diff --git a/trunk/arch/sh/boards/se/7206/io.c b/trunk/arch/sh/boards/se/7206/io.c deleted file mode 100644 index b557273e0cbe..000000000000 --- a/trunk/arch/sh/boards/se/7206/io.c +++ /dev/null @@ -1,123 +0,0 @@ -/* $Id: io.c,v 1.5 2004/02/22 23:08:43 kkojima Exp $ - * - * linux/arch/sh/boards/se/7206/io.c - * - * Copyright (C) 2006 Yoshinori Sato - * - * I/O routine for Hitachi 7206 SolutionEngine. - * - */ - -#include -#include -#include -#include - - -static inline void delay(void) -{ - ctrl_inw(0x20000000); /* P2 ROM Area */ -} - -/* MS7750 requires special versions of in*, out* routines, since - PC-like io ports are located at upper half byte of 16-bit word which - can be accessed only with 16-bit wide. */ - -static inline volatile __u16 * -port2adr(unsigned int port) -{ - if (port >= 0x2000) - return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000)); - else if (port >= 0x300 || port < 0x310) - return (volatile __u16 *) (PA_SMSC + (port - 0x300)); -} - -unsigned char se7206_inb(unsigned long port) -{ - return (*port2adr(port))&0xff; -} - -unsigned char se7206_inb_p(unsigned long port) -{ - unsigned long v; - - v = (*port2adr(port))&0xff; - delay(); - return v; -} - -unsigned short se7206_inw(unsigned long port) -{ - return *port2adr(port);; -} - -unsigned int se7206_inl(unsigned long port) -{ - maybebadio(port); - return 0; -} - -void se7206_outb(unsigned char value, unsigned long port) -{ - *(port2adr(port)) = value; -} - -void se7206_outb_p(unsigned char value, unsigned long port) -{ - *(port2adr(port)) = value; - delay(); -} - -void se7206_outw(unsigned short value, unsigned long port) -{ - *port2adr(port) = value; -} - -void se7206_outl(unsigned int value, unsigned long port) -{ - maybebadio(port); -} - -void se7206_insb(unsigned long port, void *addr, unsigned long count) -{ - volatile __u16 *p = port2adr(port); - __u8 *ap = addr; - - while (count--) - *ap++ = *p; -} - -void se7206_insw(unsigned long port, void *addr, unsigned long count) -{ - volatile __u16 *p = port2adr(port); - __u16 *ap = addr; - while (count--) - *ap++ = *p; -} - -void se7206_insl(unsigned long port, void *addr, unsigned long count) -{ - maybebadio(port); -} - -void se7206_outsb(unsigned long port, const void *addr, unsigned long count) -{ - volatile __u16 *p = port2adr(port); - const __u8 *ap = addr; - - while (count--) - *p = *ap++; -} - -void se7206_outsw(unsigned long port, const void *addr, unsigned long count) -{ - volatile __u16 *p = port2adr(port); - const __u16 *ap = addr; - while (count--) - *p = *ap++; -} - -void se7206_outsl(unsigned long port, const void *addr, unsigned long count) -{ - maybebadio(port); -} diff --git a/trunk/arch/sh/boards/se/7206/irq.c b/trunk/arch/sh/boards/se/7206/irq.c deleted file mode 100644 index 3fb0c5f5b23a..000000000000 --- a/trunk/arch/sh/boards/se/7206/irq.c +++ /dev/null @@ -1,139 +0,0 @@ -/* - * linux/arch/sh/boards/se/7206/irq.c - * - * Copyright (C) 2005,2006 Yoshinori Sato - * - * Hitachi SolutionEngine Support. 
- * - */ -#include -#include -#include -#include -#include - -#define INTSTS0 0x31800000 -#define INTSTS1 0x31800002 -#define INTMSK0 0x31800004 -#define INTMSK1 0x31800006 -#define INTSEL 0x31800008 - -static void disable_se7206_irq(unsigned int irq) -{ - unsigned short val; - unsigned short mask = 0xffff ^ (0x0f << 4 * (3 - (IRQ0_IRQ - irq))); - unsigned short msk0,msk1; - - /* Set the priority in IPR to 0 */ - val = ctrl_inw(INTC_IPR01); - val &= mask; - ctrl_outw(val, INTC_IPR01); - /* FPGA mask set */ - msk0 = ctrl_inw(INTMSK0); - msk1 = ctrl_inw(INTMSK1); - - switch (irq) { - case IRQ0_IRQ: - msk0 |= 0x0010; - break; - case IRQ1_IRQ: - msk0 |= 0x000f; - break; - case IRQ2_IRQ: - msk0 |= 0x0f00; - msk1 |= 0x00ff; - break; - } - ctrl_outw(msk0, INTMSK0); - ctrl_outw(msk1, INTMSK1); -} - -static void enable_se7206_irq(unsigned int irq) -{ - unsigned short val; - unsigned short value = (0x0001 << 4 * (3 - (IRQ0_IRQ - irq))); - unsigned short msk0,msk1; - - /* Set priority in IPR back to original value */ - val = ctrl_inw(INTC_IPR01); - val |= value; - ctrl_outw(val, INTC_IPR01); - - /* FPGA mask reset */ - msk0 = ctrl_inw(INTMSK0); - msk1 = ctrl_inw(INTMSK1); - - switch (irq) { - case IRQ0_IRQ: - msk0 &= ~0x0010; - break; - case IRQ1_IRQ: - msk0 &= ~0x000f; - break; - case IRQ2_IRQ: - msk0 &= ~0x0f00; - msk1 &= ~0x00ff; - break; - } - ctrl_outw(msk0, INTMSK0); - ctrl_outw(msk1, INTMSK1); -} - -static void eoi_se7206_irq(unsigned int irq) -{ - unsigned short sts0,sts1; - - if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) - enable_se7206_irq(irq); - /* FPGA isr clear */ - sts0 = ctrl_inw(INTSTS0); - sts1 = ctrl_inw(INTSTS1); - - switch (irq) { - case IRQ0_IRQ: - sts0 &= ~0x0010; - break; - case IRQ1_IRQ: - sts0 &= ~0x000f; - break; - case IRQ2_IRQ: - sts0 &= ~0x0f00; - sts1 &= ~0x00ff; - break; - } - ctrl_outw(sts0, INTSTS0); - ctrl_outw(sts1, INTSTS1); -} - -static struct irq_chip se7206_irq_chip __read_mostly = { - .name = "SE7206-FPGA-IRQ", - .mask = disable_se7206_irq, - .unmask = enable_se7206_irq, - .mask_ack = disable_se7206_irq, - .eoi = eoi_se7206_irq, -}; - -static void make_se7206_irq(unsigned int irq) -{ - disable_irq_nosync(irq); - set_irq_chip_and_handler_name(irq, &se7206_irq_chip, - handle_level_irq, "level"); - disable_se7206_irq(irq); -} - -/* - * Initialize IRQ setting - */ -void __init init_se7206_IRQ(void) -{ - make_se7206_irq(IRQ0_IRQ); /* SMC91C111 */ - make_se7206_irq(IRQ1_IRQ); /* ATA */ - make_se7206_irq(IRQ3_IRQ); /* SLOT / PCM */ - ctrl_outw(inw(INTC_ICR1) | 0x000b ,INTC_ICR1 ) ; /* ICR1 */ - - /* FPGA System register setup*/ - ctrl_outw(0x0000,INTSTS0); /* Clear INTSTS0 */ - ctrl_outw(0x0000,INTSTS1); /* Clear INTSTS1 */ - /* IRQ0=LAN, IRQ1=ATA, IRQ3=SLT,PCM */ - ctrl_outw(0x0001,INTSEL); -} diff --git a/trunk/arch/sh/boards/se/7206/led.c b/trunk/arch/sh/boards/se/7206/led.c deleted file mode 100644 index ef794601ab86..000000000000 --- a/trunk/arch/sh/boards/se/7206/led.c +++ /dev/null @@ -1,57 +0,0 @@ -/* - * linux/arch/sh/kernel/led_se.c - * - * Copyright (C) 2000 Stuart Menefy - * - * May be copied or modified under the terms of the GNU General Public - * License. See linux/COPYING for more information. - * - * This file contains Solution Engine specific LED code. 
- */ - -#include -#include - -#ifdef CONFIG_HEARTBEAT - -#include - -/* Cycle the LED's in the clasic Knightrider/Sun pattern */ -void heartbeat_se(void) -{ - static unsigned int cnt = 0, period = 0; - volatile unsigned short* p = (volatile unsigned short*)PA_LED; - static unsigned bit = 0, up = 1; - - cnt += 1; - if (cnt < period) { - return; - } - - cnt = 0; - - /* Go through the points (roughly!): - * f(0)=10, f(1)=16, f(2)=20, f(5)=35,f(inf)->110 - */ - period = 110 - ( (300< -#include -#include -#include -#include - -static struct resource smc91x_resources[] = { - [0] = { - .start = 0x300, - .end = 0x300 + 0x020 - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = 64, - .end = 64, - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device smc91x_device = { - .name = "smc91x", - .id = -1, - .num_resources = ARRAY_SIZE(smc91x_resources), - .resource = smc91x_resources, -}; - -static int __init se7206_devices_setup(void) -{ - return platform_device_register(&smc91x_device); -} - -__initcall(se7206_devices_setup); - -void heartbeat_se(void); - -/* - * The Machine Vector - */ - -struct sh_machine_vector mv_se __initmv = { - .mv_name = "SolutionEngine", - .mv_nr_irqs = 256, - .mv_inb = se7206_inb, - .mv_inw = se7206_inw, - .mv_inl = se7206_inl, - .mv_outb = se7206_outb, - .mv_outw = se7206_outw, - .mv_outl = se7206_outl, - - .mv_inb_p = se7206_inb_p, - .mv_inw_p = se7206_inw, - .mv_inl_p = se7206_inl, - .mv_outb_p = se7206_outb_p, - .mv_outw_p = se7206_outw, - .mv_outl_p = se7206_outl, - - .mv_insb = se7206_insb, - .mv_insw = se7206_insw, - .mv_insl = se7206_insl, - .mv_outsb = se7206_outsb, - .mv_outsw = se7206_outsw, - .mv_outsl = se7206_outsl, - - .mv_init_irq = init_se7206_IRQ, -#ifdef CONFIG_HEARTBEAT - .mv_heartbeat = heartbeat_se, -#endif -}; -ALIAS_MV(se) diff --git a/trunk/arch/sh/boards/se/7619/Makefile b/trunk/arch/sh/boards/se/7619/Makefile deleted file mode 100644 index 3666eca8a658..000000000000 --- a/trunk/arch/sh/boards/se/7619/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for the 7619 SolutionEngine specific parts of the kernel -# - -obj-y := setup.o io.o diff --git a/trunk/arch/sh/boards/se/7619/io.c b/trunk/arch/sh/boards/se/7619/io.c deleted file mode 100644 index 176f1f39cd9d..000000000000 --- a/trunk/arch/sh/boards/se/7619/io.c +++ /dev/null @@ -1,102 +0,0 @@ -/* - * - * linux/arch/sh/boards/se/7619/io.c - * - * Copyright (C) 2006 Yoshinori Sato - * - * I/O routine for Hitachi 7619 SolutionEngine. 
- * - */ - -#include -#include -#include -#include -#include - -/* FIXME: M3A-ZAB7 Compact Flash Slot support */ - -static inline void delay(void) -{ - ctrl_inw(0xa0000000); /* Uncached ROM area (P2) */ -} - -#define badio(name,port) \ - printk("bad I/O operation (%s) for port 0x%lx at 0x%08x\n", \ - #name, (port), (__u32) __builtin_return_address(0)) - -unsigned char se7619___inb(unsigned long port) -{ - badio(inb, port); - return 0; -} - -unsigned char se7619___inb_p(unsigned long port) -{ - badio(inb_p, port); - delay(); - return 0; -} - -unsigned short se7619___inw(unsigned long port) -{ - badio(inw, port); - return 0; -} - -unsigned int se7619___inl(unsigned long port) -{ - badio(inl, port); - return 0; -} - -void se7619___outb(unsigned char value, unsigned long port) -{ - badio(outb, port); -} - -void se7619___outb_p(unsigned char value, unsigned long port) -{ - badio(outb_p, port); - delay(); -} - -void se7619___outw(unsigned short value, unsigned long port) -{ - badio(outw, port); -} - -void se7619___outl(unsigned int value, unsigned long port) -{ - badio(outl, port); -} - -void se7619___insb(unsigned long port, void *addr, unsigned long count) -{ - badio(inw, port); -} - -void se7619___insw(unsigned long port, void *addr, unsigned long count) -{ - badio(inw, port); -} - -void se7619___insl(unsigned long port, void *addr, unsigned long count) -{ - badio(insl, port); -} - -void se7619___outsb(unsigned long port, const void *addr, unsigned long count) -{ - badio(insl, port); -} - -void se7619___outsw(unsigned long port, const void *addr, unsigned long count) -{ - badio(insl, port); -} - -void se7619___outsl(unsigned long port, const void *addr, unsigned long count) -{ - badio(outsw, port); -} diff --git a/trunk/arch/sh/boards/se/7619/setup.c b/trunk/arch/sh/boards/se/7619/setup.c deleted file mode 100644 index e627b26de0d0..000000000000 --- a/trunk/arch/sh/boards/se/7619/setup.c +++ /dev/null @@ -1,43 +0,0 @@ -/* - * arch/sh/boards/se/7619/setup.c - * - * Copyright (C) 2006 Yoshinori Sato - * - * Hitachi SH7619 SolutionEngine Support. - */ - -#include -#include -#include -#include -#include - -/* - * The Machine Vector - */ - -struct sh_machine_vector mv_se __initmv = { - .mv_name = "SolutionEngine", - .mv_nr_irqs = 108, - .mv_inb = se7619___inb, - .mv_inw = se7619___inw, - .mv_inl = se7619___inl, - .mv_outb = se7619___outb, - .mv_outw = se7619___outw, - .mv_outl = se7619___outl, - - .mv_inb_p = se7619___inb_p, - .mv_inw_p = se7619___inw, - .mv_inl_p = se7619___inl, - .mv_outb_p = se7619___outb_p, - .mv_outw_p = se7619___outw, - .mv_outl_p = se7619___outl, - - .mv_insb = se7619___insb, - .mv_insw = se7619___insw, - .mv_insl = se7619___insl, - .mv_outsb = se7619___outsb, - .mv_outsw = se7619___outsw, - .mv_outsl = se7619___outsl, -}; -ALIAS_MV(se) diff --git a/trunk/arch/sh/boards/titan/setup.c b/trunk/arch/sh/boards/titan/setup.c index 6bcd939bfaed..a6046d93758b 100644 --- a/trunk/arch/sh/boards/titan/setup.c +++ b/trunk/arch/sh/boards/titan/setup.c @@ -1,30 +1,26 @@ /* - * arch/sh/boards/titan/setup.c - Setup for Titan - * - * Copyright (C) 2006 Jamie Lenehan - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
+ * Setup for Titan */ + #include -#include +#include #include #include +extern void __init pcibios_init_platform(void); + static struct ipr_data titan_ipr_map[] = { - /* IRQ, IPR idx, shift, prio */ - { TITAN_IRQ_WAN, 3, 12, 8 }, /* eth0 (WAN) */ - { TITAN_IRQ_LAN, 3, 8, 8 }, /* eth1 (LAN) */ - { TITAN_IRQ_MPCIA, 3, 4, 8 }, /* mPCI A (top) */ - { TITAN_IRQ_USB, 3, 0, 8 }, /* mPCI B (bottom), USB */ + { TITAN_IRQ_WAN, IRL0_IPR_ADDR, IRL0_IPR_POS, IRL0_PRIORITY }, + { TITAN_IRQ_LAN, IRL1_IPR_ADDR, IRL1_IPR_POS, IRL1_PRIORITY }, + { TITAN_IRQ_MPCIA, IRL2_IPR_ADDR, IRL2_IPR_POS, IRL2_PRIORITY }, + { TITAN_IRQ_USB, IRL3_IPR_ADDR, IRL3_IPR_POS, IRL3_PRIORITY }, }; static void __init init_titan_irq(void) { /* enable individual interrupt mode for externals */ - ipr_irq_enable_irlm(); - /* register ipr irqs */ + ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); + make_ipr_irq(titan_ipr_map, ARRAY_SIZE(titan_ipr_map)); } @@ -51,5 +47,6 @@ struct sh_machine_vector mv_titan __initmv = { .mv_ioport_map = titan_ioport_map, .mv_init_irq = init_titan_irq, + .mv_init_pci = pcibios_init_platform, }; ALIAS_MV(titan) diff --git a/trunk/arch/sh/boot/compressed/misc.c b/trunk/arch/sh/boot/compressed/misc.c index 35452d85b7f7..f2fed5ce5cc3 100644 --- a/trunk/arch/sh/boot/compressed/misc.c +++ b/trunk/arch/sh/boot/compressed/misc.c @@ -12,7 +12,6 @@ */ #include -#include #ifdef CONFIG_SH_STANDARD_BIOS #include #endif @@ -229,7 +228,7 @@ long* stack_start = &user_stack[STACK_SIZE]; void decompress_kernel(void) { output_data = 0; - output_ptr = P2SEGADDR((unsigned long)&_text+0x1000); + output_ptr = (unsigned long)&_text+0x20001000; free_mem_ptr = (unsigned long)&_end; free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; diff --git a/trunk/arch/sh/configs/r7780rp_defconfig b/trunk/arch/sh/configs/r7780rp_defconfig index 2b75b4896ba5..34e2046c3213 100644 --- a/trunk/arch/sh/configs/r7780rp_defconfig +++ b/trunk/arch/sh/configs/r7780rp_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.19 -# Wed Dec 6 11:59:38 2006 +# Linux kernel version: 2.6.19-rc3 +# Tue Oct 31 12:32:06 2006 # CONFIG_SUPERH=y CONFIG_RWSEM_GENERIC_SPINLOCK=y @@ -11,8 +11,6 @@ CONFIG_GENERIC_HARDIRQS=y CONFIG_GENERIC_IRQ_PROBE=y CONFIG_GENERIC_CALIBRATE_DELAY=y # CONFIG_GENERIC_TIME is not set -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_LOCKDEP_SUPPORT=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" # @@ -39,7 +37,6 @@ CONFIG_BSD_PROCESS_ACCT=y # CONFIG_AUDIT is not set CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y -# CONFIG_SYSFS_DEPRECATED is not set # CONFIG_RELAY is not set CONFIG_INITRAMFS_SOURCE="" CONFIG_CC_OPTIMIZE_FOR_SIZE=y @@ -121,8 +118,6 @@ CONFIG_SH_R7780RP=y # CONFIG_SH_LANDISK is not set # CONFIG_SH_TITAN is not set # CONFIG_SH_SHMIN is not set -# CONFIG_SH_7206_SOLUTION_ENGINE is not set -# CONFIG_SH_7619_SOLUTION_ENGINE is not set # CONFIG_SH_UNKNOWN is not set # @@ -135,12 +130,6 @@ CONFIG_CPU_SH4A=y # SH-2 Processor Support # # CONFIG_CPU_SUBTYPE_SH7604 is not set -# CONFIG_CPU_SUBTYPE_SH7619 is not set - -# -# SH-2A Processor Support -# -# CONFIG_CPU_SUBTYPE_SH7206 is not set # # SH-3 Processor Support @@ -176,7 +165,6 @@ CONFIG_CPU_SH4A=y # # CONFIG_CPU_SUBTYPE_SH7770 is not set CONFIG_CPU_SUBTYPE_SH7780=y -# CONFIG_CPU_SUBTYPE_SH7785 is not set # # SH4AL-DSP Processor Support @@ -193,14 +181,8 @@ CONFIG_MEMORY_START=0x08000000 CONFIG_MEMORY_SIZE=0x08000000 # CONFIG_32BIT is not set CONFIG_VSYSCALL=y -CONFIG_PAGE_SIZE_4KB=y -# CONFIG_PAGE_SIZE_8KB is not set -# 
CONFIG_PAGE_SIZE_64KB is not set CONFIG_HUGETLB_PAGE_SIZE_64K=y -# CONFIG_HUGETLB_PAGE_SIZE_256K is not set # CONFIG_HUGETLB_PAGE_SIZE_1MB is not set -# CONFIG_HUGETLB_PAGE_SIZE_4MB is not set -# CONFIG_HUGETLB_PAGE_SIZE_64MB is not set CONFIG_SELECT_MEMORY_MODEL=y CONFIG_FLATMEM_MANUAL=y # CONFIG_DISCONTIGMEM_MANUAL is not set @@ -222,14 +204,12 @@ CONFIG_SPLIT_PTLOCK_CPUS=4 # Processor features # CONFIG_CPU_LITTLE_ENDIAN=y -# CONFIG_CPU_BIG_ENDIAN is not set CONFIG_SH_FPU=y # CONFIG_SH_DSP is not set CONFIG_SH_STORE_QUEUES=y CONFIG_CPU_HAS_INTEVT=y CONFIG_CPU_HAS_INTC2_IRQ=y CONFIG_CPU_HAS_SR_RB=y -CONFIG_CPU_HAS_PTEA=y # # Timer support @@ -240,8 +220,6 @@ CONFIG_SH_TMU=y # R7780RP options # CONFIG_SH_R7780MP=y -CONFIG_SH_TIMER_IRQ=28 -CONFIG_NO_IDLE_HZ=y CONFIG_SH_PCLK_FREQ=32000000 # @@ -259,11 +237,6 @@ CONFIG_SH_PCLK_FREQ=32000000 # # CONFIG_HD6446X_SERIES is not set -# -# Additional SuperH Device Drivers -# -CONFIG_PUSH_SWITCH=y - # # Kernel features # @@ -271,7 +244,7 @@ CONFIG_PUSH_SWITCH=y CONFIG_HZ_250=y # CONFIG_HZ_1000 is not set CONFIG_HZ=250 -CONFIG_KEXEC=y +# CONFIG_KEXEC is not set # CONFIG_SMP is not set # CONFIG_PREEMPT_NONE is not set # CONFIG_PREEMPT_VOLUNTARY is not set @@ -305,7 +278,10 @@ CONFIG_PCI_AUTO_UPDATE_RESOURCES=y # # PCI Hotplug Support # -# CONFIG_HOTPLUG_PCI is not set +CONFIG_HOTPLUG_PCI=y +# CONFIG_HOTPLUG_PCI_FAKE is not set +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set # # Executable file formats @@ -365,7 +341,6 @@ CONFIG_INET_TCP_DIAG=y # CONFIG_TCP_CONG_ADVANCED is not set CONFIG_TCP_CONG_CUBIC=y CONFIG_DEFAULT_TCP_CONG="cubic" -# CONFIG_TCP_MD5SIG is not set # CONFIG_IPV6 is not set # CONFIG_INET6_XFRM_TUNNEL is not set # CONFIG_INET6_TUNNEL is not set @@ -581,7 +556,6 @@ CONFIG_SATA_SIL=y # CONFIG_PATA_IT821X is not set # CONFIG_PATA_JMICRON is not set # CONFIG_PATA_TRIFLEX is not set -# CONFIG_PATA_MARVELL is not set # CONFIG_PATA_MPIIX is not set # CONFIG_PATA_OLDPIIX is not set # CONFIG_PATA_NETCELL is not set @@ -598,7 +572,6 @@ CONFIG_SATA_SIL=y # CONFIG_PATA_SIS is not set # CONFIG_PATA_VIA is not set # CONFIG_PATA_WINBOND is not set -CONFIG_PATA_PLATFORM=y # # Multi-device support (RAID and LVM) @@ -715,7 +688,6 @@ CONFIG_R8169=y # CONFIG_IXGB is not set # CONFIG_S2IO is not set # CONFIG_MYRI10GE is not set -# CONFIG_NETXEN_NIC is not set # # Token Ring devices @@ -858,6 +830,10 @@ CONFIG_HW_RANDOM=y # CONFIG_DTLK is not set # CONFIG_R3964 is not set # CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# # CONFIG_DRM is not set # CONFIG_RAW_DRIVER is not set @@ -1044,7 +1020,7 @@ CONFIG_INOTIFY_USER=y CONFIG_DNOTIFY=y # CONFIG_AUTOFS_FS is not set # CONFIG_AUTOFS4_FS is not set -CONFIG_FUSE_FS=m +# CONFIG_FUSE_FS is not set # # CD-ROM/DVD Filesystems @@ -1076,7 +1052,7 @@ CONFIG_TMPFS=y CONFIG_HUGETLBFS=y CONFIG_HUGETLB_PAGE=y CONFIG_RAMFS=y -CONFIG_CONFIGFS_FS=m +# CONFIG_CONFIGFS_FS is not set # # Miscellaneous filesystems @@ -1177,33 +1153,28 @@ CONFIG_NLS_ISO8859_1=y # # Profiling support # -CONFIG_PROFILING=y -CONFIG_OPROFILE=m +# CONFIG_PROFILING is not set # # Kernel hacking # -CONFIG_TRACE_IRQFLAGS_SUPPORT=y -CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_TIME is not set CONFIG_ENABLE_MUST_CHECK=y -CONFIG_MAGIC_SYSRQ=y +# CONFIG_MAGIC_SYSRQ is not set # CONFIG_UNUSED_SYMBOLS is not set CONFIG_DEBUG_KERNEL=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_SCHEDSTATS is not set # CONFIG_DEBUG_SLAB is not set -# CONFIG_DEBUG_PREEMPT is not set -# CONFIG_DEBUG_SPINLOCK is not set 
+CONFIG_DEBUG_SPINLOCK=y # CONFIG_DEBUG_MUTEXES is not set # CONFIG_DEBUG_RWSEMS is not set -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_PROVE_LOCKING is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set # CONFIG_DEBUG_KOBJECT is not set -CONFIG_DEBUG_BUGVERBOSE=y -CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_BUGVERBOSE is not set +# CONFIG_DEBUG_INFO is not set CONFIG_DEBUG_FS=y # CONFIG_DEBUG_VM is not set # CONFIG_DEBUG_LIST is not set @@ -1213,7 +1184,7 @@ CONFIG_FORCED_INLINING=y # CONFIG_RCU_TORTURE_TEST is not set # CONFIG_SH_STANDARD_BIOS is not set # CONFIG_EARLY_SCIF_CONSOLE is not set -CONFIG_DEBUG_STACKOVERFLOW=y +# CONFIG_DEBUG_STACKOVERFLOW is not set # CONFIG_DEBUG_STACK_USAGE is not set # CONFIG_4KSTACKS is not set # CONFIG_KGDB is not set diff --git a/trunk/arch/sh/configs/se7206_defconfig b/trunk/arch/sh/configs/se7206_defconfig deleted file mode 100644 index 36cec0b6e7c1..000000000000 --- a/trunk/arch/sh/configs/se7206_defconfig +++ /dev/null @@ -1,826 +0,0 @@ -# -# Automatically generated make config: don't edit -# Linux kernel version: 2.6.19-rc4 -# Sun Nov 5 16:20:10 2006 -# -CONFIG_SUPERH=y -CONFIG_RWSEM_GENERIC_SPINLOCK=y -CONFIG_GENERIC_FIND_NEXT_BIT=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_GENERIC_HARDIRQS=y -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -# CONFIG_GENERIC_TIME is not set -CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" - -# -# Code maturity level options -# -CONFIG_EXPERIMENTAL=y -CONFIG_BROKEN_ON_SMP=y -CONFIG_INIT_ENV_ARG_LIMIT=32 - -# -# General setup -# -CONFIG_LOCALVERSION="" -# CONFIG_LOCALVERSION_AUTO is not set -# CONFIG_SYSVIPC is not set -# CONFIG_POSIX_MQUEUE is not set -# CONFIG_BSD_PROCESS_ACCT is not set -# CONFIG_TASKSTATS is not set -# CONFIG_UTS_NS is not set -# CONFIG_AUDIT is not set -# CONFIG_IKCONFIG is not set -# CONFIG_RELAY is not set -CONFIG_INITRAMFS_SOURCE="" -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_SYSCTL=y -CONFIG_EMBEDDED=y -CONFIG_UID16=y -# CONFIG_SYSCTL_SYSCALL is not set -CONFIG_KALLSYMS=y -# CONFIG_KALLSYMS_EXTRA_PASS is not set -# CONFIG_HOTPLUG is not set -CONFIG_PRINTK=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_BASE_FULL=y -# CONFIG_FUTEX is not set -# CONFIG_EPOLL is not set -CONFIG_SLAB=y -CONFIG_VM_EVENT_COUNTERS=y -CONFIG_TINY_SHMEM=y -CONFIG_BASE_SMALL=0 -# CONFIG_SLOB is not set - -# -# Loadable module support -# -# CONFIG_MODULES is not set - -# -# Block layer -# -CONFIG_BLOCK=y -# CONFIG_LBD is not set -# CONFIG_LSF is not set - -# -# IO Schedulers -# -CONFIG_IOSCHED_NOOP=y -# CONFIG_IOSCHED_AS is not set -# CONFIG_IOSCHED_DEADLINE is not set -# CONFIG_IOSCHED_CFQ is not set -# CONFIG_DEFAULT_AS is not set -# CONFIG_DEFAULT_DEADLINE is not set -# CONFIG_DEFAULT_CFQ is not set -CONFIG_DEFAULT_NOOP=y -CONFIG_DEFAULT_IOSCHED="noop" - -# -# System type -# -# CONFIG_SH_SOLUTION_ENGINE is not set -# CONFIG_SH_7751_SOLUTION_ENGINE is not set -# CONFIG_SH_7300_SOLUTION_ENGINE is not set -# CONFIG_SH_7343_SOLUTION_ENGINE is not set -# CONFIG_SH_73180_SOLUTION_ENGINE is not set -# CONFIG_SH_7751_SYSTEMH is not set -# CONFIG_SH_HP6XX is not set -# CONFIG_SH_EC3104 is not set -# CONFIG_SH_SATURN is not set -# CONFIG_SH_DREAMCAST is not set -# CONFIG_SH_BIGSUR is not set -# CONFIG_SH_MPC1211 is not set -# CONFIG_SH_SH03 is not set -# CONFIG_SH_SECUREEDGE5410 is not set -# CONFIG_SH_HS7751RVOIP is not set -# CONFIG_SH_7710VOIPGW is not set -# CONFIG_SH_RTS7751R2D is not set -# CONFIG_SH_R7780RP is not set -# CONFIG_SH_EDOSK7705 is not set -# 
CONFIG_SH_SH4202_MICRODEV is not set -# CONFIG_SH_LANDISK is not set -# CONFIG_SH_TITAN is not set -# CONFIG_SH_SHMIN is not set -CONFIG_SH_7206_SOLUTION_ENGINE=y -# CONFIG_SH_7619_SOLUTION_ENGINE is not set -# CONFIG_SH_UNKNOWN is not set - -# -# Processor selection -# -CONFIG_CPU_SH2=y -CONFIG_CPU_SH2A=y - -# -# SH-2 Processor Support -# -# CONFIG_CPU_SUBTYPE_SH7604 is not set -# CONFIG_CPU_SUBTYPE_SH7619 is not set - -# -# SH-2A Processor Support -# -CONFIG_CPU_SUBTYPE_SH7206=y - -# -# SH-3 Processor Support -# -# CONFIG_CPU_SUBTYPE_SH7300 is not set -# CONFIG_CPU_SUBTYPE_SH7705 is not set -# CONFIG_CPU_SUBTYPE_SH7706 is not set -# CONFIG_CPU_SUBTYPE_SH7707 is not set -# CONFIG_CPU_SUBTYPE_SH7708 is not set -# CONFIG_CPU_SUBTYPE_SH7709 is not set -# CONFIG_CPU_SUBTYPE_SH7710 is not set - -# -# SH-4 Processor Support -# -# CONFIG_CPU_SUBTYPE_SH7750 is not set -# CONFIG_CPU_SUBTYPE_SH7091 is not set -# CONFIG_CPU_SUBTYPE_SH7750R is not set -# CONFIG_CPU_SUBTYPE_SH7750S is not set -# CONFIG_CPU_SUBTYPE_SH7751 is not set -# CONFIG_CPU_SUBTYPE_SH7751R is not set -# CONFIG_CPU_SUBTYPE_SH7760 is not set -# CONFIG_CPU_SUBTYPE_SH4_202 is not set - -# -# ST40 Processor Support -# -# CONFIG_CPU_SUBTYPE_ST40STB1 is not set -# CONFIG_CPU_SUBTYPE_ST40GX1 is not set - -# -# SH-4A Processor Support -# -# CONFIG_CPU_SUBTYPE_SH7770 is not set -# CONFIG_CPU_SUBTYPE_SH7780 is not set - -# -# SH4AL-DSP Processor Support -# -# CONFIG_CPU_SUBTYPE_SH73180 is not set -# CONFIG_CPU_SUBTYPE_SH7343 is not set - -# -# Memory management options -# -CONFIG_PAGE_OFFSET=0x00000000 -CONFIG_MEMORY_START=0x0c000000 -CONFIG_MEMORY_SIZE=0x02000000 -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_FLATMEM_MANUAL=y -# CONFIG_DISCONTIGMEM_MANUAL is not set -# CONFIG_SPARSEMEM_MANUAL is not set -CONFIG_FLATMEM=y -CONFIG_FLAT_NODE_MEM_MAP=y -# CONFIG_SPARSEMEM_STATIC is not set -CONFIG_SPLIT_PTLOCK_CPUS=4 -# CONFIG_RESOURCES_64BIT is not set - -# -# Cache configuration -# -# CONFIG_SH_DIRECT_MAPPED is not set -# CONFIG_SH_WRITETHROUGH is not set -# CONFIG_SH_OCRAM is not set - -# -# Processor features -# -# CONFIG_CPU_LITTLE_ENDIAN is not set -# CONFIG_SH_FPU is not set -# CONFIG_SH_FPU_EMU is not set -# CONFIG_SH_DSP is not set - -# -# Timer support -# -CONFIG_SH_CMT=y -# CONFIG_SH_MTU2 is not set -CONFIG_SH_PCLK_FREQ=33333333 -CONFIG_SH_CLK_MD=6 - -# -# CPU Frequency scaling -# -# CONFIG_CPU_FREQ is not set - -# -# DMA support -# -# CONFIG_SH_DMA is not set - -# -# Companion Chips -# -# CONFIG_HD6446X_SERIES is not set - -# -# Kernel features -# -CONFIG_HZ_100=y -# CONFIG_HZ_250 is not set -# CONFIG_HZ_1000 is not set -CONFIG_HZ=100 -# CONFIG_KEXEC is not set -# CONFIG_SMP is not set -CONFIG_PREEMPT_NONE=y -# CONFIG_PREEMPT_VOLUNTARY is not set -# CONFIG_PREEMPT is not set - -# -# Boot options -# -CONFIG_ZERO_PAGE_OFFSET=0x00001000 -CONFIG_BOOT_LINK_OFFSET=0x00800000 -# CONFIG_UBC_WAKEUP is not set -# CONFIG_CMDLINE_BOOL is not set - -# -# Bus options -# -# CONFIG_PCI is not set - -# -# PCCARD (PCMCIA/CardBus) support -# - -# -# PCI Hotplug Support -# - -# -# Executable file formats -# -CONFIG_BINFMT_FLAT=y -CONFIG_BINFMT_ZFLAT=y -# CONFIG_BINFMT_SHARED_FLAT is not set -# CONFIG_BINFMT_MISC is not set - -# -# Power management options (EXPERIMENTAL) -# -# CONFIG_PM is not set - -# -# Networking -# -CONFIG_NET=y - -# -# Networking options -# -# CONFIG_NETDEBUG is not set -# CONFIG_PACKET is not set -# CONFIG_UNIX is not set -CONFIG_XFRM=y -# CONFIG_XFRM_USER is not set -# CONFIG_XFRM_SUB_POLICY is not set -# CONFIG_NET_KEY is not set 
-CONFIG_INET=y -# CONFIG_IP_MULTICAST is not set -# CONFIG_IP_ADVANCED_ROUTER is not set -CONFIG_IP_FIB_HASH=y -# CONFIG_IP_PNP is not set -# CONFIG_NET_IPIP is not set -# CONFIG_NET_IPGRE is not set -# CONFIG_ARPD is not set -# CONFIG_SYN_COOKIES is not set -# CONFIG_INET_AH is not set -# CONFIG_INET_ESP is not set -# CONFIG_INET_IPCOMP is not set -# CONFIG_INET_XFRM_TUNNEL is not set -# CONFIG_INET_TUNNEL is not set -CONFIG_INET_XFRM_MODE_TRANSPORT=y -CONFIG_INET_XFRM_MODE_TUNNEL=y -CONFIG_INET_XFRM_MODE_BEET=y -# CONFIG_INET_DIAG is not set -# CONFIG_TCP_CONG_ADVANCED is not set -CONFIG_TCP_CONG_CUBIC=y -CONFIG_DEFAULT_TCP_CONG="cubic" -# CONFIG_IPV6 is not set -# CONFIG_INET6_XFRM_TUNNEL is not set -# CONFIG_INET6_TUNNEL is not set -# CONFIG_NETWORK_SECMARK is not set -# CONFIG_NETFILTER is not set - -# -# DCCP Configuration (EXPERIMENTAL) -# -# CONFIG_IP_DCCP is not set - -# -# SCTP Configuration (EXPERIMENTAL) -# -# CONFIG_IP_SCTP is not set - -# -# TIPC Configuration (EXPERIMENTAL) -# -# CONFIG_TIPC is not set -# CONFIG_ATM is not set -# CONFIG_BRIDGE is not set -# CONFIG_VLAN_8021Q is not set -# CONFIG_DECNET is not set -# CONFIG_LLC2 is not set -# CONFIG_IPX is not set -# CONFIG_ATALK is not set -# CONFIG_X25 is not set -# CONFIG_LAPB is not set -# CONFIG_ECONET is not set -# CONFIG_WAN_ROUTER is not set - -# -# QoS and/or fair queueing -# -# CONFIG_NET_SCHED is not set - -# -# Network testing -# -# CONFIG_NET_PKTGEN is not set -# CONFIG_HAMRADIO is not set -# CONFIG_IRDA is not set -# CONFIG_BT is not set -# CONFIG_IEEE80211 is not set - -# -# Device Drivers -# - -# -# Generic Driver Options -# -# CONFIG_STANDALONE is not set -# CONFIG_PREVENT_FIRMWARE_BUILD is not set -# CONFIG_SYS_HYPERVISOR is not set - -# -# Connector - unified userspace <-> kernelspace linker -# -# CONFIG_CONNECTOR is not set - -# -# Memory Technology Devices (MTD) -# -CONFIG_MTD=y -# CONFIG_MTD_DEBUG is not set -# CONFIG_MTD_CONCAT is not set -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_REDBOOT_PARTS=y -CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1 -# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set -# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set -# CONFIG_MTD_CMDLINE_PARTS is not set - -# -# User Modules And Translation Layers -# -CONFIG_MTD_CHAR=y -CONFIG_MTD_BLOCK=y -# CONFIG_FTL is not set -# CONFIG_NFTL is not set -# CONFIG_INFTL is not set -# CONFIG_RFD_FTL is not set -# CONFIG_SSFDC is not set - -# -# RAM/ROM/Flash chip drivers -# -CONFIG_MTD_CFI=y -# CONFIG_MTD_JEDECPROBE is not set -CONFIG_MTD_GEN_PROBE=y -# CONFIG_MTD_CFI_ADV_OPTIONS is not set -CONFIG_MTD_MAP_BANK_WIDTH_1=y -CONFIG_MTD_MAP_BANK_WIDTH_2=y -CONFIG_MTD_MAP_BANK_WIDTH_4=y -# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set -# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set -# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set -CONFIG_MTD_CFI_I1=y -CONFIG_MTD_CFI_I2=y -# CONFIG_MTD_CFI_I4 is not set -# CONFIG_MTD_CFI_I8 is not set -# CONFIG_MTD_CFI_INTELEXT is not set -CONFIG_MTD_CFI_AMDSTD=y -# CONFIG_MTD_CFI_STAA is not set -CONFIG_MTD_CFI_UTIL=y -# CONFIG_MTD_RAM is not set -# CONFIG_MTD_ROM is not set -# CONFIG_MTD_ABSENT is not set -# CONFIG_MTD_OBSOLETE_CHIPS is not set - -# -# Mapping drivers for chip access -# -# CONFIG_MTD_COMPLEX_MAPPINGS is not set -CONFIG_MTD_PHYSMAP=y -CONFIG_MTD_PHYSMAP_START=0x20000000 -CONFIG_MTD_PHYSMAP_LEN=0x1000000 -CONFIG_MTD_PHYSMAP_BANKWIDTH=4 -# CONFIG_MTD_SOLUTIONENGINE is not set -# CONFIG_MTD_UCLINUX is not set -# CONFIG_MTD_PLATRAM is not set - -# -# Self-contained MTD device drivers -# -# CONFIG_MTD_SLRAM is not set -# CONFIG_MTD_PHRAM 
is not set -# CONFIG_MTD_MTDRAM is not set -# CONFIG_MTD_BLOCK2MTD is not set - -# -# Disk-On-Chip Device Drivers -# -# CONFIG_MTD_DOC2000 is not set -# CONFIG_MTD_DOC2001 is not set -# CONFIG_MTD_DOC2001PLUS is not set - -# -# NAND Flash Device Drivers -# -# CONFIG_MTD_NAND is not set - -# -# OneNAND Flash Device Drivers -# -# CONFIG_MTD_ONENAND is not set - -# -# Parallel port support -# -# CONFIG_PARPORT is not set - -# -# Plug and Play support -# - -# -# Block devices -# -# CONFIG_BLK_DEV_COW_COMMON is not set -# CONFIG_BLK_DEV_LOOP is not set -# CONFIG_BLK_DEV_NBD is not set -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=4096 -CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 -# CONFIG_BLK_DEV_INITRD is not set -# CONFIG_CDROM_PKTCDVD is not set -# CONFIG_ATA_OVER_ETH is not set - -# -# Misc devices -# -# CONFIG_TIFM_CORE is not set - -# -# ATA/ATAPI/MFM/RLL support -# -# CONFIG_IDE is not set - -# -# SCSI device support -# -# CONFIG_RAID_ATTRS is not set -# CONFIG_SCSI is not set -# CONFIG_SCSI_NETLINK is not set - -# -# Serial ATA (prod) and Parallel ATA (experimental) drivers -# -# CONFIG_ATA is not set - -# -# Multi-device support (RAID and LVM) -# -# CONFIG_MD is not set - -# -# Fusion MPT device support -# -# CONFIG_FUSION is not set - -# -# IEEE 1394 (FireWire) support -# - -# -# I2O device support -# - -# -# Network device support -# -# CONFIG_NETDEVICES is not set -# CONFIG_NETPOLL is not set -# CONFIG_NET_POLL_CONTROLLER is not set - -# -# ISDN subsystem -# -# CONFIG_ISDN is not set - -# -# Telephony Support -# -# CONFIG_PHONE is not set - -# -# Input device support -# -# CONFIG_INPUT is not set - -# -# Hardware I/O ports -# -# CONFIG_SERIO is not set -# CONFIG_GAMEPORT is not set - -# -# Character devices -# -# CONFIG_VT is not set -# CONFIG_SERIAL_NONSTANDARD is not set - -# -# Serial drivers -# -# CONFIG_SERIAL_8250 is not set - -# -# Non-8250 serial port support -# -CONFIG_SERIAL_SH_SCI=y -CONFIG_SERIAL_SH_SCI_NR_UARTS=4 -CONFIG_SERIAL_SH_SCI_CONSOLE=y -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -# CONFIG_UNIX98_PTYS is not set -CONFIG_LEGACY_PTYS=y -CONFIG_LEGACY_PTY_COUNT=256 - -# -# IPMI -# -# CONFIG_IPMI_HANDLER is not set - -# -# Watchdog Cards -# -# CONFIG_WATCHDOG is not set -CONFIG_HW_RANDOM=y -# CONFIG_GEN_RTC is not set -# CONFIG_DTLK is not set -# CONFIG_R3964 is not set - -# -# Ftape, the floppy tape device driver -# -# CONFIG_RAW_DRIVER is not set - -# -# TPM devices -# -# CONFIG_TCG_TPM is not set - -# -# I2C support -# -# CONFIG_I2C is not set - -# -# SPI support -# -# CONFIG_SPI is not set -# CONFIG_SPI_MASTER is not set - -# -# Dallas's 1-wire bus -# -# CONFIG_W1 is not set - -# -# Hardware Monitoring support -# -CONFIG_HWMON=y -# CONFIG_HWMON_VID is not set -# CONFIG_SENSORS_ABITUGURU is not set -# CONFIG_SENSORS_F71805F is not set -# CONFIG_SENSORS_VT1211 is not set -# CONFIG_HWMON_DEBUG_CHIP is not set - -# -# Multimedia devices -# -# CONFIG_VIDEO_DEV is not set - -# -# Digital Video Broadcasting Devices -# -# CONFIG_DVB is not set - -# -# Graphics support -# -CONFIG_FIRMWARE_EDID=y -# CONFIG_FB is not set - -# -# Sound -# -# CONFIG_SOUND is not set - -# -# USB support -# -# CONFIG_USB_ARCH_HAS_HCD is not set -# CONFIG_USB_ARCH_HAS_OHCI is not set -# CONFIG_USB_ARCH_HAS_EHCI is not set - -# -# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' -# - -# -# USB Gadget Support -# -# CONFIG_USB_GADGET is not set - -# -# MMC/SD Card support -# -# CONFIG_MMC is not set - -# -# LED devices -# -# CONFIG_NEW_LEDS is not set - -# -# LED 
drivers -# - -# -# LED Triggers -# - -# -# InfiniBand support -# - -# -# EDAC - error detection and reporting (RAS) (EXPERIMENTAL) -# - -# -# Real Time Clock -# -# CONFIG_RTC_CLASS is not set - -# -# DMA Engine support -# -# CONFIG_DMA_ENGINE is not set - -# -# DMA Clients -# - -# -# DMA Devices -# - -# -# File systems -# -CONFIG_EXT2_FS=y -# CONFIG_EXT2_FS_XATTR is not set -# CONFIG_EXT3_FS is not set -# CONFIG_EXT4DEV_FS is not set -# CONFIG_REISERFS_FS is not set -# CONFIG_JFS_FS is not set -# CONFIG_FS_POSIX_ACL is not set -# CONFIG_XFS_FS is not set -# CONFIG_GFS2_FS is not set -# CONFIG_MINIX_FS is not set -CONFIG_ROMFS_FS=y -# CONFIG_INOTIFY is not set -# CONFIG_QUOTA is not set -# CONFIG_DNOTIFY is not set -# CONFIG_AUTOFS_FS is not set -# CONFIG_AUTOFS4_FS is not set -# CONFIG_FUSE_FS is not set - -# -# CD-ROM/DVD Filesystems -# -# CONFIG_ISO9660_FS is not set -# CONFIG_UDF_FS is not set - -# -# DOS/FAT/NT Filesystems -# -# CONFIG_MSDOS_FS is not set -# CONFIG_VFAT_FS is not set -# CONFIG_NTFS_FS is not set - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_SYSCTL=y -# CONFIG_SYSFS is not set -# CONFIG_TMPFS is not set -# CONFIG_HUGETLBFS is not set -# CONFIG_HUGETLB_PAGE is not set -CONFIG_RAMFS=y - -# -# Miscellaneous filesystems -# -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not set -# CONFIG_HFS_FS is not set -# CONFIG_HFSPLUS_FS is not set -# CONFIG_BEFS_FS is not set -# CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set -# CONFIG_JFFS_FS is not set -# CONFIG_JFFS2_FS is not set -CONFIG_CRAMFS=y -# CONFIG_VXFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -# CONFIG_SYSV_FS is not set -# CONFIG_UFS_FS is not set - -# -# Network File Systems -# -# CONFIG_NFS_FS is not set -# CONFIG_NFSD is not set -# CONFIG_SMB_FS is not set -# CONFIG_CIFS is not set -# CONFIG_NCP_FS is not set -# CONFIG_CODA_FS is not set -# CONFIG_AFS_FS is not set -# CONFIG_9P_FS is not set - -# -# Partition Types -# -# CONFIG_PARTITION_ADVANCED is not set -CONFIG_MSDOS_PARTITION=y - -# -# Native Language Support -# -# CONFIG_NLS is not set - -# -# Profiling support -# -# CONFIG_PROFILING is not set - -# -# Kernel hacking -# -# CONFIG_PRINTK_TIME is not set -CONFIG_ENABLE_MUST_CHECK=y -# CONFIG_MAGIC_SYSRQ is not set -# CONFIG_UNUSED_SYMBOLS is not set -# CONFIG_DEBUG_KERNEL is not set -CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_DEBUG_BUGVERBOSE is not set -# CONFIG_UNWIND_INFO is not set -# CONFIG_HEADERS_CHECK is not set -# CONFIG_SH_STANDARD_BIOS is not set -# CONFIG_EARLY_SCIF_CONSOLE is not set -# CONFIG_KGDB is not set - -# -# Security options -# -# CONFIG_KEYS is not set - -# -# Cryptographic options -# -# CONFIG_CRYPTO is not set - -# -# Library routines -# -CONFIG_CRC_CCITT=y -# CONFIG_CRC16 is not set -CONFIG_CRC32=y -# CONFIG_LIBCRC32C is not set -CONFIG_ZLIB_INFLATE=y diff --git a/trunk/arch/sh/drivers/Kconfig b/trunk/arch/sh/drivers/Kconfig deleted file mode 100644 index c54c758e6243..000000000000 --- a/trunk/arch/sh/drivers/Kconfig +++ /dev/null @@ -1,9 +0,0 @@ -menu "Additional SuperH Device Drivers" - -config PUSH_SWITCH - tristate "Push switch support" - help - This enables support for the push switch framework, a simple - framework that allows for sysfs driven switch status reporting. 
- -endmenu diff --git a/trunk/arch/sh/drivers/Makefile b/trunk/arch/sh/drivers/Makefile index bf18dbfb6787..338c3729d270 100644 --- a/trunk/arch/sh/drivers/Makefile +++ b/trunk/arch/sh/drivers/Makefile @@ -5,4 +5,4 @@ obj-$(CONFIG_PCI) += pci/ obj-$(CONFIG_SH_DMA) += dma/ obj-$(CONFIG_SUPERHYWAY) += superhyway/ -obj-$(CONFIG_PUSH_SWITCH) += push-switch.o + diff --git a/trunk/arch/sh/drivers/dma/Makefile b/trunk/arch/sh/drivers/dma/Makefile index db1295d32268..065d4c90970e 100644 --- a/trunk/arch/sh/drivers/dma/Makefile +++ b/trunk/arch/sh/drivers/dma/Makefile @@ -2,8 +2,8 @@ # Makefile for the SuperH DMA specific kernel interface routines under Linux. # -obj-y += dma-api.o -obj-$(CONFIG_ISA_DMA_API) += dma-isa.o +obj-y += dma-api.o dma-isa.o obj-$(CONFIG_SYSFS) += dma-sysfs.o obj-$(CONFIG_SH_DMA) += dma-sh.o obj-$(CONFIG_SH_DREAMCAST) += dma-pvr2.o dma-g2.o + diff --git a/trunk/arch/sh/drivers/dma/dma-api.c b/trunk/arch/sh/drivers/dma/dma-api.c index e062067edd24..47c3e837599b 100644 --- a/trunk/arch/sh/drivers/dma/dma-api.c +++ b/trunk/arch/sh/drivers/dma/dma-api.c @@ -11,27 +11,61 @@ */ #include #include +#include #include #include #include #include -#include #include DEFINE_SPINLOCK(dma_spin_lock); static LIST_HEAD(registered_dmac_list); +/* + * A brief note about the reasons for this API as it stands. + * + * For starters, the old ISA DMA API didn't work for us for a number of + * reasons, for one, the vast majority of channels on the SH DMAC are + * dual-address mode only, and both the new and the old DMA APIs are after the + * concept of managing a DMA buffer, which doesn't overly fit this model very + * well. In addition to which, the new API is largely geared at IOMMUs and + * GARTs, and doesn't even support the channel notion very well. + * + * The other thing that's a marginal issue, is the sheer number of random DMA + * engines that are present (ie, in boards like the Dreamcast), some of which + * cascade off of the SH DMAC, and others do not. As such, there was a real + * need for a scalable subsystem that could deal with both single and + * dual-address mode usage, in addition to interoperating with cascaded DMACs. + * + * There really isn't any reason why this needs to be SH specific, though I'm + * not aware of too many other processors (with the exception of some MIPS) + * that have the same concept of a dual address mode, or any real desire to + * actually make use of the DMAC even if such a subsystem were exposed + * elsewhere. + * + * The idea for this was derived from the ARM port, which acted as an excellent + * reference when trying to address these issues. + * + * It should also be noted that the decision to add Yet Another DMA API(tm) to + * the kernel wasn't made easily, and was only decided upon after conferring + * with jejb with regards to the state of the old and new APIs as they applied + * to these circumstances. Philip Blundell was also a great help in figuring + * out some single-address mode DMA semantics that were otherwise rather + * confusing. + */ + struct dma_info *get_dma_info(unsigned int chan) { struct dma_info *info; + unsigned int total = 0; /* * Look for each DMAC's range to determine who the owner of * the channel is. 
*/ list_for_each_entry(info, ®istered_dmac_list, list) { - if ((chan < info->first_channel_nr) || - (chan >= info->first_channel_nr + info->nr_channels)) + total += info->nr_channels; + if (chan > total) continue; return info; @@ -39,22 +73,6 @@ struct dma_info *get_dma_info(unsigned int chan) return NULL; } -EXPORT_SYMBOL(get_dma_info); - -struct dma_info *get_dma_info_by_name(const char *dmac_name) -{ - struct dma_info *info; - - list_for_each_entry(info, ®istered_dmac_list, list) { - if (dmac_name && (strcmp(dmac_name, info->name) != 0)) - continue; - else - return info; - } - - return NULL; -} -EXPORT_SYMBOL(get_dma_info_by_name); static unsigned int get_nr_channels(void) { @@ -73,161 +91,63 @@ static unsigned int get_nr_channels(void) struct dma_channel *get_dma_channel(unsigned int chan) { struct dma_info *info = get_dma_info(chan); - struct dma_channel *channel; - int i; - if (unlikely(!info)) + if (!info) return ERR_PTR(-EINVAL); - for (i = 0; i < info->nr_channels; i++) { - channel = &info->channels[i]; - if (channel->chan == chan) - return channel; - } - - return NULL; + return info->channels + chan; } -EXPORT_SYMBOL(get_dma_channel); int get_dma_residue(unsigned int chan) { struct dma_info *info = get_dma_info(chan); - struct dma_channel *channel = get_dma_channel(chan); + struct dma_channel *channel = &info->channels[chan]; if (info->ops->get_residue) return info->ops->get_residue(channel); return 0; } -EXPORT_SYMBOL(get_dma_residue); - -static int search_cap(const char **haystack, const char *needle) -{ - const char **p; - - for (p = haystack; *p; p++) - if (strcmp(*p, needle) == 0) - return 1; - - return 0; -} - -/** - * request_dma_bycap - Allocate a DMA channel based on its capabilities - * @dmac: List of DMA controllers to search - * @caps: List of capabilites - * - * Search all channels of all DMA controllers to find a channel which - * matches the requested capabilities. The result is the channel - * number if a match is found, or %-ENODEV if no match is found. - * - * Note that not all DMA controllers export capabilities, in which - * case they can never be allocated using this API, and so - * request_dma() must be used specifying the channel number. 
- */ -int request_dma_bycap(const char **dmac, const char **caps, const char *dev_id) -{ - unsigned int found = 0; - struct dma_info *info; - const char **p; - int i; - - BUG_ON(!dmac || !caps); - - list_for_each_entry(info, ®istered_dmac_list, list) - if (strcmp(*dmac, info->name) == 0) { - found = 1; - break; - } - if (!found) - return -ENODEV; - - for (i = 0; i < info->nr_channels; i++) { - struct dma_channel *channel = &info->channels[i]; - - if (unlikely(!channel->caps)) - continue; - - for (p = caps; *p; p++) { - if (!search_cap(channel->caps, *p)) - break; - if (request_dma(channel->chan, dev_id) == 0) - return channel->chan; - } - } - - return -EINVAL; -} -EXPORT_SYMBOL(request_dma_bycap); - -int dmac_search_free_channel(const char *dev_id) +int request_dma(unsigned int chan, const char *dev_id) { - struct dma_channel *channel = { 0 }; - struct dma_info *info = get_dma_info(0); - int i; - - for (i = 0; i < info->nr_channels; i++) { - channel = &info->channels[i]; - if (unlikely(!channel)) - return -ENODEV; - - if (atomic_read(&channel->busy) == 0) - break; - } + struct dma_info *info = get_dma_info(chan); + struct dma_channel *channel = &info->channels[chan]; - if (info->ops->request) { - int result = info->ops->request(channel); - if (result) - return result; + down(&channel->sem); - atomic_set(&channel->busy, 1); - return channel->chan; + if (!info->ops || chan >= MAX_DMA_CHANNELS) { + up(&channel->sem); + return -EINVAL; } - return -ENOSYS; -} - -int request_dma(unsigned int chan, const char *dev_id) -{ - struct dma_channel *channel = { 0 }; - struct dma_info *info = get_dma_info(chan); - int result; - - channel = get_dma_channel(chan); - if (atomic_xchg(&channel->busy, 1)) - return -EBUSY; + atomic_set(&channel->busy, 1); strlcpy(channel->dev_id, dev_id, sizeof(channel->dev_id)); - if (info->ops->request) { - result = info->ops->request(channel); - if (result) - atomic_set(&channel->busy, 0); + up(&channel->sem); - return result; - } + if (info->ops->request) + return info->ops->request(channel); return 0; } -EXPORT_SYMBOL(request_dma); void free_dma(unsigned int chan) { struct dma_info *info = get_dma_info(chan); - struct dma_channel *channel = get_dma_channel(chan); + struct dma_channel *channel = &info->channels[chan]; if (info->ops->free) info->ops->free(channel); atomic_set(&channel->busy, 0); } -EXPORT_SYMBOL(free_dma); void dma_wait_for_completion(unsigned int chan) { struct dma_info *info = get_dma_info(chan); - struct dma_channel *channel = get_dma_channel(chan); + struct dma_channel *channel = &info->channels[chan]; if (channel->flags & DMA_TEI_CAPABLE) { wait_event(channel->wait_queue, @@ -238,52 +158,21 @@ void dma_wait_for_completion(unsigned int chan) while (info->ops->get_residue(channel)) cpu_relax(); } -EXPORT_SYMBOL(dma_wait_for_completion); - -int register_chan_caps(const char *dmac, struct dma_chan_caps *caps) -{ - struct dma_info *info; - unsigned int found = 0; - int i; - - list_for_each_entry(info, ®istered_dmac_list, list) - if (strcmp(dmac, info->name) == 0) { - found = 1; - break; - } - - if (unlikely(!found)) - return -ENODEV; - - for (i = 0; i < info->nr_channels; i++, caps++) { - struct dma_channel *channel; - - if ((info->first_channel_nr + i) != caps->ch_num) - return -EINVAL; - - channel = &info->channels[i]; - channel->caps = caps->caplist; - } - - return 0; -} -EXPORT_SYMBOL(register_chan_caps); void dma_configure_channel(unsigned int chan, unsigned long flags) { struct dma_info *info = get_dma_info(chan); - struct dma_channel *channel = 
get_dma_channel(chan); + struct dma_channel *channel = &info->channels[chan]; if (info->ops->configure) info->ops->configure(channel, flags); } -EXPORT_SYMBOL(dma_configure_channel); int dma_xfer(unsigned int chan, unsigned long from, unsigned long to, size_t size, unsigned int mode) { struct dma_info *info = get_dma_info(chan); - struct dma_channel *channel = get_dma_channel(chan); + struct dma_channel *channel = &info->channels[chan]; channel->sar = from; channel->dar = to; @@ -292,20 +181,8 @@ int dma_xfer(unsigned int chan, unsigned long from, return info->ops->xfer(channel); } -EXPORT_SYMBOL(dma_xfer); - -int dma_extend(unsigned int chan, unsigned long op, void *param) -{ - struct dma_info *info = get_dma_info(chan); - struct dma_channel *channel = get_dma_channel(chan); - - if (info->ops->extend) - return info->ops->extend(channel, op, param); - - return -ENOSYS; -} -EXPORT_SYMBOL(dma_extend); +#ifdef CONFIG_PROC_FS static int dma_read_proc(char *buf, char **start, off_t off, int len, int *eof, void *data) { @@ -337,6 +214,8 @@ static int dma_read_proc(char *buf, char **start, off_t off, return p - buf; } +#endif + int register_dmac(struct dma_info *info) { @@ -345,7 +224,8 @@ int register_dmac(struct dma_info *info) INIT_LIST_HEAD(&info->list); printk(KERN_INFO "DMA: Registering %s handler (%d channel%s).\n", - info->name, info->nr_channels, info->nr_channels > 1 ? "s" : ""); + info->name, info->nr_channels, + info->nr_channels > 1 ? "s" : ""); BUG_ON((info->flags & DMAC_CHANNELS_CONFIGURED) && !info->channels); @@ -362,26 +242,28 @@ int register_dmac(struct dma_info *info) size = sizeof(struct dma_channel) * info->nr_channels; - info->channels = kzalloc(size, GFP_KERNEL); + info->channels = kmalloc(size, GFP_KERNEL); if (!info->channels) return -ENOMEM; + + memset(info->channels, 0, size); } total_channels = get_nr_channels(); for (i = 0; i < info->nr_channels; i++) { - struct dma_channel *chan = &info->channels[i]; - - atomic_set(&chan->busy, 0); + struct dma_channel *chan = info->channels + i; - chan->chan = info->first_channel_nr + i; - chan->vchan = info->first_channel_nr + i + total_channels; + chan->chan = i; + chan->vchan = i + total_channels; memcpy(chan->dev_id, "Unused", 7); if (info->flags & DMAC_CHANNELS_TEI_CAPABLE) chan->flags |= DMA_TEI_CAPABLE; + init_MUTEX(&chan->sem); init_waitqueue_head(&chan->wait_queue); + dma_create_sysfs_files(chan, info); } @@ -389,7 +271,6 @@ int register_dmac(struct dma_info *info) return 0; } -EXPORT_SYMBOL(register_dmac); void unregister_dmac(struct dma_info *info) { @@ -404,16 +285,31 @@ void unregister_dmac(struct dma_info *info) list_del(&info->list); platform_device_unregister(info->pdev); } -EXPORT_SYMBOL(unregister_dmac); static int __init dma_api_init(void) { - printk(KERN_NOTICE "DMA: Registering DMA API.\n"); + printk("DMA: Registering DMA API.\n"); + +#ifdef CONFIG_PROC_FS create_proc_read_entry("dma", 0, 0, dma_read_proc, 0); +#endif + return 0; } + subsys_initcall(dma_api_init); MODULE_AUTHOR("Paul Mundt "); MODULE_DESCRIPTION("DMA API for SuperH"); MODULE_LICENSE("GPL"); + +EXPORT_SYMBOL(request_dma); +EXPORT_SYMBOL(free_dma); +EXPORT_SYMBOL(register_dmac); +EXPORT_SYMBOL(get_dma_residue); +EXPORT_SYMBOL(get_dma_info); +EXPORT_SYMBOL(get_dma_channel); +EXPORT_SYMBOL(dma_xfer); +EXPORT_SYMBOL(dma_wait_for_completion); +EXPORT_SYMBOL(dma_configure_channel); + diff --git a/trunk/arch/sh/drivers/dma/dma-sh.c b/trunk/arch/sh/drivers/dma/dma-sh.c index f63721ed86c2..660786013350 100644 --- a/trunk/arch/sh/drivers/dma/dma-sh.c +++ 
b/trunk/arch/sh/drivers/dma/dma-sh.c @@ -94,13 +94,20 @@ static int sh_dmac_request_dma(struct dma_channel *chan) if (unlikely(!chan->flags & DMA_TEI_CAPABLE)) return 0; + chan->name = kzalloc(32, GFP_KERNEL); + if (unlikely(chan->name == NULL)) + return -ENOMEM; + snprintf(chan->name, 32, "DMAC Transfer End (Channel %d)", + chan->chan); + return request_irq(get_dmte_irq(chan->chan), dma_tei, - IRQF_DISABLED, chan->dev_id, chan); + IRQF_DISABLED, chan->name, chan); } static void sh_dmac_free_dma(struct dma_channel *chan) { free_irq(get_dmte_irq(chan->chan), chan); + kfree(chan->name); } static void diff --git a/trunk/arch/sh/drivers/dma/dma-sysfs.c b/trunk/arch/sh/drivers/dma/dma-sysfs.c index eebcd4768bbf..29b8ef9873d1 100644 --- a/trunk/arch/sh/drivers/dma/dma-sysfs.c +++ b/trunk/arch/sh/drivers/dma/dma-sysfs.c @@ -3,7 +3,7 @@ * * sysfs interface for SH DMA API * - * Copyright (C) 2004 - 2006 Paul Mundt + * Copyright (C) 2004, 2005 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive @@ -21,6 +21,7 @@ static struct sysdev_class dma_sysclass = { set_kset_name("dma"), }; + EXPORT_SYMBOL(dma_sysclass); static ssize_t dma_show_devices(struct sys_device *dev, char *buf) @@ -30,10 +31,7 @@ static ssize_t dma_show_devices(struct sys_device *dev, char *buf) for (i = 0; i < MAX_DMA_CHANNELS; i++) { struct dma_info *info = get_dma_info(i); - struct dma_channel *channel = get_dma_channel(i); - - if (unlikely(!info) || !channel) - continue; + struct dma_channel *channel = &info->channels[i]; len += sprintf(buf + len, "%2d: %14s %s\n", channel->chan, info->name, @@ -127,16 +125,11 @@ int dma_create_sysfs_files(struct dma_channel *chan, struct dma_info *info) if (ret) return ret; - ret |= sysdev_create_file(dev, &attr_dev_id); - ret |= sysdev_create_file(dev, &attr_count); - ret |= sysdev_create_file(dev, &attr_mode); - ret |= sysdev_create_file(dev, &attr_flags); - ret |= sysdev_create_file(dev, &attr_config); - - if (unlikely(ret)) { - dev_err(&info->pdev->dev, "Failed creating attrs\n"); - return ret; - } + sysdev_create_file(dev, &attr_dev_id); + sysdev_create_file(dev, &attr_count); + sysdev_create_file(dev, &attr_mode); + sysdev_create_file(dev, &attr_flags); + sysdev_create_file(dev, &attr_config); snprintf(name, sizeof(name), "dma%d", chan->chan); return sysfs_create_link(&info->pdev->dev.kobj, &dev->kobj, name); diff --git a/trunk/arch/sh/drivers/pci/ops-titan.c b/trunk/arch/sh/drivers/pci/ops-titan.c index ac8ee2312cd8..cd56d53375e7 100644 --- a/trunk/arch/sh/drivers/pci/ops-titan.c +++ b/trunk/arch/sh/drivers/pci/ops-titan.c @@ -15,21 +15,25 @@ #include #include #include -#include +#include #include #include "pci-sh4.h" -static char titan_irq_tab[] __initdata = { - TITAN_IRQ_WAN, - TITAN_IRQ_LAN, - TITAN_IRQ_MPCIA, - TITAN_IRQ_MPCIB, - TITAN_IRQ_USB, -}; - int __init pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin) { - int irq = titan_irq_tab[slot]; + int irq = -1; + + switch (slot) { + case 0: irq = TITAN_IRQ_WAN; break; /* eth0 (WAN) */ + case 1: irq = TITAN_IRQ_LAN; break; /* eth1 (LAN) */ + case 2: irq = TITAN_IRQ_MPCIA; break; /* mPCI A */ + case 3: irq = TITAN_IRQ_MPCIB; break; /* mPCI B */ + case 4: irq = TITAN_IRQ_USB; break; /* USB */ + default: + printk(KERN_INFO "PCI: Bad IRQ mapping " + "request for slot %d\n", slot); + return -1; + } printk("PCI: Mapping TITAN IRQ for slot %d, pin %c to irq %d\n", slot, pin - 1 + 'A', irq); diff --git 
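The ops-titan.c hunk above trades a static slot-to-IRQ table for a switch statement. The sketch below shows the table form with the explicit bounds check that the switch's default case provides; the IRQ numbers are placeholders, the real TITAN_IRQ_* values come from the board header:

#include <stdio.h>

/* Placeholder IRQ numbers standing in for TITAN_IRQ_WAN/LAN/MPCIA/MPCIB/USB */
enum { IRQ_WAN = 64, IRQ_LAN = 65, IRQ_MPCIA = 66, IRQ_MPCIB = 67, IRQ_USB = 68 };

static const int titan_irq_tab[] = { IRQ_WAN, IRQ_LAN, IRQ_MPCIA, IRQ_MPCIB, IRQ_USB };

/* Table-based slot -> IRQ lookup with a bounds check for bad slots */
static int map_slot_to_irq(unsigned int slot)
{
        if (slot >= sizeof(titan_irq_tab) / sizeof(titan_irq_tab[0]))
                return -1;      /* unknown slot */
        return titan_irq_tab[slot];
}

int main(void)
{
        for (unsigned int slot = 0; slot < 6; slot++)
                printf("slot %u -> irq %d\n", slot, map_slot_to_irq(slot));
        return 0;
}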
a/trunk/arch/sh/drivers/pci/pci-sh7780.c b/trunk/arch/sh/drivers/pci/pci-sh7780.c index 602b644c35ad..d6e635296534 100644 --- a/trunk/arch/sh/drivers/pci/pci-sh7780.c +++ b/trunk/arch/sh/drivers/pci/pci-sh7780.c @@ -22,20 +22,6 @@ #include #include "pci-sh4.h" -#define INTC_BASE 0xffd00000 -#define INTC_ICR0 (INTC_BASE+0x0) -#define INTC_ICR1 (INTC_BASE+0x1c) -#define INTC_INTPRI (INTC_BASE+0x10) -#define INTC_INTREQ (INTC_BASE+0x24) -#define INTC_INTMSK0 (INTC_BASE+0x44) -#define INTC_INTMSK1 (INTC_BASE+0x48) -#define INTC_INTMSK2 (INTC_BASE+0x40080) -#define INTC_INTMSKCLR0 (INTC_BASE+0x64) -#define INTC_INTMSKCLR1 (INTC_BASE+0x68) -#define INTC_INTMSKCLR2 (INTC_BASE+0x40084) -#define INTC_INT2MSKR (INTC_BASE+0x40038) -#define INTC_INT2MSKCR (INTC_BASE+0x4003c) - /* * Initialization. Try all known PCI access methods. Note that we support * using both PCI BIOS and direct access: in such cases, we use I/O ports diff --git a/trunk/arch/sh/drivers/push-switch.c b/trunk/arch/sh/drivers/push-switch.c deleted file mode 100644 index f2b9157c314f..000000000000 --- a/trunk/arch/sh/drivers/push-switch.c +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Generic push-switch framework - * - * Copyright (C) 2006 Paul Mundt - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ -#include -#include -#include -#include -#include - -#define DRV_NAME "push-switch" -#define DRV_VERSION "0.1.0" - -static ssize_t switch_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct push_switch_platform_info *psw_info = dev->platform_data; - return sprintf(buf, "%s\n", psw_info->name); -} -static DEVICE_ATTR(switch, S_IRUGO, switch_show, NULL); - -static void switch_timer(unsigned long data) -{ - struct push_switch *psw = (struct push_switch *)data; - - schedule_work(&psw->work); -} - -static void switch_work_handler(void *data) -{ - struct platform_device *pdev = data; - struct push_switch *psw = platform_get_drvdata(pdev); - - psw->state = 0; - - kobject_uevent(&pdev->dev.kobj, KOBJ_CHANGE); -} - -static int switch_drv_probe(struct platform_device *pdev) -{ - struct push_switch_platform_info *psw_info; - struct push_switch *psw; - int ret, irq; - - psw = kzalloc(sizeof(struct push_switch), GFP_KERNEL); - if (unlikely(!psw)) - return -ENOMEM; - - irq = platform_get_irq(pdev, 0); - if (unlikely(irq < 0)) { - ret = -ENODEV; - goto err; - } - - psw_info = pdev->dev.platform_data; - BUG_ON(!psw_info); - - ret = request_irq(irq, psw_info->irq_handler, - IRQF_DISABLED | psw_info->irq_flags, - psw_info->name ? 
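The deleted push-switch probe routine below unwinds its partial setup with the usual goto-based cleanup chain (err_irq, then err). A generic user-space sketch of that pattern, with stand-in acquire/release helpers rather than the real kernel calls:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the resources a probe routine acquires (memory, IRQ, sysfs attr) */
static int grab_irq(void)      { return 0; }   /* 0 on success, like request_irq() */
static void release_irq(void)  { }
static int make_attr(void)     { return 0; }

static int probe_sketch(void)
{
        int ret;
        void *priv = calloc(1, 64);             /* kzalloc() analogue */
        if (!priv)
                return -1;

        ret = grab_irq();
        if (ret < 0)
                goto err;                       /* only the allocation to undo */

        ret = make_attr();
        if (ret)
                goto err_irq;                   /* undo the IRQ as well */

        return 0;                               /* success: keep everything */

err_irq:
        release_irq();
err:
        free(priv);
        return ret;
}

int main(void) { return probe_sketch() ? 1 : 0; }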
psw_info->name : DRV_NAME, pdev); - if (unlikely(ret < 0)) - goto err; - - if (psw_info->name) { - ret = device_create_file(&pdev->dev, &dev_attr_switch); - if (unlikely(ret)) { - dev_err(&pdev->dev, "Failed creating device attrs\n"); - ret = -EINVAL; - goto err_irq; - } - } - - INIT_WORK(&psw->work, switch_work_handler, pdev); - init_timer(&psw->debounce); - - psw->debounce.function = switch_timer; - psw->debounce.data = (unsigned long)psw; - - platform_set_drvdata(pdev, psw); - - return 0; - -err_irq: - free_irq(irq, pdev); -err: - kfree(psw); - return ret; -} - -static int switch_drv_remove(struct platform_device *pdev) -{ - struct push_switch *psw = platform_get_drvdata(pdev); - struct push_switch_platform_info *psw_info = pdev->dev.platform_data; - int irq = platform_get_irq(pdev, 0); - - if (psw_info->name) - device_remove_file(&pdev->dev, &dev_attr_switch); - - platform_set_drvdata(pdev, NULL); - flush_scheduled_work(); - del_timer_sync(&psw->debounce); - free_irq(irq, pdev); - - kfree(psw); - - return 0; -} - -static struct platform_driver switch_driver = { - .probe = switch_drv_probe, - .remove = switch_drv_remove, - .driver = { - .name = DRV_NAME, - }, -}; - -static int __init switch_init(void) -{ - printk(KERN_NOTICE DRV_NAME ": version %s loaded\n", DRV_VERSION); - return platform_driver_register(&switch_driver); -} - -static void __exit switch_exit(void) -{ - platform_driver_unregister(&switch_driver); -} -module_init(switch_init); -module_exit(switch_exit); - -MODULE_VERSION(DRV_VERSION); -MODULE_AUTHOR("Paul Mundt"); -MODULE_LICENSE("GPLv2"); diff --git a/trunk/arch/sh/kernel/Makefile b/trunk/arch/sh/kernel/Makefile index 99c7e5249f7a..5da88a43d350 100644 --- a/trunk/arch/sh/kernel/Makefile +++ b/trunk/arch/sh/kernel/Makefile @@ -4,7 +4,7 @@ extra-y := head.o init_task.o vmlinux.lds -obj-y := process.o signal.o traps.o irq.o \ +obj-y := process.o signal.o entry.o traps.o irq.o \ ptrace.o setup.o time.o sys_sh.o semaphore.o \ io.o io_generic.o sh_ksyms.o syscalls.o @@ -21,4 +21,3 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o obj-$(CONFIG_APM) += apm.o obj-$(CONFIG_PM) += pm.o -obj-$(CONFIG_STACKTRACE) += stacktrace.o diff --git a/trunk/arch/sh/kernel/cpu/Makefile b/trunk/arch/sh/kernel/cpu/Makefile index 0582e6712b79..fb5dac069382 100644 --- a/trunk/arch/sh/kernel/cpu/Makefile +++ b/trunk/arch/sh/kernel/cpu/Makefile @@ -2,12 +2,11 @@ # Makefile for the Linux/SuperH CPU-specifc backends. # -obj-$(CONFIG_CPU_SH2) = sh2/ -obj-$(CONFIG_CPU_SH2A) = sh2a/ -obj-$(CONFIG_CPU_SH3) = sh3/ -obj-$(CONFIG_CPU_SH4) = sh4/ +obj-y += irq/ init.o clock.o + +obj-$(CONFIG_CPU_SH2) += sh2/ +obj-$(CONFIG_CPU_SH3) += sh3/ +obj-$(CONFIG_CPU_SH4) += sh4/ obj-$(CONFIG_UBC_WAKEUP) += ubc.o obj-$(CONFIG_SH_ADC) += adc.o - -obj-y += irq/ init.o clock.o diff --git a/trunk/arch/sh/kernel/cpu/clock.c b/trunk/arch/sh/kernel/cpu/clock.c index abb586b12565..51ec64cdf348 100644 --- a/trunk/arch/sh/kernel/cpu/clock.c +++ b/trunk/arch/sh/kernel/cpu/clock.c @@ -5,11 +5,9 @@ * * This clock framework is derived from the OMAP version by: * - * Copyright (C) 2004 - 2005 Nokia Corporation + * Copyright (C) 2004 Nokia Corporation * Written by Tuukka Tikkanen * - * Modified for omap shared clock framework by Tony Lindgren - * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. 
@@ -22,7 +20,6 @@ #include #include #include -#include #include #include @@ -198,37 +195,17 @@ void clk_recalc_rate(struct clk *clk) propagate_rate(clk); } -/* - * Returns a clock. Note that we first try to use device id on the bus - * and clock name. If this fails, we try to use clock name only. - */ -struct clk *clk_get(struct device *dev, const char *id) +struct clk *clk_get(const char *id) { struct clk *p, *clk = ERR_PTR(-ENOENT); - int idno; - - if (dev == NULL || dev->bus != &platform_bus_type) - idno = -1; - else - idno = to_platform_device(dev)->id; mutex_lock(&clock_list_sem); - list_for_each_entry(p, &clock_list, node) { - if (p->id == idno && - strcmp(id, p->name) == 0 && try_module_get(p->owner)) { - clk = p; - goto found; - } - } - list_for_each_entry(p, &clock_list, node) { if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) { clk = p; break; } } - -found: mutex_unlock(&clock_list_sem); return clk; diff --git a/trunk/arch/sh/kernel/cpu/init.c b/trunk/arch/sh/kernel/cpu/init.c index 48121766e8d2..bfb90eb0b7a6 100644 --- a/trunk/arch/sh/kernel/cpu/init.c +++ b/trunk/arch/sh/kernel/cpu/init.c @@ -68,14 +68,12 @@ static void __init cache_init(void) waysize = cpu_data->dcache.sets; -#ifdef CCR_CACHE_ORA /* * If the OC is already in RAM mode, we only have * half of the entries to flush.. */ if (ccr & CCR_CACHE_ORA) waysize >>= 1; -#endif waysize <<= cpu_data->dcache.entry_shift; diff --git a/trunk/arch/sh/kernel/cpu/irq/Makefile b/trunk/arch/sh/kernel/cpu/irq/Makefile index 0049d217561a..1c034c283f59 100644 --- a/trunk/arch/sh/kernel/cpu/irq/Makefile +++ b/trunk/arch/sh/kernel/cpu/irq/Makefile @@ -1,9 +1,8 @@ # # Makefile for the Linux/SuperH CPU-specifc IRQ handlers. # -obj-y += imask.o +obj-y += ipr.o imask.o -obj-$(CONFIG_CPU_HAS_IPR_IRQ) += ipr.o obj-$(CONFIG_CPU_HAS_PINT_IRQ) += pint.o obj-$(CONFIG_CPU_HAS_MASKREG_IRQ) += maskreg.o obj-$(CONFIG_CPU_HAS_INTC2_IRQ) += intc2.o diff --git a/trunk/arch/sh/kernel/cpu/irq/imask.c b/trunk/arch/sh/kernel/cpu/irq/imask.c index 301b505c4278..a33ae3e0a5a5 100644 --- a/trunk/arch/sh/kernel/cpu/irq/imask.c +++ b/trunk/arch/sh/kernel/cpu/irq/imask.c @@ -53,10 +53,7 @@ void static inline set_interrupt_registers(int ip) { unsigned long __dummy; - asm volatile( -#ifdef CONFIG_CPU_HAS_SR_RB - "ldc %2, r6_bank\n\t" -#endif + asm volatile("ldc %2, r6_bank\n\t" "stc sr, %0\n\t" "and #0xf0, %0\n\t" "shlr2 %0\n\t" diff --git a/trunk/arch/sh/kernel/cpu/irq/intc2.c b/trunk/arch/sh/kernel/cpu/irq/intc2.c index 74defe76a058..74ca576a7ce5 100644 --- a/trunk/arch/sh/kernel/cpu/irq/intc2.c +++ b/trunk/arch/sh/kernel/cpu/irq/intc2.c @@ -11,29 +11,22 @@ * Hitachi 7751, the STM ST40 STB1, SH7760, and SH7780. 
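The clk_get() hunk above reduces the lookup to a single by-name scan of the registered clock list. A simplified stand-alone sketch of that scan, using an array instead of the kernel list and NULL instead of ERR_PTR(-ENOENT); names and rates are illustrative only:

#include <stdio.h>
#include <string.h>

struct clk_ex {                    /* trimmed-down stand-in for struct clk */
        const char *name;
        unsigned long rate;
};

static struct clk_ex clock_list_ex[] = {
        { "master_clk", 33333333UL },
        { "bus_clk",    16666666UL },
        { "cpu_clk",   133333333UL },
};

/* By-name lookup, in the spirit of the simplified clk_get() */
static struct clk_ex *clk_get_ex(const char *id)
{
        for (size_t i = 0; i < sizeof(clock_list_ex) / sizeof(clock_list_ex[0]); i++)
                if (strcmp(id, clock_list_ex[i].name) == 0)
                        return &clock_list_ex[i];
        return NULL;
}

int main(void)
{
        struct clk_ex *clk = clk_get_ex("bus_clk");
        printf("bus_clk rate: %lu\n", clk ? clk->rate : 0UL);
        return 0;
}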
*/ #include -#include +#include #include - -#if defined(CONFIG_CPU_SUBTYPE_SH7760) -#define INTC2_BASE 0xfe080000 -#define INTC2_INTMSK (INTC2_BASE + 0x40) -#define INTC2_INTMSKCLR (INTC2_BASE + 0x60) -#elif defined(CONFIG_CPU_SUBTYPE_SH7780) -#define INTC2_BASE 0xffd40000 -#define INTC2_INTMSK (INTC2_BASE + 0x38) -#define INTC2_INTMSKCLR (INTC2_BASE + 0x3c) -#endif +#include static void disable_intc2_irq(unsigned int irq) { struct intc2_data *p = get_irq_chip_data(irq); - ctrl_outl(1 << p->msk_shift, INTC2_INTMSK + p->msk_offset); + ctrl_outl(1 << p->msk_shift, + INTC2_BASE + INTC2_INTMSK_OFFSET + p->msk_offset); } static void enable_intc2_irq(unsigned int irq) { struct intc2_data *p = get_irq_chip_data(irq); - ctrl_outl(1 << p->msk_shift, INTC2_INTMSKCLR + p->msk_offset); + ctrl_outl(1 << p->msk_shift, + INTC2_BASE + INTC2_INTMSKCLR_OFFSET + p->msk_offset); } static struct irq_chip intc2_irq_chip = { @@ -68,10 +61,12 @@ void make_intc2_irq(struct intc2_data *table, unsigned int nr_irqs) /* Set the priority level */ local_irq_save(flags); - ipr = ctrl_inl(INTC2_BASE + p->ipr_offset); + ipr = ctrl_inl(INTC2_BASE + INTC2_INTPRI_OFFSET + + p->ipr_offset); ipr &= ~(0xf << p->ipr_shift); ipr |= p->priority << p->ipr_shift; - ctrl_outl(ipr, INTC2_BASE + p->ipr_offset); + ctrl_outl(ipr, INTC2_BASE + INTC2_INTPRI_OFFSET + + p->ipr_offset); local_irq_restore(flags); diff --git a/trunk/arch/sh/kernel/cpu/irq/ipr.c b/trunk/arch/sh/kernel/cpu/irq/ipr.c index 35eb5751a3aa..a0089563cbfc 100644 --- a/trunk/arch/sh/kernel/cpu/irq/ipr.c +++ b/trunk/arch/sh/kernel/cpu/irq/ipr.c @@ -19,21 +19,25 @@ #include #include #include -#include -#include +#include +#include +#include + static void disable_ipr_irq(unsigned int irq) { struct ipr_data *p = get_irq_chip_data(irq); + int shift = p->shift*4; /* Set the priority in IPR to 0 */ - ctrl_outw(ctrl_inw(p->addr) & (0xffff ^ (0xf << p->shift)), p->addr); + ctrl_outw(ctrl_inw(p->addr) & (0xffff ^ (0xf << shift)), p->addr); } static void enable_ipr_irq(unsigned int irq) { struct ipr_data *p = get_irq_chip_data(irq); + int shift = p->shift*4; /* Set priority in IPR back to original value */ - ctrl_outw(ctrl_inw(p->addr) | (p->priority << p->shift), p->addr); + ctrl_outw(ctrl_inw(p->addr) | (p->priority << shift), p->addr); } static struct irq_chip ipr_irq_chip = { @@ -49,10 +53,6 @@ void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs) for (i = 0; i < nr_irqs; i++) { unsigned int irq = table[i].irq; - table[i].addr = map_ipridx_to_addr(table[i].ipr_idx); - /* could the IPR index be mapped, if not we ignore this */ - if (table[i].addr == 0) - continue; disable_irq_nosync(irq); set_irq_chip_and_handler_name(irq, &ipr_irq_chip, handle_level_irq, "level"); @@ -62,6 +62,83 @@ void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs) } EXPORT_SYMBOL(make_ipr_irq); +static struct ipr_data sys_ipr_map[] = { +#ifndef CONFIG_CPU_SUBTYPE_SH7780 + { TIMER_IRQ, TIMER_IPR_ADDR, TIMER_IPR_POS, TIMER_PRIORITY }, + { TIMER1_IRQ, TIMER1_IPR_ADDR, TIMER1_IPR_POS, TIMER1_PRIORITY }, +#ifdef RTC_IRQ + { RTC_IRQ, RTC_IPR_ADDR, RTC_IPR_POS, RTC_PRIORITY }, +#endif +#ifdef SCI_ERI_IRQ + { SCI_ERI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY }, + { SCI_RXI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY }, + { SCI_TXI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY }, +#endif +#ifdef SCIF1_ERI_IRQ + { SCIF1_ERI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY }, + { SCIF1_RXI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY }, + { SCIF1_BRI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, 
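The intc2/ipr handlers above update a 4-bit priority field inside a control register by clearing the old field and OR-ing in the new value. A small stand-alone sketch of that read-modify-write step on a 16-bit IPR-style value (register contents are made up for the example):

#include <stdio.h>
#include <stdint.h>

/* Replace the 4-bit priority field at 'shift' inside a 16-bit IPR-style value */
static uint16_t set_prio_field(uint16_t ipr, unsigned int shift, unsigned int prio)
{
        ipr &= (uint16_t)~(0xf << shift);            /* clear the old field */
        ipr |= (uint16_t)((prio & 0xf) << shift);    /* install the new priority */
        return ipr;
}

int main(void)
{
        uint16_t ipr = 0x1234;
        /* field at bits 7:4 set to priority 7 -> 0x1274 */
        printf("0x%04x\n", set_prio_field(ipr, 4, 7));
        return 0;
}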
SCIF1_PRIORITY }, + { SCIF1_TXI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY }, +#endif +#if defined(CONFIG_CPU_SUBTYPE_SH7300) + { SCIF0_IRQ, SCIF0_IPR_ADDR, SCIF0_IPR_POS, SCIF0_PRIORITY }, + { DMTE2_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY }, + { DMTE3_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY }, + { VIO_IRQ, VIO_IPR_ADDR, VIO_IPR_POS, VIO_PRIORITY }, +#endif +#ifdef SCIF_ERI_IRQ + { SCIF_ERI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY }, + { SCIF_RXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY }, + { SCIF_BRI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY }, + { SCIF_TXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY }, +#endif +#ifdef IRDA_ERI_IRQ + { IRDA_ERI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY }, + { IRDA_RXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY }, + { IRDA_BRI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY }, + { IRDA_TXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY }, +#endif +#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \ + defined(CONFIG_CPU_SUBTYPE_SH7706) || \ + defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705) + /* + * Initialize the Interrupt Controller (INTC) + * registers to their power on values + */ + + /* + * Enable external irq (INTC IRQ mode). + * You should set corresponding bits of PFC to "00" + * to enable these interrupts. + */ + { IRQ0_IRQ, IRQ0_IPR_ADDR, IRQ0_IPR_POS, IRQ0_PRIORITY }, + { IRQ1_IRQ, IRQ1_IPR_ADDR, IRQ1_IPR_POS, IRQ1_PRIORITY }, + { IRQ2_IRQ, IRQ2_IPR_ADDR, IRQ2_IPR_POS, IRQ2_PRIORITY }, + { IRQ3_IRQ, IRQ3_IPR_ADDR, IRQ3_IPR_POS, IRQ3_PRIORITY }, + { IRQ4_IRQ, IRQ4_IPR_ADDR, IRQ4_IPR_POS, IRQ4_PRIORITY }, + { IRQ5_IRQ, IRQ5_IPR_ADDR, IRQ5_IPR_POS, IRQ5_PRIORITY }, +#endif +#endif +}; + +void __init init_IRQ(void) +{ + make_ipr_irq(sys_ipr_map, ARRAY_SIZE(sys_ipr_map)); + +#ifdef CONFIG_CPU_HAS_PINT_IRQ + init_IRQ_pint(); +#endif + +#ifdef CONFIG_CPU_HAS_INTC2_IRQ + init_IRQ_intc2(); +#endif + /* Perform the machine specific initialisation */ + if (sh_mv.mv_init_irq != NULL) + sh_mv.mv_init_irq(); + + irq_ctx_init(smp_processor_id()); +} + #if !defined(CONFIG_CPU_HAS_PINT_IRQ) int ipr_irq_demux(int irq) { diff --git a/trunk/arch/sh/kernel/cpu/sh2/Makefile b/trunk/arch/sh/kernel/cpu/sh2/Makefile index f0f059acfcfb..389353fba608 100644 --- a/trunk/arch/sh/kernel/cpu/sh2/Makefile +++ b/trunk/arch/sh/kernel/cpu/sh2/Makefile @@ -2,6 +2,5 @@ # Makefile for the Linux/SuperH SH-2 backends. # -obj-y := ex.o probe.o entry.o +obj-y := probe.o -obj-$(CONFIG_CPU_SUBTYPE_SH7619) += setup-sh7619.o clock-sh7619.o diff --git a/trunk/arch/sh/kernel/cpu/sh2/clock-sh7619.c b/trunk/arch/sh/kernel/cpu/sh2/clock-sh7619.c deleted file mode 100644 index d0440b269702..000000000000 --- a/trunk/arch/sh/kernel/cpu/sh2/clock-sh7619.c +++ /dev/null @@ -1,81 +0,0 @@ -/* - * arch/sh/kernel/cpu/sh2/clock-sh7619.c - * - * SH7619 support for the clock framework - * - * Copyright (C) 2006 Yoshinori Sato - * - * Based on clock-sh4.c - * Copyright (C) 2005 Paul Mundt - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ -#include -#include -#include -#include -#include - -const static int pll1rate[]={1,2}; -const static int pfc_divisors[]={1,2,0,4}; - -#if (CONFIG_SH_CLK_MD == 1) || (CONFIG_SH_CLK_MD == 2) -#define PLL2 (4) -#elif (CONFIG_SH_CLK_MD == 5) || (CONFIG_SH_CLK_MD == 6) -#define PLL2 (2) -#else -#error "Illigal Clock Mode!" 
-#endif - -static void master_clk_init(struct clk *clk) -{ - clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 7]; -} - -static struct clk_ops sh7619_master_clk_ops = { - .init = master_clk_init, -}; - -static void module_clk_recalc(struct clk *clk) -{ - int idx = (ctrl_inw(FREQCR) & 0x0007); - clk->rate = clk->parent->rate / pfc_divisors[idx]; -} - -static struct clk_ops sh7619_module_clk_ops = { - .recalc = module_clk_recalc, -}; - -static void bus_clk_recalc(struct clk *clk) -{ - clk->rate = clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 7]; -} - -static struct clk_ops sh7619_bus_clk_ops = { - .recalc = bus_clk_recalc, -}; - -static void cpu_clk_recalc(struct clk *clk) -{ - clk->rate = clk->parent->rate; -} - -static struct clk_ops sh7619_cpu_clk_ops = { - .recalc = cpu_clk_recalc, -}; - -static struct clk_ops *sh7619_clk_ops[] = { - &sh7619_master_clk_ops, - &sh7619_module_clk_ops, - &sh7619_bus_clk_ops, - &sh7619_cpu_clk_ops, -}; - -void __init arch_init_clk_ops(struct clk_ops **ops, int idx) -{ - if (idx < ARRAY_SIZE(sh7619_clk_ops)) - *ops = sh7619_clk_ops[idx]; -} - diff --git a/trunk/arch/sh/kernel/cpu/sh2/entry.S b/trunk/arch/sh/kernel/cpu/sh2/entry.S deleted file mode 100644 index 34d51b3745ea..000000000000 --- a/trunk/arch/sh/kernel/cpu/sh2/entry.S +++ /dev/null @@ -1,341 +0,0 @@ -/* - * arch/sh/kernel/cpu/sh2/entry.S - * - * The SH-2 exception entry - * - * Copyright (C) 2005,2006 Yoshinori Sato - * Copyright (C) 2005 AXE,Inc. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#include -#include -#include -#include -#include -#include -#include - -/* Offsets to the stack */ -OFF_R0 = 0 /* Return value. New ABI also arg4 */ -OFF_R1 = 4 /* New ABI: arg5 */ -OFF_R2 = 8 /* New ABI: arg6 */ -OFF_R3 = 12 /* New ABI: syscall_nr */ -OFF_R4 = 16 /* New ABI: arg0 */ -OFF_R5 = 20 /* New ABI: arg1 */ -OFF_R6 = 24 /* New ABI: arg2 */ -OFF_R7 = 28 /* New ABI: arg3 */ -OFF_SP = (15*4) -OFF_PC = (16*4) -OFF_SR = (16*4+2*4) -OFF_TRA = (16*4+6*4) - -#include - -ENTRY(exception_handler) - ! already saved r0/r1 - mov.l r2,@-sp - mov.l r3,@-sp - mov r0,r1 - cli - mov.l $cpu_mode,r2 - mov.l @r2,r0 - mov.l @(5*4,r15),r3 ! previous SR - shll2 r3 ! set "S" flag - rotl r0 ! T <- "S" flag - rotl r0 ! "S" flag is LSB - rotcr r3 ! T -> r3:b30 - shlr r3 - shlr r0 - bt/s 1f - mov.l r3,@(5*4,r15) ! copy cpu mode to SR - ! switch to kernel mode - mov #1,r0 - rotr r0 - rotr r0 - mov.l r0,@r2 ! enter kernel mode - mov.l $current_thread_info,r2 - mov.l @r2,r2 - mov #0x20,r0 - shll8 r0 - add r2,r0 - mov r15,r2 ! r2 = user stack top - mov r0,r15 ! switch kernel stack - add #-4,r15 ! dummy - mov.l r1,@-r15 ! TRA - sts.l macl, @-r15 - sts.l mach, @-r15 - stc.l gbr, @-r15 - mov.l @(4*4,r2),r0 - mov.l @(5*4,r2),r1 - mov.l r1,@-r15 ! original SR - sts.l pr,@-r15 - mov.l r0,@-r15 ! original PC - mov r2,r3 - add #(4+2)*4,r3 ! rewind r0 - r3 + exception frame - mov.l r3,@-r15 ! original SP - mov.l r14,@-r15 - mov.l r13,@-r15 - mov.l r12,@-r15 - mov.l r11,@-r15 - mov.l r10,@-r15 - mov.l r9,@-r15 - mov.l r8,@-r15 - mov.l r7,@-r15 - mov.l r6,@-r15 - mov.l r5,@-r15 - mov.l r4,@-r15 - mov r2,r8 ! copy user -> kernel stack - mov.l @r8+,r3 - mov.l r3,@-r15 - mov.l @r8+,r2 - mov.l r2,@-r15 - mov.l @r8+,r1 - mov.l r1,@-r15 - mov.l @r8+,r0 - bra 2f - mov.l r0,@-r15 -1: - ! in kernel exception - mov #(22-4-4-1)*4+4,r0 - mov r15,r2 - sub r0,r15 - mov.l @r2+,r0 ! 
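The deleted sh7619 clock code above derives the master, bus, and module rates from FREQCR bit fields and small divisor tables. A stand-alone sketch of that arithmetic; the input frequency and FREQCR value are hypothetical, and PLL2_EX stands in for the CONFIG_SH_CLK_MD-dependent PLL2 constant:

#include <stdio.h>

/* Divisor tables copied from the deleted sh7619 clock code */
static const int pll1rate[]     = { 1, 2 };
static const int pfc_divisors[] = { 1, 2, 0, 4 };
#define PLL2_EX 4   /* clock-mode dependent; 4 or 2 depending on CONFIG_SH_CLK_MD */

int main(void)
{
        unsigned int freqcr = 0x0100;            /* hypothetical FREQCR value */
        unsigned long input = 10000000UL;        /* 10 MHz input clock, for illustration */

        unsigned long master = input * PLL2_EX * pll1rate[(freqcr >> 8) & 7];
        unsigned long bus    = master / pll1rate[(freqcr >> 8) & 7];
        unsigned long module = master / pfc_divisors[freqcr & 0x0007];

        printf("master %lu, bus %lu, module %lu\n", master, bus, module);
        return 0;
}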
old R3 - mov.l r0,@-r15 - mov.l @r2+,r0 ! old R2 - mov.l r0,@-r15 - mov.l @r2+,r0 ! old R1 - mov.l r0,@-r15 - mov.l @r2+,r0 ! old R0 - mov.l r0,@-r15 - mov.l @r2+,r3 ! old PC - mov.l @r2+,r0 ! old SR - add #-4,r2 ! exception frame stub (sr) - mov.l r1,@-r2 ! TRA - sts.l macl, @-r2 - sts.l mach, @-r2 - stc.l gbr, @-r2 - mov.l r0,@-r2 ! save old SR - sts.l pr,@-r2 - mov.l r3,@-r2 ! save old PC - mov r2,r0 - add #8*4,r0 - mov.l r0,@-r2 ! save old SP - mov.l r14,@-r2 - mov.l r13,@-r2 - mov.l r12,@-r2 - mov.l r11,@-r2 - mov.l r10,@-r2 - mov.l r9,@-r2 - mov.l r8,@-r2 - mov.l r7,@-r2 - mov.l r6,@-r2 - mov.l r5,@-r2 - mov.l r4,@-r2 - mov.l @(OFF_R0,r15),r0 - mov.l @(OFF_R1,r15),r1 - mov.l @(OFF_R2,r15),r2 - mov.l @(OFF_R3,r15),r3 -2: - mov #OFF_TRA,r8 - add r15,r8 - mov.l @r8,r9 - mov #64,r8 - cmp/hs r8,r9 - bt interrupt_entry ! vec >= 64 is interrupt - mov #32,r8 - cmp/hs r8,r9 - bt trap_entry ! 64 > vec >= 32 is trap - mov.l 4f,r8 - mov r9,r4 - shll2 r9 - add r9,r8 - mov.l @r8,r8 - mov #0,r9 - cmp/eq r9,r8 - bf 3f - mov.l 8f,r8 ! unhandled exception -3: - mov.l 5f,r10 - jmp @r8 - lds r10,pr - -interrupt_entry: - mov r9,r4 - mov.l 6f,r9 - mov.l 7f,r8 - jmp @r8 - lds r9,pr - - .align 2 -4: .long exception_handling_table -5: .long ret_from_exception -6: .long ret_from_irq -7: .long do_IRQ -8: .long do_exception_error - -trap_entry: - add #-0x10,r9 - shll2 r9 ! TRA - mov #OFF_TRA,r8 - add r15,r8 - mov.l r9,@r8 - mov r9,r8 -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 5f, r9 - jsr @r9 - nop -#endif - sti - bra system_call - nop - - .align 2 -1: .long syscall_exit -2: .long break_point_trap_software -3: .long NR_syscalls -4: .long sys_call_table -#ifdef CONFIG_TRACE_IRQFLAGS -5: .long trace_hardirqs_on -#endif - -#if defined(CONFIG_SH_STANDARD_BIOS) - /* Unwind the stack and jmp to the debug entry */ -debug_kernel_fw: - mov r15,r0 - add #(22-4)*4-4,r0 - ldc.l @r0+,gbr - lds.l @r0+,mach - lds.l @r0+,macl - mov r15,r0 - mov.l @(OFF_SP,r0),r1 - mov #OFF_SR,r2 - mov.l @(r0,r2),r3 - mov.l r3,@-r1 - mov #OFF_SP,r2 - mov.l @(r0,r2),r3 - mov.l r3,@-r1 - mov r15,r0 - add #(22-4)*4-8,r0 - mov.l 1f,r2 - mov.l @r2,r2 - stc sr,r3 - mov.l r2,@r0 - mov.l r3,@r0 - mov.l r1,@(8,r0) - mov.l @r15+, r0 - mov.l @r15+, r1 - mov.l @r15+, r2 - mov.l @r15+, r3 - mov.l @r15+, r4 - mov.l @r15+, r5 - mov.l @r15+, r6 - mov.l @r15+, r7 - mov.l @r15+, r8 - mov.l @r15+, r9 - mov.l @r15+, r10 - mov.l @r15+, r11 - mov.l @r15+, r12 - mov.l @r15+, r13 - mov.l @r15+, r14 - add #8,r15 - lds.l @r15+, pr - rte - mov.l @r15+,r15 - .align 2 -1: .long gdb_vbr_vector -#endif /* CONFIG_SH_STANDARD_BIOS */ - -ENTRY(address_error_handler) - mov r15,r4 ! regs - add #4,r4 - mov #OFF_PC,r0 - mov.l @(r0,r15),r6 ! pc - mov.l 1f,r0 - jmp @r0 - mov #0,r5 ! writeaccess is unknown - .align 2 - -1: .long do_address_error - -restore_all: - cli -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 3f, r0 - jsr @r0 - nop -#endif - mov r15,r0 - mov.l $cpu_mode,r2 - mov #OFF_SR,r3 - mov.l @(r0,r3),r1 - mov.l r1,@r2 - shll2 r1 ! clear MD bit - shlr2 r1 - mov.l @(OFF_SP,r0),r2 - add #-8,r2 - mov.l r2,@(OFF_SP,r0) ! point exception frame top - mov.l r1,@(4,r2) ! set sr - mov #OFF_PC,r3 - mov.l @(r0,r3),r1 - mov.l r1,@r2 ! set pc - add #4*16+4,r0 - lds.l @r0+,pr - add #4,r0 ! 
skip sr - ldc.l @r0+,gbr - lds.l @r0+,mach - lds.l @r0+,macl - get_current_thread_info r0, r1 - mov.l $current_thread_info,r1 - mov.l r0,@r1 - mov.l @r15+,r0 - mov.l @r15+,r1 - mov.l @r15+,r2 - mov.l @r15+,r3 - mov.l @r15+,r4 - mov.l @r15+,r5 - mov.l @r15+,r6 - mov.l @r15+,r7 - mov.l @r15+,r8 - mov.l @r15+,r9 - mov.l @r15+,r10 - mov.l @r15+,r11 - mov.l @r15+,r12 - mov.l @r15+,r13 - mov.l @r15+,r14 - mov.l @r15,r15 - rte - nop -2: - mov.l 1f,r8 - mov.l 2f,r9 - jmp @r9 - lds r8,pr - - .align 2 -$current_thread_info: - .long __current_thread_info -$cpu_mode: - .long __cpu_mode -#ifdef CONFIG_TRACE_IRQFLAGS -3: .long trace_hardirqs_off -#endif - -! common exception handler -#include "../../entry-common.S" - - .data -! cpu operation mode -! bit30 = MD (compatible SH3/4) -__cpu_mode: - .long 0x40000000 - - .section .bss -__current_thread_info: - .long 0 - -ENTRY(exception_handling_table) - .space 4*32 diff --git a/trunk/arch/sh/kernel/cpu/sh2/ex.S b/trunk/arch/sh/kernel/cpu/sh2/ex.S deleted file mode 100644 index 6d285af7846c..000000000000 --- a/trunk/arch/sh/kernel/cpu/sh2/ex.S +++ /dev/null @@ -1,46 +0,0 @@ -/* - * arch/sh/kernel/cpu/sh2/ex.S - * - * The SH-2 exception vector table - * - * Copyright (C) 2005 Yoshinori Sato - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#include - -! -! convert Exception Vector to Exception Number -! -exception_entry: -no = 0 - .rept 256 - mov.l r0,@-sp - mov #no,r0 - bra exception_trampoline - and #0xff,r0 -no = no + 1 - .endr -exception_trampoline: - mov.l r1,@-sp - mov.l $exception_handler,r1 - jmp @r1 - - .align 2 -$exception_entry: - .long exception_entry -$exception_handler: - .long exception_handler -! -! Exception Vector Base -! - .align 2 -ENTRY(vbr_base) -vector = 0 - .rept 256 - .long exception_entry + vector * 8 -vector = vector + 1 - .endr diff --git a/trunk/arch/sh/kernel/cpu/sh2/probe.c b/trunk/arch/sh/kernel/cpu/sh2/probe.c index ba527d9b5024..f17a2a0d588e 100644 --- a/trunk/arch/sh/kernel/cpu/sh2/probe.c +++ b/trunk/arch/sh/kernel/cpu/sh2/probe.c @@ -17,23 +17,17 @@ int __init detect_cpu_and_cache_system(void) { -#if defined(CONFIG_CPU_SUBTYPE_SH7604) + /* + * For now, assume SH7604 .. fix this later. + */ cpu_data->type = CPU_SH7604; cpu_data->dcache.ways = 4; - cpu_data->dcache.way_incr = (1<<10); + cpu_data->dcache.way_shift = 6; cpu_data->dcache.sets = 64; cpu_data->dcache.entry_shift = 4; cpu_data->dcache.linesz = L1_CACHE_BYTES; cpu_data->dcache.flags = 0; -#elif defined(CONFIG_CPU_SUBTYPE_SH7619) - cpu_data->type = CPU_SH7619; - cpu_data->dcache.ways = 4; - cpu_data->dcache.way_incr = (1<<12); - cpu_data->dcache.sets = 256; - cpu_data->dcache.entry_shift = 4; - cpu_data->dcache.linesz = L1_CACHE_BYTES; - cpu_data->dcache.flags = 0; -#endif + /* * SH-2 doesn't have separate caches */ diff --git a/trunk/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/trunk/arch/sh/kernel/cpu/sh2/setup-sh7619.c deleted file mode 100644 index 82c2d905152f..000000000000 --- a/trunk/arch/sh/kernel/cpu/sh2/setup-sh7619.c +++ /dev/null @@ -1,53 +0,0 @@ -/* - * SH7619 Setup - * - * Copyright (C) 2006 Yoshinori Sato - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- */ -#include -#include -#include -#include - -static struct plat_sci_port sci_platform_data[] = { - { - .mapbase = 0xf8400000, - .flags = UPF_BOOT_AUTOCONF, - .type = PORT_SCIF, - .irqs = { 88, 89, 91, 90}, - }, { - .mapbase = 0xf8410000, - .flags = UPF_BOOT_AUTOCONF, - .type = PORT_SCIF, - .irqs = { 92, 93, 95, 94}, - }, { - .mapbase = 0xf8420000, - .flags = UPF_BOOT_AUTOCONF, - .type = PORT_SCIF, - .irqs = { 96, 97, 99, 98}, - }, { - .flags = 0, - } -}; - -static struct platform_device sci_device = { - .name = "sh-sci", - .id = -1, - .dev = { - .platform_data = sci_platform_data, - }, -}; - -static struct platform_device *sh7619_devices[] __initdata = { - &sci_device, -}; - -static int __init sh7619_devices_setup(void) -{ - return platform_add_devices(sh7619_devices, - ARRAY_SIZE(sh7619_devices)); -} -__initcall(sh7619_devices_setup); diff --git a/trunk/arch/sh/kernel/cpu/sh2a/Makefile b/trunk/arch/sh/kernel/cpu/sh2a/Makefile deleted file mode 100644 index 350972ae9410..000000000000 --- a/trunk/arch/sh/kernel/cpu/sh2a/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -# -# Makefile for the Linux/SuperH SH-2A backends. -# - -obj-y := common.o probe.o - -common-y += $(addprefix ../sh2/, ex.o) -common-y += $(addprefix ../sh2/, entry.o) - -obj-$(CONFIG_CPU_SUBTYPE_SH7206) += setup-sh7206.o clock-sh7206.o diff --git a/trunk/arch/sh/kernel/cpu/sh2a/clock-sh7206.c b/trunk/arch/sh/kernel/cpu/sh2a/clock-sh7206.c deleted file mode 100644 index a9ad309c6a33..000000000000 --- a/trunk/arch/sh/kernel/cpu/sh2a/clock-sh7206.c +++ /dev/null @@ -1,85 +0,0 @@ -/* - * arch/sh/kernel/cpu/sh2a/clock-sh7206.c - * - * SH7206 support for the clock framework - * - * Copyright (C) 2006 Yoshinori Sato - * - * Based on clock-sh4.c - * Copyright (C) 2005 Paul Mundt - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ -#include -#include -#include -#include -#include - -const static int pll1rate[]={1,2,3,4,6,8}; -const static int pfc_divisors[]={1,2,3,4,6,8,12}; -#define ifc_divisors pfc_divisors - -#if (CONFIG_SH_CLK_MD == 2) -#define PLL2 (4) -#elif (CONFIG_SH_CLK_MD == 6) -#define PLL2 (2) -#elif (CONFIG_SH_CLK_MD == 7) -#define PLL2 (1) -#else -#error "Illigal Clock Mode!" 
-#endif - -static void master_clk_init(struct clk *clk) -{ - clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007]; -} - -static struct clk_ops sh7206_master_clk_ops = { - .init = master_clk_init, -}; - -static void module_clk_recalc(struct clk *clk) -{ - int idx = (ctrl_inw(FREQCR) & 0x0007); - clk->rate = clk->parent->rate / pfc_divisors[idx]; -} - -static struct clk_ops sh7206_module_clk_ops = { - .recalc = module_clk_recalc, -}; - -static void bus_clk_recalc(struct clk *clk) -{ - clk->rate = clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007]; -} - -static struct clk_ops sh7206_bus_clk_ops = { - .recalc = bus_clk_recalc, -}; - -static void cpu_clk_recalc(struct clk *clk) -{ - int idx = (ctrl_inw(FREQCR) & 0x0007); - clk->rate = clk->parent->rate / ifc_divisors[idx]; -} - -static struct clk_ops sh7206_cpu_clk_ops = { - .recalc = cpu_clk_recalc, -}; - -static struct clk_ops *sh7206_clk_ops[] = { - &sh7206_master_clk_ops, - &sh7206_module_clk_ops, - &sh7206_bus_clk_ops, - &sh7206_cpu_clk_ops, -}; - -void __init arch_init_clk_ops(struct clk_ops **ops, int idx) -{ - if (idx < ARRAY_SIZE(sh7206_clk_ops)) - *ops = sh7206_clk_ops[idx]; -} - diff --git a/trunk/arch/sh/kernel/cpu/sh2a/probe.c b/trunk/arch/sh/kernel/cpu/sh2a/probe.c deleted file mode 100644 index 87c6c0542089..000000000000 --- a/trunk/arch/sh/kernel/cpu/sh2a/probe.c +++ /dev/null @@ -1,39 +0,0 @@ -/* - * arch/sh/kernel/cpu/sh2a/probe.c - * - * CPU Subtype Probing for SH-2A. - * - * Copyright (C) 2004, 2005 Paul Mundt - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#include -#include -#include - -int __init detect_cpu_and_cache_system(void) -{ - /* Just SH7206 for now .. */ - cpu_data->type = CPU_SH7206; - - cpu_data->dcache.ways = 4; - cpu_data->dcache.way_incr = (1 << 11); - cpu_data->dcache.sets = 128; - cpu_data->dcache.entry_shift = 4; - cpu_data->dcache.linesz = L1_CACHE_BYTES; - cpu_data->dcache.flags = 0; - - /* - * The icache is the same as the dcache as far as this setup is - * concerned. The only real difference in hardware is that the icache - * lacks the U bit that the dcache has, none of this has any bearing - * on the cache info. - */ - cpu_data->icache = cpu_data->dcache; - - return 0; -} - diff --git a/trunk/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/trunk/arch/sh/kernel/cpu/sh2a/setup-sh7206.c deleted file mode 100644 index cdfeef49e62e..000000000000 --- a/trunk/arch/sh/kernel/cpu/sh2a/setup-sh7206.c +++ /dev/null @@ -1,58 +0,0 @@ -/* - * SH7206 Setup - * - * Copyright (C) 2006 Yoshinori Sato - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
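The deleted SH-2A probe above fills in the data cache geometry (4 ways, 128 sets, 16-byte entries). A quick stand-alone sketch of how those fields combine into the per-way and total cache sizes, assuming a 16-byte L1_CACHE_BYTES:

#include <stdio.h>

int main(void)
{
        /* Geometry from the deleted SH-2A probe: 4 ways, 128 sets, entry_shift 4 */
        unsigned int ways = 4, sets = 128, entry_shift = 4;
        unsigned int linesz = 1u << entry_shift;          /* assumed 16-byte cache lines */

        unsigned int waysize   = sets << entry_shift;     /* bytes covered by one way */
        unsigned int cachesize = waysize * ways;          /* total data cache size */

        printf("line %u bytes, way %u bytes, cache %u bytes\n",
               linesz, waysize, cachesize);
        return 0;
}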
- */ -#include -#include -#include -#include - -static struct plat_sci_port sci_platform_data[] = { - { - .mapbase = 0xfffe8000, - .flags = UPF_BOOT_AUTOCONF, - .type = PORT_SCIF, - .irqs = { 240, 241, 242, 243}, - }, { - .mapbase = 0xfffe8800, - .flags = UPF_BOOT_AUTOCONF, - .type = PORT_SCIF, - .irqs = { 244, 245, 246, 247}, - }, { - .mapbase = 0xfffe9000, - .flags = UPF_BOOT_AUTOCONF, - .type = PORT_SCIF, - .irqs = { 248, 249, 250, 251}, - }, { - .mapbase = 0xfffe9800, - .flags = UPF_BOOT_AUTOCONF, - .type = PORT_SCIF, - .irqs = { 252, 253, 254, 255}, - }, { - .flags = 0, - } -}; - -static struct platform_device sci_device = { - .name = "sh-sci", - .id = -1, - .dev = { - .platform_data = sci_platform_data, - }, -}; - -static struct platform_device *sh7206_devices[] __initdata = { - &sci_device, -}; - -static int __init sh7206_devices_setup(void) -{ - return platform_add_devices(sh7206_devices, - ARRAY_SIZE(sh7206_devices)); -} -__initcall(sh7206_devices_setup); diff --git a/trunk/arch/sh/kernel/cpu/sh3/Makefile b/trunk/arch/sh/kernel/cpu/sh3/Makefile index 83905e4e4387..58d3815695ff 100644 --- a/trunk/arch/sh/kernel/cpu/sh3/Makefile +++ b/trunk/arch/sh/kernel/cpu/sh3/Makefile @@ -2,7 +2,7 @@ # Makefile for the Linux/SuperH SH-3 backends. # -obj-y := ex.o probe.o entry.o +obj-y := ex.o probe.o # CPU subtype setup obj-$(CONFIG_CPU_SUBTYPE_SH7705) += setup-sh7705.o diff --git a/trunk/arch/sh/kernel/cpu/sh3/clock-sh7709.c b/trunk/arch/sh/kernel/cpu/sh3/clock-sh7709.c index b791a29fdb62..10461a745e5f 100644 --- a/trunk/arch/sh/kernel/cpu/sh3/clock-sh7709.c +++ b/trunk/arch/sh/kernel/cpu/sh3/clock-sh7709.c @@ -24,7 +24,7 @@ static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 }; static void set_bus_parent(struct clk *clk) { - struct clk *bus_clk = clk_get(NULL, "bus_clk"); + struct clk *bus_clk = clk_get("bus_clk"); clk->parent = bus_clk; clk_put(bus_clk); } diff --git a/trunk/arch/sh/kernel/cpu/sh4/Makefile b/trunk/arch/sh/kernel/cpu/sh4/Makefile index 6e415baf04b4..8dbf3895ece7 100644 --- a/trunk/arch/sh/kernel/cpu/sh4/Makefile +++ b/trunk/arch/sh/kernel/cpu/sh4/Makefile @@ -2,8 +2,7 @@ # Makefile for the Linux/SuperH SH-4 backends. 
# -obj-y := ex.o probe.o common.o -common-y += $(addprefix ../sh3/, entry.o) +obj-y := ex.o probe.o obj-$(CONFIG_SH_FPU) += fpu.o obj-$(CONFIG_SH_STORE_QUEUES) += sq.o diff --git a/trunk/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/trunk/arch/sh/kernel/cpu/sh4/clock-sh4-202.c index fa2019aabd74..bfdf5fe8d948 100644 --- a/trunk/arch/sh/kernel/cpu/sh4/clock-sh4-202.c +++ b/trunk/arch/sh/kernel/cpu/sh4/clock-sh4-202.c @@ -97,7 +97,7 @@ static void shoc_clk_recalc(struct clk *clk) static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate) { - struct clk *bclk = clk_get(NULL, "bus_clk"); + struct clk *bclk = clk_get("bus_clk"); unsigned long bclk_rate = clk_get_rate(bclk); clk_put(bclk); @@ -151,7 +151,7 @@ static struct clk *sh4202_onchip_clocks[] = { static int __init sh4202_clk_init(void) { - struct clk *clk = clk_get(NULL, "master_clk"); + struct clk *clk = clk_get("master_clk"); int i; for (i = 0; i < ARRAY_SIZE(sh4202_onchip_clocks); i++) { diff --git a/trunk/arch/sh/kernel/cpu/sh4/clock-sh7780.c b/trunk/arch/sh/kernel/cpu/sh4/clock-sh7780.c index 9e6a216750c8..93ad367342c9 100644 --- a/trunk/arch/sh/kernel/cpu/sh4/clock-sh7780.c +++ b/trunk/arch/sh/kernel/cpu/sh4/clock-sh7780.c @@ -98,7 +98,7 @@ static struct clk *sh7780_onchip_clocks[] = { static int __init sh7780_clk_init(void) { - struct clk *clk = clk_get(NULL, "master_clk"); + struct clk *clk = clk_get("master_clk"); int i; for (i = 0; i < ARRAY_SIZE(sh7780_onchip_clocks); i++) { diff --git a/trunk/arch/sh/kernel/cpu/sh4/fpu.c b/trunk/arch/sh/kernel/cpu/sh4/fpu.c index 7624677f6628..f486c07e10e2 100644 --- a/trunk/arch/sh/kernel/cpu/sh4/fpu.c +++ b/trunk/arch/sh/kernel/cpu/sh4/fpu.c @@ -282,8 +282,11 @@ ieee_fpe_handler (struct pt_regs *regs) grab_fpu(regs); restore_fpu(tsk); set_tsk_thread_flag(tsk, TIF_USEDFPU); - } else + } else { + tsk->thread.trap_no = 11; + tsk->thread.error_code = 0; force_sig(SIGFPE, tsk); + } regs->pc = nextpc; return 1; @@ -293,29 +296,29 @@ ieee_fpe_handler (struct pt_regs *regs) } asmlinkage void -do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6, - unsigned long r7, struct pt_regs __regs) +do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, + struct pt_regs regs) { - struct pt_regs *regs = RELOC_HIDE(&__regs, 0); struct task_struct *tsk = current; - if (ieee_fpe_handler(regs)) + if (ieee_fpe_handler (®s)) return; - regs->pc += 2; - save_fpu(tsk, regs); + regs.pc += 2; + save_fpu(tsk, ®s); + tsk->thread.trap_no = 11; + tsk->thread.error_code = 0; force_sig(SIGFPE, tsk); } asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6, - unsigned long r7, struct pt_regs __regs) + unsigned long r7, struct pt_regs regs) { - struct pt_regs *regs = RELOC_HIDE(&__regs, 0); struct task_struct *tsk = current; - grab_fpu(regs); - if (!user_mode(regs)) { + grab_fpu(®s); + if (!user_mode(®s)) { printk(KERN_ERR "BUG: FPU is used in kernel mode.\n"); return; } diff --git a/trunk/arch/sh/kernel/cpu/sh4/probe.c b/trunk/arch/sh/kernel/cpu/sh4/probe.c index afe0f1b1c030..c294de1e14a3 100644 --- a/trunk/arch/sh/kernel/cpu/sh4/probe.c +++ b/trunk/arch/sh/kernel/cpu/sh4/probe.c @@ -79,16 +79,16 @@ int __init detect_cpu_and_cache_system(void) case 0x205: cpu_data->type = CPU_SH7750; cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU | - CPU_HAS_PERF_COUNTER; + CPU_HAS_PERF_COUNTER | CPU_HAS_PTEA; break; case 0x206: cpu_data->type = CPU_SH7750S; cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU | - CPU_HAS_PERF_COUNTER; + 
CPU_HAS_PERF_COUNTER | CPU_HAS_PTEA; break; case 0x1100: cpu_data->type = CPU_SH7751; - cpu_data->flags |= CPU_HAS_FPU; + cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA; break; case 0x2000: cpu_data->type = CPU_SH73180; @@ -126,22 +126,23 @@ int __init detect_cpu_and_cache_system(void) break; case 0x8000: cpu_data->type = CPU_ST40RA; - cpu_data->flags |= CPU_HAS_FPU; + cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA; break; case 0x8100: cpu_data->type = CPU_ST40GX1; - cpu_data->flags |= CPU_HAS_FPU; + cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA; break; case 0x700: cpu_data->type = CPU_SH4_501; cpu_data->icache.ways = 2; cpu_data->dcache.ways = 2; + cpu_data->flags |= CPU_HAS_PTEA; break; case 0x600: cpu_data->type = CPU_SH4_202; cpu_data->icache.ways = 2; cpu_data->dcache.ways = 2; - cpu_data->flags |= CPU_HAS_FPU; + cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA; break; case 0x500 ... 0x501: switch (prr) { @@ -159,7 +160,7 @@ int __init detect_cpu_and_cache_system(void) cpu_data->icache.ways = 2; cpu_data->dcache.ways = 2; - cpu_data->flags |= CPU_HAS_FPU; + cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA; break; default: @@ -172,10 +173,6 @@ int __init detect_cpu_and_cache_system(void) cpu_data->dcache.ways = 1; #endif -#ifdef CONFIG_CPU_HAS_PTEA - cpu_data->flags |= CPU_HAS_PTEA; -#endif - /* * On anything that's not a direct-mapped cache, look to the CVR * for I/D-cache specifics. diff --git a/trunk/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/trunk/arch/sh/kernel/cpu/sh4/setup-sh7750.c index bbcb06f18b04..50812d57c1c1 100644 --- a/trunk/arch/sh/kernel/cpu/sh4/setup-sh7750.c +++ b/trunk/arch/sh/kernel/cpu/sh4/setup-sh7750.c @@ -2,7 +2,6 @@ * SH7750/SH7751 Setup * * Copyright (C) 2006 Paul Mundt - * Copyright (C) 2006 Jamie Lenehan * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive @@ -11,7 +10,6 @@ #include #include #include -#include #include static struct plat_sci_port sci_platform_data[] = { @@ -48,71 +46,3 @@ static int __init sh7750_devices_setup(void) ARRAY_SIZE(sh7750_devices)); } __initcall(sh7750_devices_setup); - -static struct ipr_data sh7750_ipr_map[] = { - /* IRQ, IPR-idx, shift, priority */ - { 16, 0, 12, 2 }, /* TMU0 TUNI*/ - { 17, 0, 12, 2 }, /* TMU1 TUNI */ - { 18, 0, 4, 2 }, /* TMU2 TUNI */ - { 19, 0, 4, 2 }, /* TMU2 TIPCI */ - { 27, 1, 12, 2 }, /* WDT ITI */ - { 20, 0, 0, 2 }, /* RTC ATI (alarm) */ - { 21, 0, 0, 2 }, /* RTC PRI (period) */ - { 22, 0, 0, 2 }, /* RTC CUI (carry) */ - { 23, 1, 4, 3 }, /* SCI ERI */ - { 24, 1, 4, 3 }, /* SCI RXI */ - { 25, 1, 4, 3 }, /* SCI TXI */ - { 40, 2, 4, 3 }, /* SCIF ERI */ - { 41, 2, 4, 3 }, /* SCIF RXI */ - { 42, 2, 4, 3 }, /* SCIF BRI */ - { 43, 2, 4, 3 }, /* SCIF TXI */ - { 34, 2, 8, 7 }, /* DMAC DMTE0 */ - { 35, 2, 8, 7 }, /* DMAC DMTE1 */ - { 36, 2, 8, 7 }, /* DMAC DMTE2 */ - { 37, 2, 8, 7 }, /* DMAC DMTE3 */ - { 28, 2, 8, 7 }, /* DMAC DMAE */ -}; - -static struct ipr_data sh7751_ipr_map[] = { - { 44, 2, 8, 7 }, /* DMAC DMTE4 */ - { 45, 2, 8, 7 }, /* DMAC DMTE5 */ - { 46, 2, 8, 7 }, /* DMAC DMTE6 */ - { 47, 2, 8, 7 }, /* DMAC DMTE7 */ - /* The following use INTC_INPRI00 for masking, which is a 32-bit - register, not a 16-bit register like the IPRx registers, so it - would need special support */ - /*{ 72, INTPRI00, 8, ? },*/ /* TMU3 TUNI */ - /*{ 76, INTPRI00, 12, ? 
},*/ /* TMU4 TUNI */ -}; - -static unsigned long ipr_offsets[] = { - 0xffd00004UL, /* 0: IPRA */ - 0xffd00008UL, /* 1: IPRB */ - 0xffd0000cUL, /* 2: IPRC */ - 0xffd00010UL, /* 3: IPRD */ -}; - -/* given the IPR index return the address of the IPR register */ -unsigned int map_ipridx_to_addr(int idx) -{ - if (idx >= ARRAY_SIZE(ipr_offsets)) - return 0; - return ipr_offsets[idx]; -} - -#define INTC_ICR 0xffd00000UL -#define INTC_ICR_IRLM (1<<7) - -/* enable individual interrupt mode for external interupts */ -void ipr_irq_enable_irlm(void) -{ - ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); -} - -void __init init_IRQ_ipr() -{ - make_ipr_irq(sh7750_ipr_map, ARRAY_SIZE(sh7750_ipr_map)); -#ifdef CONFIG_CPU_SUBTYPE_SH7751 - make_ipr_irq(sh7751_ipr_map, ARRAY_SIZE(sh7751_ipr_map)); -#endif -} diff --git a/trunk/arch/sh/kernel/cpu/sh4/setup-sh7780.c b/trunk/arch/sh/kernel/cpu/sh4/setup-sh7780.c index 9aeaa2ddaa28..814ddb226531 100644 --- a/trunk/arch/sh/kernel/cpu/sh4/setup-sh7780.c +++ b/trunk/arch/sh/kernel/cpu/sh4/setup-sh7780.c @@ -79,27 +79,25 @@ static int __init sh7780_devices_setup(void) __initcall(sh7780_devices_setup); static struct intc2_data intc2_irq_table[] = { - { 28, 0, 24, 0, 0, 2 }, /* TMU0 */ + { TIMER_IRQ, 0, 24, 0, INTC_TMU0_MSK, 2 }, + { 21, 1, 0, 0, INTC_RTC_MSK, TIMER_PRIORITY }, + { 22, 1, 1, 0, INTC_RTC_MSK, TIMER_PRIORITY }, + { 23, 1, 2, 0, INTC_RTC_MSK, TIMER_PRIORITY }, + { SCIF0_ERI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY }, + { SCIF0_RXI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY }, + { SCIF0_BRI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY }, + { SCIF0_TXI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY }, - { 21, 1, 0, 0, 2, 2 }, - { 22, 1, 1, 0, 2, 2 }, - { 23, 1, 2, 0, 2, 2 }, + { SCIF1_ERI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY }, + { SCIF1_RXI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY }, + { SCIF1_BRI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY }, + { SCIF1_TXI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY }, - { 40, 8, 24, 0, 3, 3 }, /* SCIF0 ERI */ - { 41, 8, 24, 0, 3, 3 }, /* SCIF0 RXI */ - { 42, 8, 24, 0, 3, 3 }, /* SCIF0 BRI */ - { 43, 8, 24, 0, 3, 3 }, /* SCIF0 TXI */ - - { 76, 8, 16, 0, 4, 3 }, /* SCIF1 ERI */ - { 77, 8, 16, 0, 4, 3 }, /* SCIF1 RXI */ - { 78, 8, 16, 0, 4, 3 }, /* SCIF1 BRI */ - { 79, 8, 16, 0, 4, 3 }, /* SCIF1 TXI */ - - { 64, 0x10, 8, 0, 14, 2 }, /* PCIC0 */ - { 65, 0x10, 0, 0, 15, 2 }, /* PCIC1 */ - { 66, 0x14, 24, 0, 16, 2 }, /* PCIC2 */ - { 67, 0x14, 16, 0, 17, 2 }, /* PCIC3 */ - { 68, 0x14, 8, 0, 18, 2 }, /* PCIC4 */ + { PCIC0_IRQ, 0x10, 8, 0, INTC_PCIC0_MSK, PCIC0_PRIORITY }, + { PCIC1_IRQ, 0x10, 0, 0, INTC_PCIC1_MSK, PCIC1_PRIORITY }, + { PCIC2_IRQ, 0x14, 24, 0, INTC_PCIC2_MSK, PCIC2_PRIORITY }, + { PCIC3_IRQ, 0x14, 16, 0, INTC_PCIC3_MSK, PCIC3_PRIORITY }, + { PCIC4_IRQ, 0x14, 8, 0, INTC_PCIC4_MSK, PCIC4_PRIORITY }, }; void __init init_IRQ_intc2(void) diff --git a/trunk/arch/sh/kernel/cpu/sh4/sq.c b/trunk/arch/sh/kernel/cpu/sh4/sq.c index 0c9ea38d2caa..7bcc73f9b8df 100644 --- a/trunk/arch/sh/kernel/cpu/sh4/sq.c +++ b/trunk/arch/sh/kernel/cpu/sh4/sq.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include #include @@ -38,7 +38,7 @@ struct sq_mapping { static struct sq_mapping *sq_mapping_list; static DEFINE_SPINLOCK(sq_mapping_lock); -static struct kmem_cache *sq_cache; +static kmem_cache_t *sq_cache; static unsigned long *sq_bitmap; #define store_queue_barrier() \ @@ -67,7 +67,6 @@ void sq_flush_range(unsigned long start, unsigned int len) /* Wait for completion */ 
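The deleted setup-sh7750.c code above maps an IPR table index to a register address and returns 0 for an index it cannot map, which the make_ipr_irq() caller then skips. A stand-alone sketch of that bounds-checked lookup using the same IPRA-IPRD addresses from the hunk:

#include <stdio.h>

/* IPR register addresses in index order, as in the deleted sh7750 table */
static const unsigned long ipr_offsets_ex[] = {
        0xffd00004UL,   /* 0: IPRA */
        0xffd00008UL,   /* 1: IPRB */
        0xffd0000cUL,   /* 2: IPRC */
        0xffd00010UL,   /* 3: IPRD */
};

/* Index -> register address; 0 means "no such IPR register" */
static unsigned long map_ipridx_to_addr_ex(int idx)
{
        if (idx < 0 || (size_t)idx >= sizeof(ipr_offsets_ex) / sizeof(ipr_offsets_ex[0]))
                return 0;
        return ipr_offsets_ex[idx];
}

int main(void)
{
        printf("idx 2 -> %#lx, idx 7 -> %#lx\n",
               map_ipridx_to_addr_ex(2), map_ipridx_to_addr_ex(7));
        return 0;
}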
store_queue_barrier(); } -EXPORT_SYMBOL(sq_flush_range); static inline void sq_mapping_list_add(struct sq_mapping *map) { @@ -167,7 +166,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size, map->size = size; map->name = name; - page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT, + page = bitmap_find_free_region(sq_bitmap, 0x04000000, get_order(map->size)); if (unlikely(page < 0)) { ret = -ENOSPC; @@ -194,7 +193,6 @@ unsigned long sq_remap(unsigned long phys, unsigned int size, kmem_cache_free(sq_cache, map); return ret; } -EXPORT_SYMBOL(sq_remap); /** * sq_unmap - Unmap a Store Queue allocation @@ -236,7 +234,6 @@ void sq_unmap(unsigned long vaddr) kmem_cache_free(sq_cache, map); } -EXPORT_SYMBOL(sq_unmap); /* * Needlessly complex sysfs interface. Unfortunately it doesn't seem like @@ -405,3 +402,7 @@ module_exit(sq_api_exit); MODULE_AUTHOR("Paul Mundt , M. R. Brown "); MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues"); MODULE_LICENSE("GPL"); + +EXPORT_SYMBOL(sq_remap); +EXPORT_SYMBOL(sq_unmap); +EXPORT_SYMBOL(sq_flush_range); diff --git a/trunk/arch/sh/kernel/early_printk.c b/trunk/arch/sh/kernel/early_printk.c index 60340823798a..a00022722e9e 100644 --- a/trunk/arch/sh/kernel/early_printk.c +++ b/trunk/arch/sh/kernel/early_printk.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #ifdef CONFIG_SH_STANDARD_BIOS #include @@ -62,9 +62,17 @@ static struct console bios_console = { #include #include "../../../drivers/serial/sh-sci.h" +#ifdef CONFIG_CPU_SH4 +#define SCIF_REG 0xffe80000 +#elif defined(CONFIG_CPU_SUBTYPE_SH72060) +#define SCIF_REG 0xfffe9800 +#else +#error "Undefined SCIF for this subtype" +#endif + static struct uart_port scif_port = { - .mapbase = CONFIG_EARLY_SCIF_CONSOLE_PORT, - .membase = (char __iomem *)CONFIG_EARLY_SCIF_CONSOLE_PORT, + .mapbase = SCIF_REG, + .membase = (char __iomem *)SCIF_REG, }; static void scif_sercon_putc(int c) @@ -105,29 +113,23 @@ static struct console scif_console = { .index = -1, }; -#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_STANDARD_BIOS) -/* - * Simple SCIF init, primarily aimed at SH7750 and other similar SH-4 - * devices that aren't using sh-ipl+g. 
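The sq_remap() hunk above changes the bitmap length passed to bitmap_find_free_region() between a byte count and a page count, and sizes each allocation by get_order(). A stand-alone sketch of that page/order arithmetic; the page shift and mapping size are assumptions for the example:

#include <stdio.h>

#define PAGE_SHIFT_EX 12                     /* assume 4 KiB pages */
#define PAGE_SIZE_EX  (1UL << PAGE_SHIFT_EX)

/* get_order()-style helper: smallest order with (PAGE_SIZE << order) >= size */
static int get_order_ex(unsigned long size)
{
        int order = 0;
        unsigned long span = PAGE_SIZE_EX;

        while (span < size) {
                span <<= 1;
                order++;
        }
        return order;
}

int main(void)
{
        unsigned long sq_space = 0x04000000UL;            /* 64 MiB store queue window */
        unsigned long map_size = 24 * 1024;               /* hypothetical mapping size */

        printf("bitmap bits (pages): %lu\n", sq_space >> PAGE_SHIFT_EX);
        printf("order for %lu bytes: %d (%lu bytes granted)\n",
               map_size, get_order_ex(map_size),
               PAGE_SIZE_EX << get_order_ex(map_size));
        return 0;
}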
- */ static void scif_sercon_init(int baud) { - ctrl_outw(0, scif_port.mapbase + 8); - ctrl_outw(0, scif_port.mapbase); + ctrl_outw(0, SCIF_REG + 8); + ctrl_outw(0, SCIF_REG); /* Set baud rate */ ctrl_outb((CONFIG_SH_PCLK_FREQ + 16 * baud) / - (32 * baud) - 1, scif_port.mapbase + 4); - - ctrl_outw(12, scif_port.mapbase + 24); - ctrl_outw(8, scif_port.mapbase + 24); - ctrl_outw(0, scif_port.mapbase + 32); - ctrl_outw(0x60, scif_port.mapbase + 16); - ctrl_outw(0, scif_port.mapbase + 36); - ctrl_outw(0x30, scif_port.mapbase + 8); + (32 * baud) - 1, SCIF_REG + 4); + + ctrl_outw(12, SCIF_REG + 24); + ctrl_outw(8, SCIF_REG + 24); + ctrl_outw(0, SCIF_REG + 32); + ctrl_outw(0x60, SCIF_REG + 16); + ctrl_outw(0, SCIF_REG + 36); + ctrl_outw(0x30, SCIF_REG + 8); } -#endif /* CONFIG_CPU_SH4 && !CONFIG_SH_STANDARD_BIOS */ -#endif /* CONFIG_EARLY_SCIF_CONSOLE */ +#endif /* * Setup a default console, if more than one is compiled in, rely on the @@ -166,7 +168,7 @@ int __init setup_early_printk(char *opt) if (!strncmp(buf, "serial", 6)) { early_console = &scif_console; -#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_STANDARD_BIOS) +#ifdef CONFIG_CPU_SH4 scif_sercon_init(115200); #endif } diff --git a/trunk/arch/sh/kernel/entry-common.S b/trunk/arch/sh/kernel/entry-common.S deleted file mode 100644 index 29136a35d7c7..000000000000 --- a/trunk/arch/sh/kernel/entry-common.S +++ /dev/null @@ -1,433 +0,0 @@ -/* $Id: entry.S,v 1.37 2004/06/11 13:02:46 doyu Exp $ - * - * linux/arch/sh/entry.S - * - * Copyright (C) 1999, 2000, 2002 Niibe Yutaka - * Copyright (C) 2003 Paul Mundt - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - */ - -! NOTE: -! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address -! to be jumped is too far, but it causes illegal slot exception. - -/* - * entry.S contains the system-call and fault low-level handling routines. - * This also contains the timer-interrupt handler, as well as all interrupts - * and faults that can result in a task-switch. - * - * NOTE: This code handles signal-recognition, which happens every time - * after a timer-interrupt and after each system call. - * - * NOTE: This code uses a convention that instructions in the delay slot - * of a transfer-control instruction are indented by an extra space, thus: - * - * jmp @k0 ! control-transfer instruction - * ldc k1, ssr ! delay slot - * - * Stack layout in 'ret_from_syscall': - * ptrace needs to have all regs on the stack. - * if the order here is changed, it needs to be - * updated in ptrace.c and ptrace.h - * - * r0 - * ... - * r15 = stack pointer - * spc - * pr - * ssr - * gbr - * mach - * macl - * syscall # - * - */ - -#if defined(CONFIG_PREEMPT) -# define preempt_stop() cli -#else -# define preempt_stop() -# define resume_kernel __restore_all -#endif - -#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB) -! Handle kernel debug if either kgdb (SW) or gdb-stub (FW) is present. -! If both are configured, handle the debug traps (breakpoints) in SW, -! but still allow BIOS traps to FW. 
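scif_sercon_init() above programs the SCIF bit-rate register with pclk/(32*baud) - 1, adding 16*baud before the division so the result rounds to the nearest value. A stand-alone sketch of that calculation; the peripheral clock frequency is only an example value:

#include <stdio.h>

/* BRR value as computed by scif_sercon_init(): pclk/(32*baud) - 1, rounded */
static int scif_brr(unsigned long pclk, unsigned long baud)
{
        return (int)((pclk + 16 * baud) / (32 * baud)) - 1;
}

int main(void)
{
        unsigned long pclk = 33333333UL;     /* hypothetical 33.33 MHz peripheral clock */
        printf("BRR for 115200 baud: %d\n", scif_brr(pclk, 115200));
        printf("BRR for   9600 baud: %d\n", scif_brr(pclk, 9600));
        return 0;
}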
- - .align 2 -debug_kernel: -#if defined(CONFIG_SH_STANDARD_BIOS) && defined(CONFIG_SH_KGDB) - /* Force BIOS call to FW (debug_trap put TRA in r8) */ - mov r8,r0 - shlr2 r0 - cmp/eq #0x3f,r0 - bt debug_kernel_fw -#endif /* CONFIG_SH_STANDARD_BIOS && CONFIG_SH_KGDB */ - -debug_enter: -#if defined(CONFIG_SH_KGDB) - /* Jump to kgdb, pass stacked regs as arg */ -debug_kernel_sw: - mov.l 3f, r0 - jmp @r0 - mov r15, r4 - .align 2 -3: .long kgdb_handle_exception -#endif /* CONFIG_SH_KGDB */ - -#endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */ - - - .align 2 -debug_trap: -#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB) - mov #OFF_SR, r0 - mov.l @(r0,r15), r0 ! get status register - shll r0 - shll r0 ! kernel space? - bt/s debug_kernel -#endif - mov.l @r15, r0 ! Restore R0 value - mov.l 1f, r8 - jmp @r8 - nop - - .align 2 -ENTRY(exception_error) - ! -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 3f, r0 - jsr @r0 - nop -#endif - sti - mov.l 2f, r0 - jmp @r0 - nop - -! - .align 2 -1: .long break_point_trap_software -2: .long do_exception_error -#ifdef CONFIG_TRACE_IRQFLAGS -3: .long trace_hardirqs_on -#endif - - .align 2 -ret_from_exception: - preempt_stop() -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 4f, r0 - jsr @r0 - nop -#endif -ENTRY(ret_from_irq) - ! - mov #OFF_SR, r0 - mov.l @(r0,r15), r0 ! get status register - shll r0 - shll r0 ! kernel space? - get_current_thread_info r8, r0 - bt resume_kernel ! Yes, it's from kernel, go back soon - -#ifdef CONFIG_PREEMPT - bra resume_userspace - nop -ENTRY(resume_kernel) - mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count - tst r0, r0 - bf noresched -need_resched: - mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags - tst #_TIF_NEED_RESCHED, r0 ! need_resched set? - bt noresched - - mov #OFF_SR, r0 - mov.l @(r0,r15), r0 ! get status register - and #0xf0, r0 ! interrupts off (exception path)? - cmp/eq #0xf0, r0 - bt noresched - - mov.l 1f, r0 - mov.l r0, @(TI_PRE_COUNT,r8) - -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 3f, r0 - jsr @r0 - nop -#endif - sti - mov.l 2f, r0 - jsr @r0 - nop - mov #0, r0 - mov.l r0, @(TI_PRE_COUNT,r8) - cli -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 4f, r0 - jsr @r0 - nop -#endif - - bra need_resched - nop - -noresched: - bra __restore_all - nop - - .align 2 -1: .long PREEMPT_ACTIVE -2: .long schedule -#ifdef CONFIG_TRACE_IRQFLAGS -3: .long trace_hardirqs_on -4: .long trace_hardirqs_off -#endif -#endif - -ENTRY(resume_userspace) - ! r8: current_thread_info - cli -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 5f, r0 - jsr @r0 - nop -#endif - mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags - tst #_TIF_WORK_MASK, r0 - bt/s __restore_all - tst #_TIF_NEED_RESCHED, r0 - - .align 2 -work_pending: - ! r0: current_thread_info->flags - ! r8: current_thread_info - ! t: result of "tst #_TIF_NEED_RESCHED, r0" - bf/s work_resched - tst #(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0 -work_notifysig: - bt/s __restore_all - mov r15, r4 - mov r12, r5 ! set arg1(save_r0) - mov r0, r6 - mov.l 2f, r1 - mov.l 3f, r0 - jmp @r1 - lds r0, pr -work_resched: -#ifndef CONFIG_PREEMPT - ! gUSA handling - mov.l @(OFF_SP,r15), r0 ! get user space stack pointer - mov r0, r1 - shll r0 - bf/s 1f - shll r0 - bf/s 1f - mov #OFF_PC, r0 - ! SP >= 0xc0000000 : gUSA mark - mov.l @(r0,r15), r2 ! get user space PC (program counter) - mov.l @(OFF_R0,r15), r3 ! end point - cmp/hs r3, r2 ! r2 >= r3? - bt 1f - add r3, r1 ! rewind point #2 - mov.l r1, @(r0,r15) ! reset PC to rewind point #2 - ! -1: -#endif - mov.l 1f, r1 - jsr @r1 ! 
schedule - nop - cli -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 5f, r0 - jsr @r0 - nop -#endif - ! - mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags - tst #_TIF_WORK_MASK, r0 - bt __restore_all - bra work_pending - tst #_TIF_NEED_RESCHED, r0 - - .align 2 -1: .long schedule -2: .long do_notify_resume -3: .long restore_all -#ifdef CONFIG_TRACE_IRQFLAGS -4: .long trace_hardirqs_on -5: .long trace_hardirqs_off -#endif - - .align 2 -syscall_exit_work: - ! r0: current_thread_info->flags - ! r8: current_thread_info - tst #_TIF_SYSCALL_TRACE, r0 - bt/s work_pending - tst #_TIF_NEED_RESCHED, r0 -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 5f, r0 - jsr @r0 - nop -#endif - sti - ! XXX setup arguments... - mov.l 4f, r0 ! do_syscall_trace - jsr @r0 - nop - bra resume_userspace - nop - - .align 2 -syscall_trace_entry: - ! Yes it is traced. - ! XXX setup arguments... - mov.l 4f, r11 ! Call do_syscall_trace which notifies - jsr @r11 ! superior (will chomp R[0-7]) - nop - ! Reload R0-R4 from kernel stack, where the - ! parent may have modified them using - ! ptrace(POKEUSR). (Note that R0-R2 are - ! used by the system call handler directly - ! from the kernel stack anyway, so don't need - ! to be reloaded here.) This allows the parent - ! to rewrite system calls and args on the fly. - mov.l @(OFF_R4,r15), r4 ! arg0 - mov.l @(OFF_R5,r15), r5 - mov.l @(OFF_R6,r15), r6 - mov.l @(OFF_R7,r15), r7 ! arg3 - mov.l @(OFF_R3,r15), r3 ! syscall_nr - ! - mov.l 2f, r10 ! Number of syscalls - cmp/hs r10, r3 - bf syscall_call - mov #-ENOSYS, r0 - bra syscall_exit - mov.l r0, @(OFF_R0,r15) ! Return value - -__restore_all: - mov.l 1f, r0 - jmp @r0 - nop - - .align 2 -1: .long restore_all - - .align 2 -not_syscall_tra: - bra debug_trap - nop - - .align 2 -syscall_badsys: ! Bad syscall number - mov #-ENOSYS, r0 - bra resume_userspace - mov.l r0, @(OFF_R0,r15) ! Return value - - -/* - * Syscall interface: - * - * Syscall #: R3 - * Arguments #0 to #3: R4--R7 - * Arguments #4 to #6: R0, R1, R2 - * TRA: (number of arguments + 0x10) x 4 - * - * This code also handles delegating other traps to the BIOS/gdb stub - * according to: - * - * Trap number - * (TRA>>2) Purpose - * -------- ------- - * 0x0-0xf old syscall ABI - * 0x10-0x1f new syscall ABI - * 0x20-0xff delegated through debug_trap to BIOS/gdb stub. - * - * Note: When we're first called, the TRA value must be shifted - * right 2 bits in order to get the value that was used as the "trapa" - * argument. - */ - - .align 2 - .globl ret_from_fork -ret_from_fork: - mov.l 1f, r8 - jsr @r8 - mov r0, r4 - bra syscall_exit - nop - .align 2 -1: .long schedule_tail - ! -ENTRY(system_call) -#if !defined(CONFIG_CPU_SH2) - mov.l 1f, r9 - mov.l @r9, r8 ! Read from TRA (Trap Address) Register -#endif - ! - ! Is the trap argument >= 0x20? (TRA will be >= 0x80) - mov #0x7f, r9 - cmp/hi r9, r8 - bt/s not_syscall_tra - mov #OFF_TRA, r9 - add r15, r9 - mov.l r8, @r9 ! set TRA value to tra -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 5f, r10 - jsr @r10 - nop -#endif - sti - - ! - get_current_thread_info r8, r10 - mov.l @(TI_FLAGS,r8), r8 - mov #_TIF_SYSCALL_TRACE, r10 - tst r10, r8 - bf syscall_trace_entry - ! - mov.l 2f, r8 ! Number of syscalls - cmp/hs r8, r3 - bt syscall_badsys - ! -syscall_call: - shll2 r3 ! x4 - mov.l 3f, r8 ! Load the address of sys_call_table - add r8, r3 - mov.l @r3, r8 - jsr @r8 ! jump to specific syscall handler - nop - mov.l @(OFF_R0,r15), r12 ! save r0 - mov.l r0, @(OFF_R0,r15) ! save the return value - ! 
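The syscall-interface comment in the deleted entry code above documents that TRA holds the trapa immediate shifted left by two, with 0x10-0x1f reserved for the new syscall ABI and 0x20 and above delegated to the BIOS/gdb stub. A stand-alone sketch of that classification, mirroring the comment rather than the assembly itself:

#include <stdio.h>

/*
 * TRA = (trapa immediate) << 2.  Per the deleted comment block:
 *   0x00-0x0f  old syscall ABI
 *   0x10-0x1f  new syscall ABI
 *   0x20-0xff  delegated to the BIOS/gdb stub
 */
static const char *classify_trap(unsigned int tra)
{
        unsigned int imm = tra >> 2;        /* recover the trapa argument */

        if (imm < 0x10)
                return "old syscall ABI";
        if (imm < 0x20)
                return "new syscall ABI";
        return "debug trap (BIOS/gdb stub)";
}

int main(void)
{
        /* e.g. "trapa #0x14" for a 4-argument syscall under the new ABI */
        unsigned int tra = 0x14 << 2;
        printf("TRA %#x -> %s\n", tra, classify_trap(tra));
        return 0;
}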
-syscall_exit: - cli -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 6f, r0 - jsr @r0 - nop -#endif - ! - get_current_thread_info r8, r0 - mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags - tst #_TIF_ALLWORK_MASK, r0 - bf syscall_exit_work - bra __restore_all - nop - .align 2 -#if !defined(CONFIG_CPU_SH2) -1: .long TRA -#endif -2: .long NR_syscalls -3: .long sys_call_table -4: .long do_syscall_trace -#ifdef CONFIG_TRACE_IRQFLAGS -5: .long trace_hardirqs_on -6: .long trace_hardirqs_off -#endif diff --git a/trunk/arch/sh/kernel/cpu/sh3/entry.S b/trunk/arch/sh/kernel/entry.S similarity index 58% rename from trunk/arch/sh/kernel/cpu/sh3/entry.S rename to trunk/arch/sh/kernel/entry.S index 8c0dc2700c69..39aaefb2d83f 100644 --- a/trunk/arch/sh/kernel/cpu/sh3/entry.S +++ b/trunk/arch/sh/kernel/entry.S @@ -1,5 +1,5 @@ /* - * arch/sh/kernel/entry.S + * linux/arch/sh/entry.S * * Copyright (C) 1999, 2000, 2002 Niibe Yutaka * Copyright (C) 2003 - 2006 Paul Mundt @@ -7,16 +7,15 @@ * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. + * */ #include #include #include #include #include -#include #include -#include -#include +#include ! NOTE: ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address @@ -82,8 +81,6 @@ OFF_TRA = (16*4+6*4) #define k_g_imask r6_bank /* r6_bank1 */ #define current r7 /* r7_bank1 */ -#include - /* * Kernel mode register usage: * k0 scratch @@ -110,6 +107,26 @@ OFF_TRA = (16*4+6*4) ! this first version depends *much* on C implementation. ! +#define CLI() \ + stc sr, r0; \ + or #0xf0, r0; \ + ldc r0, sr + +#define STI() \ + mov.l __INV_IMASK, r11; \ + stc sr, r10; \ + and r11, r10; \ + stc k_g_imask, r11; \ + or r11, r10; \ + ldc r10, sr + +#if defined(CONFIG_PREEMPT) +# define preempt_stop() CLI() +#else +# define preempt_stop() +# define resume_kernel restore_all +#endif + #if defined(CONFIG_MMU) .align 2 ENTRY(tlb_miss_load) @@ -138,14 +155,29 @@ ENTRY(tlb_protection_violation_store) call_dpf: mov.l 1f, r0 - mov.l @r0, r6 ! address + mov r5, r8 + mov.l @r0, r6 + mov r6, r9 + mov.l 2f, r0 + sts pr, r10 + jsr @r0 + mov r15, r4 + ! + tst r0, r0 + bf/s 0f + lds r10, pr + rts + nop +0: STI() mov.l 3f, r0 - + mov r9, r6 + mov r8, r5 jmp @r0 - mov r15, r4 ! regs + mov r15, r4 .align 2 1: .long MMU_TEA +2: .long __do_page_fault 3: .long do_page_fault .align 2 @@ -171,6 +203,32 @@ call_dae: 2: .long do_address_error #endif /* CONFIG_MMU */ +#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB) +! Handle kernel debug if either kgdb (SW) or gdb-stub (FW) is present. +! If both are configured, handle the debug traps (breakpoints) in SW, +! but still allow BIOS traps to FW. 
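The three-line comment above describes a routing decision that the debug_kernel/debug_trap code which follows implements in assembly. As a rough reading aid (illustrative only, not part of the patch; the function name and the enum are invented), the same decision in C looks roughly like this, assuming bit 30 of the saved SR is the MD (privileged-mode) bit and TRA holds the trapa immediate shifted left by two:

	/* Illustrative sketch: where a trapa in the 0x20-0xff range ends up. */
	enum debug_route {
		ROUTE_USER_BREAKPOINT,	/* break_point_trap_software */
		ROUTE_GDB_STUB_FW,	/* debug_kernel_fw (SH standard BIOS) */
		ROUTE_KGDB_SW,		/* kgdb_handle_exception */
	};

	static enum debug_route route_debug_trap(unsigned long saved_sr,
						 unsigned long tra)
	{
		unsigned int trap = tra >> 2;	/* recover the trapa immediate */

		if (!(saved_sr & (1UL << 30)))	/* trap taken in user mode */
			return ROUTE_USER_BREAKPOINT;

	#if defined(CONFIG_SH_STANDARD_BIOS) && defined(CONFIG_SH_KGDB)
		if (trap == 0x3f)		/* BIOS call: always force firmware */
			return ROUTE_GDB_STUB_FW;
	#endif
	#if defined(CONFIG_SH_KGDB)
		return ROUTE_KGDB_SW;		/* handle breakpoints in software */
	#else
		return ROUTE_GDB_STUB_FW;
	#endif
	}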
+ + .align 2 +debug_kernel: +#if defined(CONFIG_SH_STANDARD_BIOS) && defined(CONFIG_SH_KGDB) + /* Force BIOS call to FW (debug_trap put TRA in r8) */ + mov r8,r0 + shlr2 r0 + cmp/eq #0x3f,r0 + bt debug_kernel_fw +#endif /* CONFIG_SH_STANDARD_BIOS && CONFIG_SH_KGDB */ + +debug_enter: +#if defined(CONFIG_SH_KGDB) + /* Jump to kgdb, pass stacked regs as arg */ +debug_kernel_sw: + mov.l 3f, r0 + jmp @r0 + mov r15, r4 + .align 2 +3: .long kgdb_handle_exception +#endif /* CONFIG_SH_KGDB */ + #if defined(CONFIG_SH_STANDARD_BIOS) /* Unwind the stack and jmp to the debug entry */ debug_kernel_fw: @@ -211,6 +269,276 @@ debug_kernel_fw: 2: .long gdb_vbr_vector #endif /* CONFIG_SH_STANDARD_BIOS */ +#endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */ + + + .align 2 +debug_trap: +#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB) + mov #OFF_SR, r0 + mov.l @(r0,r15), r0 ! get status register + shll r0 + shll r0 ! kernel space? + bt/s debug_kernel +#endif + mov.l @r15, r0 ! Restore R0 value + mov.l 1f, r8 + jmp @r8 + nop + + .align 2 +ENTRY(exception_error) + ! + STI() + mov.l 2f, r0 + jmp @r0 + nop + +! + .align 2 +1: .long break_point_trap_software +2: .long do_exception_error + + .align 2 +ret_from_exception: + preempt_stop() +ENTRY(ret_from_irq) + ! + mov #OFF_SR, r0 + mov.l @(r0,r15), r0 ! get status register + shll r0 + shll r0 ! kernel space? + bt/s resume_kernel ! Yes, it's from kernel, go back soon + GET_THREAD_INFO(r8) + +#ifdef CONFIG_PREEMPT + bra resume_userspace + nop +ENTRY(resume_kernel) + mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count + tst r0, r0 + bf noresched +need_resched: + mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags + tst #_TIF_NEED_RESCHED, r0 ! need_resched set? + bt noresched + + mov #OFF_SR, r0 + mov.l @(r0,r15), r0 ! get status register + and #0xf0, r0 ! interrupts off (exception path)? + cmp/eq #0xf0, r0 + bt noresched + + mov.l 1f, r0 + mov.l r0, @(TI_PRE_COUNT,r8) + + STI() + mov.l 2f, r0 + jsr @r0 + nop + mov #0, r0 + mov.l r0, @(TI_PRE_COUNT,r8) + CLI() + + bra need_resched + nop +noresched: + bra restore_all + nop + + .align 2 +1: .long PREEMPT_ACTIVE +2: .long schedule +#endif + +ENTRY(resume_userspace) + ! r8: current_thread_info + CLI() + mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags + tst #_TIF_WORK_MASK, r0 + bt/s restore_all + tst #_TIF_NEED_RESCHED, r0 + + .align 2 +work_pending: + ! r0: current_thread_info->flags + ! r8: current_thread_info + ! t: result of "tst #_TIF_NEED_RESCHED, r0" + bf/s work_resched + tst #(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0 +work_notifysig: + bt/s restore_all + mov r15, r4 + mov r12, r5 ! set arg1(save_r0) + mov r0, r6 + mov.l 2f, r1 + mova restore_all, r0 + jmp @r1 + lds r0, pr +work_resched: +#ifndef CONFIG_PREEMPT + ! gUSA handling + mov.l @(OFF_SP,r15), r0 ! get user space stack pointer + mov r0, r1 + shll r0 + bf/s 1f + shll r0 + bf/s 1f + mov #OFF_PC, r0 + ! SP >= 0xc0000000 : gUSA mark + mov.l @(r0,r15), r2 ! get user space PC (program counter) + mov.l @(OFF_R0,r15), r3 ! end point + cmp/hs r3, r2 ! r2 >= r3? + bt 1f + add r3, r1 ! rewind point #2 + mov.l r1, @(r0,r15) ! reset PC to rewind point #2 + ! +1: +#endif + mov.l 1f, r1 + jsr @r1 ! schedule + nop + CLI() + ! + mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags + tst #_TIF_WORK_MASK, r0 + bt restore_all + bra work_pending + tst #_TIF_NEED_RESCHED, r0 + + .align 2 +1: .long schedule +2: .long do_notify_resume + + .align 2 +syscall_exit_work: + ! r0: current_thread_info->flags + ! 
r8: current_thread_info + tst #_TIF_SYSCALL_TRACE, r0 + bt/s work_pending + tst #_TIF_NEED_RESCHED, r0 + STI() + ! XXX setup arguments... + mov.l 4f, r0 ! do_syscall_trace + jsr @r0 + nop + bra resume_userspace + nop + + .align 2 +syscall_trace_entry: + ! Yes it is traced. + ! XXX setup arguments... + mov.l 4f, r11 ! Call do_syscall_trace which notifies + jsr @r11 ! superior (will chomp R[0-7]) + nop + ! Reload R0-R4 from kernel stack, where the + ! parent may have modified them using + ! ptrace(POKEUSR). (Note that R0-R2 are + ! used by the system call handler directly + ! from the kernel stack anyway, so don't need + ! to be reloaded here.) This allows the parent + ! to rewrite system calls and args on the fly. + mov.l @(OFF_R4,r15), r4 ! arg0 + mov.l @(OFF_R5,r15), r5 + mov.l @(OFF_R6,r15), r6 + mov.l @(OFF_R7,r15), r7 ! arg3 + mov.l @(OFF_R3,r15), r3 ! syscall_nr + ! Arrange for do_syscall_trace to be called + ! again as the system call returns. + mov.l 2f, r10 ! Number of syscalls + cmp/hs r10, r3 + bf syscall_call + mov #-ENOSYS, r0 + bra syscall_exit + mov.l r0, @(OFF_R0,r15) ! Return value + +/* + * Syscall interface: + * + * Syscall #: R3 + * Arguments #0 to #3: R4--R7 + * Arguments #4 to #6: R0, R1, R2 + * TRA: (number of arguments + 0x10) x 4 + * + * This code also handles delegating other traps to the BIOS/gdb stub + * according to: + * + * Trap number + * (TRA>>2) Purpose + * -------- ------- + * 0x0-0xf old syscall ABI + * 0x10-0x1f new syscall ABI + * 0x20-0xff delegated through debug_trap to BIOS/gdb stub. + * + * Note: When we're first called, the TRA value must be shifted + * right 2 bits in order to get the value that was used as the "trapa" + * argument. + */ + + .align 2 + .globl ret_from_fork +ret_from_fork: + mov.l 1f, r8 + jsr @r8 + mov r0, r4 + bra syscall_exit + nop + .align 2 +1: .long schedule_tail + ! +ENTRY(system_call) + mov.l 1f, r9 + mov.l @r9, r8 ! Read from TRA (Trap Address) Register + ! + ! Is the trap argument >= 0x20? (TRA will be >= 0x80) + mov #0x7f, r9 + cmp/hi r9, r8 + bt/s 0f + mov #OFF_TRA, r9 + add r15, r9 + ! + mov.l r8, @r9 ! set TRA value to tra + STI() + ! Call the system call handler through the table. + ! First check for bad syscall number + mov r3, r9 + mov.l 2f, r8 ! Number of syscalls + cmp/hs r8, r9 + bf/s good_system_call + GET_THREAD_INFO(r8) +syscall_badsys: ! Bad syscall number + mov #-ENOSYS, r0 + bra resume_userspace + mov.l r0, @(OFF_R0,r15) ! Return value + ! +0: + bra debug_trap + nop + ! +good_system_call: ! Good syscall number + mov.l @(TI_FLAGS,r8), r8 + mov #_TIF_SYSCALL_TRACE, r10 + tst r10, r8 + bf syscall_trace_entry + ! +syscall_call: + shll2 r9 ! x4 + mov.l 3f, r8 ! Load the address of sys_call_table + add r8, r9 + mov.l @r9, r8 + jsr @r8 ! jump to specific syscall handler + nop + mov.l @(OFF_R0,r15), r12 ! save r0 + mov.l r0, @(OFF_R0,r15) ! save the return value + ! +syscall_exit: + CLI() + ! + GET_THREAD_INFO(r8) + mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags + tst #_TIF_ALLWORK_MASK, r0 + bf syscall_exit_work restore_all: mov.l @r15+, r0 mov.l @r15+, r1 @@ -278,9 +606,7 @@ skip_restore: ! ! Calculate new SR value mov k3, k2 ! original SR value - mov #0xf0, k1 - extu.b k1, k1 - not k1, k1 + mov.l 9f, k1 and k1, k2 ! Mask orignal SR value ! mov k3, k0 ! Calculate IMASK-bits @@ -306,12 +632,16 @@ skip_restore: nop .align 2 +1: .long TRA +2: .long NR_syscalls +3: .long sys_call_table +4: .long do_syscall_trace 5: .long 0x00001000 ! DSP 7: .long 0x30000000 +9: +__INV_IMASK: + .long 0xffffff0f ! ~(IMASK) -! 
common exception handler -#include "../../entry-common.S" - ! Exception Vector Base ! ! Should be aligned page boundary. @@ -331,176 +661,9 @@ general_exception: 2: .long ret_from_exception ! ! - -/* This code makes some assumptions to improve performance. - * Make sure they are stil true. */ -#if PTRS_PER_PGD != PTRS_PER_PTE -#error PGD and PTE sizes don't match -#endif - -/* gas doesn't flag impossible values for mov #immediate as an error */ -#if (_PAGE_PRESENT >> 2) > 0x7f -#error cannot load PAGE_PRESENT as an immediate -#endif -#if _PAGE_DIRTY > 0x7f -#error cannot load PAGE_DIRTY as an immediate -#endif -#if (_PAGE_PRESENT << 2) != _PAGE_ACCESSED -#error cannot derive PAGE_ACCESSED from PAGE_PRESENT -#endif - -#if defined(CONFIG_CPU_SH4) -#define ldmmupteh(r) mov.l 8f, r -#else -#define ldmmupteh(r) mov #MMU_PTEH, r -#endif - .balign 1024,0,1024 tlb_miss: -#ifdef COUNT_EXCEPTIONS - ! Increment the counts - mov.l 9f, k1 - mov.l @k1, k2 - add #1, k2 - mov.l k2, @k1 -#endif - - ! k0 scratch - ! k1 pgd and pte pointers - ! k2 faulting address - ! k3 pgd and pte index masks - ! k4 shift - - ! Load up the pgd entry (k1) - - ldmmupteh(k0) ! 9 LS (latency=2) MMU_PTEH - - mov.w 4f, k3 ! 8 LS (latency=2) (PTRS_PER_PGD-1) << 2 - mov #-(PGDIR_SHIFT-2), k4 ! 6 EX - - mov.l @(MMU_TEA-MMU_PTEH,k0), k2 ! 18 LS (latency=2) - - mov.l @(MMU_TTB-MMU_PTEH,k0), k1 ! 18 LS (latency=2) - - mov k2, k0 ! 5 MT (latency=0) - shld k4, k0 ! 99 EX - - and k3, k0 ! 78 EX - - mov.l @(k0, k1), k1 ! 21 LS (latency=2) - mov #-(PAGE_SHIFT-2), k4 ! 6 EX - - ! Load up the pte entry (k2) - - mov k2, k0 ! 5 MT (latency=0) - shld k4, k0 ! 99 EX - - tst k1, k1 ! 86 MT - - bt 20f ! 110 BR - - and k3, k0 ! 78 EX - mov.w 5f, k4 ! 8 LS (latency=2) _PAGE_PRESENT - - mov.l @(k0, k1), k2 ! 21 LS (latency=2) - add k0, k1 ! 49 EX - -#ifdef CONFIG_CPU_HAS_PTEA - ! Test the entry for present and _PAGE_ACCESSED - - mov #-28, k3 ! 6 EX - mov k2, k0 ! 5 MT (latency=0) - - tst k4, k2 ! 68 MT - shld k3, k0 ! 99 EX - - bt 20f ! 110 BR - - ! Set PTEA register - ! MMU_PTEA = ((pteval >> 28) & 0xe) | (pteval & 0x1) - ! - ! k0=pte>>28, k1=pte*, k2=pte, k3=, k4=_PAGE_PRESENT - - and #0xe, k0 ! 79 EX - - mov k0, k3 ! 5 MT (latency=0) - mov k2, k0 ! 5 MT (latency=0) - - and #1, k0 ! 79 EX - - or k0, k3 ! 82 EX - - ldmmupteh(k0) ! 9 LS (latency=2) - shll2 k4 ! 101 EX _PAGE_ACCESSED - - tst k4, k2 ! 68 MT - - mov.l k3, @(MMU_PTEA-MMU_PTEH,k0) ! 27 LS - - mov.l 7f, k3 ! 9 LS (latency=2) _PAGE_FLAGS_HARDWARE_MASK - - ! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED -#else - - ! Test the entry for present and _PAGE_ACCESSED - - mov.l 7f, k3 ! 9 LS (latency=2) _PAGE_FLAGS_HARDWARE_MASK - tst k4, k2 ! 68 MT - - shll2 k4 ! 101 EX _PAGE_ACCESSED - ldmmupteh(k0) ! 9 LS (latency=2) - - bt 20f ! 110 BR - tst k4, k2 ! 68 MT - - ! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED - -#endif - - ! Set up the entry - - and k2, k3 ! 78 EX - bt/s 10f ! 108 BR - - mov.l k3, @(MMU_PTEL-MMU_PTEH,k0) ! 27 LS - - ldtlb ! 128 CO - - ! At least one instruction between ldtlb and rte - nop ! 119 NOP - - rte ! 126 CO - - nop ! 119 NOP - - -10: or k4, k2 ! 82 EX - - ldtlb ! 128 CO - - ! At least one instruction between ldtlb and rte - mov.l k2, @k1 ! 27 LS - - rte ! 126 CO - - ! Note we cannot execute mov here, because it is executed after - ! restoring SSR, so would be executed in user space. - nop ! 119 NOP - - - .align 5 - ! Once cache line if possible... 
-1: .long swapper_pg_dir -4: .short (PTRS_PER_PGD-1) << 2 -5: .short _PAGE_PRESENT -7: .long _PAGE_FLAGS_HARDWARE_MASK -8: .long MMU_PTEH -#ifdef COUNT_EXCEPTIONS -9: .long exception_count_miss -#endif - - ! Either pgd or pte not present -20: mov.l 1f, k2 + mov.l 1f, k2 mov.l 4f, k3 bra handle_exception mov.l @k2, k2 @@ -547,9 +710,8 @@ ENTRY(handle_exception) bt/s 1f ! It's a kernel to kernel transition. mov r15, k0 ! save original stack to k0 /* User space to kernel */ - mov #(THREAD_SIZE >> 10), k1 + mov #(THREAD_SIZE >> 8), k1 shll8 k1 ! k1 := THREAD_SIZE - shll2 k1 add current, k1 mov k1, r15 ! change to kernel stack ! @@ -599,7 +761,7 @@ skip_save: ! Save the user registers on the stack. mov.l k2, @-r15 ! EXPEVT - mov #-1, k4 + mov #-1, k4 mov.l k4, @-r15 ! set TRA (default: -1) ! sts.l macl, @-r15 @@ -651,15 +813,6 @@ skip_save: bf interrupt_exception shlr2 r8 shlr r8 - -#ifdef COUNT_EXCEPTIONS - mov.l 5f, r9 - add r8, r9 - mov.l @r9, r10 - add #1, r10 - mov.l r10, @r9 -#endif - mov.l 4f, r9 add r8, r9 mov.l @r9, r9 @@ -673,9 +826,6 @@ skip_save: 2: .long 0x000080f0 ! FD=1, IMASK=15 3: .long 0xcfffffff ! RB=0, BL=0 4: .long exception_handling_table -#ifdef COUNT_EXCEPTIONS -5: .long exception_count_table -#endif interrupt_exception: mov.l 1f, r9 diff --git a/trunk/arch/sh/kernel/head.S b/trunk/arch/sh/kernel/head.S index 6aca4bc6ec5d..f5f53d14f245 100644 --- a/trunk/arch/sh/kernel/head.S +++ b/trunk/arch/sh/kernel/head.S @@ -33,7 +33,7 @@ ENTRY(empty_zero_page) .long 0x00360000 /* INITRD_START */ .long 0x000a0000 /* INITRD_SIZE */ .long 0 - .balign PAGE_SIZE,0,PAGE_SIZE + .balign 4096,0,4096 .text /* @@ -53,10 +53,8 @@ ENTRY(_stext) ldc r0, sr ! Initialize global interrupt mask mov #0, r0 -#ifdef CONFIG_CPU_HAS_SR_RB ldc r0, r6_bank -#endif - + /* * Prefetch if possible to reduce cache miss penalty. * @@ -70,14 +68,11 @@ ENTRY(_stext) ! mov.l 2f, r0 mov r0, r15 ! Set initial r15 (stack pointer) - mov #(THREAD_SIZE >> 10), r1 + mov #(THREAD_SIZE >> 8), r1 shll8 r1 ! r1 = THREAD_SIZE - shll2 r1 sub r1, r0 ! -#ifdef CONFIG_CPU_HAS_SR_RB ldc r0, r7_bank ! ... and initial thread_info -#endif - + ! Clear BSS area mov.l 3f, r1 add #4, r1 @@ -100,11 +95,7 @@ ENTRY(_stext) nop .balign 4 -#if defined(CONFIG_CPU_SH2) -1: .long 0x000000F0 ! IMASK=0xF -#else 1: .long 0x400080F0 ! 
MD=1, RB=0, BL=0, FD=1, IMASK=0xF -#endif 2: .long init_thread_union+THREAD_SIZE 3: .long __bss_start 4: .long _end diff --git a/trunk/arch/sh/kernel/irq.c b/trunk/arch/sh/kernel/irq.c index 67be2b6e8cd1..944128ce9706 100644 --- a/trunk/arch/sh/kernel/irq.c +++ b/trunk/arch/sh/kernel/irq.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include @@ -78,16 +78,15 @@ union irq_ctx { u32 stack[THREAD_SIZE/sizeof(u32)]; }; -static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly; -static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; +static union irq_ctx *hardirq_ctx[NR_CPUS]; +static union irq_ctx *softirq_ctx[NR_CPUS]; #endif asmlinkage int do_IRQ(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs __regs) + struct pt_regs regs) { - struct pt_regs *regs = RELOC_HIDE(&__regs, 0); - struct pt_regs *old_regs = set_irq_regs(regs); + struct pt_regs *old_regs = set_irq_regs(®s); int irq; #ifdef CONFIG_4KSTACKS union irq_ctx *curctx, *irqctx; @@ -112,7 +111,7 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5, #endif #ifdef CONFIG_CPU_HAS_INTEVT - irq = evt2irq(ctrl_inl(INTEVT)); + irq = (ctrl_inl(INTEVT) >> 5) - 16; #else irq = r4; #endif @@ -136,24 +135,17 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5, irqctx->tinfo.task = curctx->tinfo.task; irqctx->tinfo.previous_sp = current_stack_pointer; - /* - * Copy the softirq bits in preempt_count so that the - * softirq checks work in the hardirq context. - */ - irqctx->tinfo.preempt_count = - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) | - (curctx->tinfo.preempt_count & SOFTIRQ_MASK); - __asm__ __volatile__ ( "mov %0, r4 \n" - "mov r15, r8 \n" + "mov r15, r9 \n" "jsr @%1 \n" /* swith to the irq stack */ " mov %2, r15 \n" /* restore the stack (ring zero) */ - "mov r8, r15 \n" + "mov r9, r15 \n" : /* no outputs */ : "r" (irq), "r" (generic_handle_irq), "r" (isp) + /* XXX: A somewhat excessive clobber list? -PFM */ : "memory", "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "t", "pr" ); @@ -201,7 +193,7 @@ void irq_ctx_init(int cpu) irqctx->tinfo.task = NULL; irqctx->tinfo.exec_domain = NULL; irqctx->tinfo.cpu = cpu; - irqctx->tinfo.preempt_count = 0; + irqctx->tinfo.preempt_count = SOFTIRQ_OFFSET; irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); softirq_ctx[cpu] = irqctx; @@ -247,38 +239,13 @@ asmlinkage void do_softirq(void) "mov r9, r15 \n" : /* no outputs */ : "r" (__do_softirq), "r" (isp) + /* XXX: A somewhat excessive clobber list? 
-PFM */ : "memory", "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr" ); - - /* - * Shouldnt happen, we returned above if in_interrupt(): - */ - WARN_ON_ONCE(softirq_count()); } local_irq_restore(flags); } EXPORT_SYMBOL(do_softirq); #endif - -void __init init_IRQ(void) -{ -#ifdef CONFIG_CPU_HAS_PINT_IRQ - init_IRQ_pint(); -#endif - -#ifdef CONFIG_CPU_HAS_INTC2_IRQ - init_IRQ_intc2(); -#endif - -#ifdef CONFIG_CPU_HAS_IPR_IRQ - init_IRQ_ipr(); -#endif - - /* Perform the machine specific initialisation */ - if (sh_mv.mv_init_irq) - sh_mv.mv_init_irq(); - - irq_ctx_init(smp_processor_id()); -} diff --git a/trunk/arch/sh/kernel/process.c b/trunk/arch/sh/kernel/process.c index f3e2631be144..a52b13ac6b7f 100644 --- a/trunk/arch/sh/kernel/process.c +++ b/trunk/arch/sh/kernel/process.c @@ -385,11 +385,10 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *ne asmlinkage int sys_fork(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs __regs) + struct pt_regs regs) { - struct pt_regs *regs = RELOC_HIDE(&__regs, 0); #ifdef CONFIG_MMU - return do_fork(SIGCHLD, regs->regs[15], regs, 0, NULL, NULL); + return do_fork(SIGCHLD, regs.regs[15], ®s, 0, NULL, NULL); #else /* fork almost works, enough to trick you into looking elsewhere :-( */ return -EINVAL; @@ -399,12 +398,11 @@ asmlinkage int sys_fork(unsigned long r4, unsigned long r5, asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, unsigned long parent_tidptr, unsigned long child_tidptr, - struct pt_regs __regs) + struct pt_regs regs) { - struct pt_regs *regs = RELOC_HIDE(&__regs, 0); if (!newsp) - newsp = regs->regs[15]; - return do_fork(clone_flags, newsp, regs, 0, + newsp = regs.regs[15]; + return do_fork(clone_flags, newsp, ®s, 0, (int __user *)parent_tidptr, (int __user *)child_tidptr); } @@ -420,10 +418,9 @@ asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, */ asmlinkage int sys_vfork(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs __regs) + struct pt_regs regs) { - struct pt_regs *regs = RELOC_HIDE(&__regs, 0); - return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->regs[15], regs, + return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.regs[15], ®s, 0, NULL, NULL); } @@ -432,9 +429,8 @@ asmlinkage int sys_vfork(unsigned long r4, unsigned long r5, */ asmlinkage int sys_execve(char *ufilename, char **uargv, char **uenvp, unsigned long r7, - struct pt_regs __regs) + struct pt_regs regs) { - struct pt_regs *regs = RELOC_HIDE(&__regs, 0); int error; char *filename; @@ -446,7 +442,7 @@ asmlinkage int sys_execve(char *ufilename, char **uargv, error = do_execve(filename, (char __user * __user *)uargv, (char __user * __user *)uenvp, - regs); + ®s); if (error == 0) { task_lock(current); current->ptrace &= ~PT_DTRACE; @@ -476,7 +472,9 @@ unsigned long get_wchan(struct task_struct *p) return pc; } -asmlinkage void break_point_trap(void) +asmlinkage void break_point_trap(unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs regs) { /* Clear tracing. 
*/ #if defined(CONFIG_CPU_SH4A) @@ -494,10 +492,8 @@ asmlinkage void break_point_trap(void) asmlinkage void break_point_trap_software(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs __regs) + struct pt_regs regs) { - struct pt_regs *regs = RELOC_HIDE(&__regs, 0); - - regs->pc -= 2; + regs.pc -= 2; force_sig(SIGTRAP, current); } diff --git a/trunk/arch/sh/kernel/relocate_kernel.S b/trunk/arch/sh/kernel/relocate_kernel.S index c66cb3209db5..8221b37c9773 100644 --- a/trunk/arch/sh/kernel/relocate_kernel.S +++ b/trunk/arch/sh/kernel/relocate_kernel.S @@ -7,9 +7,11 @@ * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ + #include -#include -#include + +#define PAGE_SIZE 4096 /* must be same value as in */ + .globl relocate_new_kernel relocate_new_kernel: @@ -18,8 +20,8 @@ relocate_new_kernel: /* r6 = start_address */ /* r7 = vbr_reg */ - mov.l 10f,r8 /* PAGE_SIZE */ - mov.l 11f,r9 /* P2SEG */ + mov.l 10f,r8 /* 4096 */ + mov.l 11f,r9 /* 0xa0000000 */ /* stack setting */ add r8,r5 @@ -30,7 +32,7 @@ relocate_new_kernel: 0: mov.l @r4+,r0 /* cmd = *ind++ */ -1: /* addr = (cmd | P2SEG) & 0xfffffff0 */ +1: /* addr = (cmd | 0xa0000000) & 0xfffffff0 */ mov r0,r2 or r9,r2 mov #-16,r1 @@ -90,7 +92,7 @@ relocate_new_kernel: 10: .long PAGE_SIZE 11: - .long P2SEG + .long 0xa0000000 relocate_new_kernel_end: diff --git a/trunk/arch/sh/kernel/setup.c b/trunk/arch/sh/kernel/setup.c index f8dd6b7bfab0..36d86f9ac38a 100644 --- a/trunk/arch/sh/kernel/setup.c +++ b/trunk/arch/sh/kernel/setup.c @@ -332,7 +332,8 @@ void __init setup_arch(char **cmdline_p) if (LOADER_TYPE && INITRD_START) { if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) { reserve_bootmem_node(NODE_DATA(0), INITRD_START+__MEMORY_START, INITRD_SIZE); - initrd_start = INITRD_START + PAGE_OFFSET + __MEMORY_START; + initrd_start = + INITRD_START ? 
INITRD_START + PAGE_OFFSET + __MEMORY_START : 0; initrd_end = initrd_start + INITRD_SIZE; } else { printk("initrd extends beyond end of memory " @@ -391,7 +392,6 @@ static int __init topology_init(void) subsys_initcall(topology_init); static const char *cpu_name[] = { - [CPU_SH7206] = "SH7206", [CPU_SH7619] = "SH7619", [CPU_SH7604] = "SH7604", [CPU_SH7300] = "SH7300", [CPU_SH7705] = "SH7705", [CPU_SH7706] = "SH7706", [CPU_SH7707] = "SH7707", [CPU_SH7708] = "SH7708", @@ -404,7 +404,6 @@ static const char *cpu_name[] = { [CPU_SH4_202] = "SH4-202", [CPU_SH4_501] = "SH4-501", [CPU_SH7770] = "SH7770", [CPU_SH7780] = "SH7780", [CPU_SH7781] = "SH7781", [CPU_SH7343] = "SH7343", - [CPU_SH7785] = "SH7785", [CPU_SH_NONE] = "Unknown" }; diff --git a/trunk/arch/sh/kernel/sh_ksyms.c b/trunk/arch/sh/kernel/sh_ksyms.c index ceee79143401..8a2fd19dc9eb 100644 --- a/trunk/arch/sh/kernel/sh_ksyms.c +++ b/trunk/arch/sh/kernel/sh_ksyms.c @@ -73,6 +73,8 @@ DECLARE_EXPORT(__lshrdi3); DECLARE_EXPORT(__movstr); DECLARE_EXPORT(__movstrSI16); +EXPORT_SYMBOL(strcpy); + #ifdef CONFIG_CPU_SH4 DECLARE_EXPORT(__movstr_i4_even); DECLARE_EXPORT(__movstr_i4_odd); @@ -99,6 +101,10 @@ EXPORT_SYMBOL(__down_trylock); EXPORT_SYMBOL(synchronize_irq); #endif +#ifdef CONFIG_PM +EXPORT_SYMBOL(pm_suspend); +#endif + EXPORT_SYMBOL(csum_partial); #ifdef CONFIG_IPV6 EXPORT_SYMBOL(csum_ipv6_magic); diff --git a/trunk/arch/sh/kernel/signal.c b/trunk/arch/sh/kernel/signal.c index bb1c480a59c7..5213f5bc6ce0 100644 --- a/trunk/arch/sh/kernel/signal.c +++ b/trunk/arch/sh/kernel/signal.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -38,7 +37,7 @@ asmlinkage int sys_sigsuspend(old_sigset_t mask, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs __regs) + struct pt_regs regs) { mask &= _BLOCKABLE; spin_lock_irq(¤t->sighand->siglock); @@ -53,7 +52,7 @@ sys_sigsuspend(old_sigset_t mask, return -ERESTARTNOHAND; } -asmlinkage int +asmlinkage int sys_sigaction(int sig, const struct old_sigaction __user *act, struct old_sigaction __user *oact) { @@ -88,11 +87,9 @@ sys_sigaction(int sig, const struct old_sigaction __user *act, asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long r6, unsigned long r7, - struct pt_regs __regs) + struct pt_regs regs) { - struct pt_regs *regs = RELOC_HIDE(&__regs, 0); - - return do_sigaltstack(uss, uoss, regs->regs[15]); + return do_sigaltstack(uss, uoss, regs.regs[15]); } @@ -101,11 +98,7 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, */ #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */ -#if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A) -#define TRAP_NOARG 0xc320 /* Syscall w/no args (NR in R3) */ -#else -#define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) */ -#endif +#define TRAP16 0xc310 /* Syscall w/no args (NR in R3) */ #define OR_R0_R0 0x200b /* or r0,r0 (insert to avoid hardware bug) */ struct sigframe @@ -201,10 +194,9 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs __regs) + struct pt_regs regs) { - struct pt_regs *regs = RELOC_HIDE(&__regs, 0); - struct sigframe __user *frame = (struct sigframe __user *)regs->regs[15]; + struct sigframe __user *frame = (struct sigframe __user *)regs.regs[15]; sigset_t set; int r0; @@ -224,7 +216,7 @@ asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5, recalc_sigpending(); 
spin_unlock_irq(¤t->sighand->siglock); - if (restore_sigcontext(regs, &frame->sc, &r0)) + if (restore_sigcontext(®s, &frame->sc, &r0)) goto badframe; return r0; @@ -235,10 +227,9 @@ asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5, asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs __regs) + struct pt_regs regs) { - struct pt_regs *regs = RELOC_HIDE(&__regs, 0); - struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15]; + struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs.regs[15]; sigset_t set; stack_t st; int r0; @@ -255,14 +246,14 @@ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5, recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); - if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0)) + if (restore_sigcontext(®s, &frame->uc.uc_mcontext, &r0)) goto badframe; if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st))) goto badframe; /* It is more difficult to avoid calling this function than to call it and ignore errors. */ - do_sigaltstack(&st, NULL, regs->regs[15]); + do_sigaltstack(&st, NULL, regs.regs[15]); return r0; @@ -359,7 +350,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, } else { /* Generate return code (system call to sigreturn) */ err |= __put_user(MOVW(7), &frame->retcode[0]); - err |= __put_user(TRAP_NOARG, &frame->retcode[1]); + err |= __put_user(TRAP16, &frame->retcode[1]); err |= __put_user(OR_R0_R0, &frame->retcode[2]); err |= __put_user(OR_R0_R0, &frame->retcode[3]); err |= __put_user(OR_R0_R0, &frame->retcode[4]); @@ -439,7 +430,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, } else { /* Generate return code (system call to rt_sigreturn) */ err |= __put_user(MOVW(7), &frame->retcode[0]); - err |= __put_user(TRAP_NOARG, &frame->retcode[1]); + err |= __put_user(TRAP16, &frame->retcode[1]); err |= __put_user(OR_R0_R0, &frame->retcode[2]); err |= __put_user(OR_R0_R0, &frame->retcode[3]); err |= __put_user(OR_R0_R0, &frame->retcode[4]); diff --git a/trunk/arch/sh/kernel/stacktrace.c b/trunk/arch/sh/kernel/stacktrace.c deleted file mode 100644 index 0d5268afe80f..000000000000 --- a/trunk/arch/sh/kernel/stacktrace.c +++ /dev/null @@ -1,43 +0,0 @@ -/* - * arch/sh/kernel/stacktrace.c - * - * Stack trace management functions - * - * Copyright (C) 2006 Paul Mundt - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ -#include -#include -#include -#include - -/* - * Save stack-backtrace addresses into a stack_trace buffer. 
- */ -void save_stack_trace(struct stack_trace *trace, struct task_struct *task) -{ - unsigned long *sp; - - if (!task) - task = current; - if (task == current) - sp = (unsigned long *)current_stack_pointer; - else - sp = (unsigned long *)task->thread.sp; - - while (!kstack_end(sp)) { - unsigned long addr = *sp++; - - if (__kernel_text_address(addr)) { - if (trace->skip > 0) - trace->skip--; - else - trace->entries[trace->nr_entries++] = addr; - if (trace->nr_entries >= trace->max_entries) - break; - } - } -} diff --git a/trunk/arch/sh/kernel/sys_sh.c b/trunk/arch/sh/kernel/sys_sh.c index 5083b6ed4b39..8fde95001c34 100644 --- a/trunk/arch/sh/kernel/sys_sh.c +++ b/trunk/arch/sh/kernel/sys_sh.c @@ -33,15 +33,14 @@ */ asmlinkage int sys_pipe(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs __regs) + struct pt_regs regs) { - struct pt_regs *regs = RELOC_HIDE(&__regs, 0); int fd[2]; int error; error = do_pipe(fd); if (!error) { - regs->regs[1] = fd[1]; + regs.regs[1] = fd[1]; return fd[0]; } return error; @@ -51,7 +50,6 @@ unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ EXPORT_SYMBOL(shm_align_mask); -#ifdef CONFIG_MMU /* * To avoid cache aliases, we map the shared page with same color. */ @@ -137,7 +135,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, addr = COLOUR_ALIGN(addr, pgoff); } } -#endif /* CONFIG_MMU */ static inline long do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, diff --git a/trunk/arch/sh/kernel/time.c b/trunk/arch/sh/kernel/time.c index c206c9504c4b..57e708d7b52d 100644 --- a/trunk/arch/sh/kernel/time.c +++ b/trunk/arch/sh/kernel/time.c @@ -13,8 +13,6 @@ #include #include #include -#include -#include #include #include #include @@ -52,20 +50,15 @@ unsigned long long __attribute__ ((weak)) sched_clock(void) #ifndef CONFIG_GENERIC_TIME void do_gettimeofday(struct timeval *tv) { - unsigned long flags; unsigned long seq; unsigned long usec, sec; do { - /* - * Turn off IRQs when grabbing xtime_lock, so that - * the sys_timer get_offset code doesn't have to handle it. - */ - seq = read_seqbegin_irqsave(&xtime_lock, flags); + seq = read_seqbegin(&xtime_lock); usec = get_timer_offset(); sec = xtime.tv_sec; - usec += xtime.tv_nsec / NSEC_PER_USEC; - } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); + usec += xtime.tv_nsec / 1000; + } while (read_seqretry(&xtime_lock, seq)); while (usec >= 1000000) { usec -= 1000000; @@ -92,7 +85,7 @@ int do_settimeofday(struct timespec *tv) * wall time. Discover what correction gettimeofday() would have * made, and then undo it! 
*/ - nsec -= get_timer_offset() * NSEC_PER_USEC; + nsec -= 1000 * get_timer_offset(); wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); @@ -176,108 +169,6 @@ static struct sysdev_class timer_sysclass = { .resume = timer_resume, }; -#ifdef CONFIG_NO_IDLE_HZ -static int timer_dyn_tick_enable(void) -{ - struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick; - unsigned long flags; - int ret = -ENODEV; - - if (dyn_tick) { - spin_lock_irqsave(&dyn_tick->lock, flags); - ret = 0; - if (!(dyn_tick->state & DYN_TICK_ENABLED)) { - ret = dyn_tick->enable(); - - if (ret == 0) - dyn_tick->state |= DYN_TICK_ENABLED; - } - spin_unlock_irqrestore(&dyn_tick->lock, flags); - } - - return ret; -} - -static int timer_dyn_tick_disable(void) -{ - struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick; - unsigned long flags; - int ret = -ENODEV; - - if (dyn_tick) { - spin_lock_irqsave(&dyn_tick->lock, flags); - ret = 0; - if (dyn_tick->state & DYN_TICK_ENABLED) { - ret = dyn_tick->disable(); - - if (ret == 0) - dyn_tick->state &= ~DYN_TICK_ENABLED; - } - spin_unlock_irqrestore(&dyn_tick->lock, flags); - } - - return ret; -} - -/* - * Reprogram the system timer for at least the calculated time interval. - * This function should be called from the idle thread with IRQs disabled, - * immediately before sleeping. - */ -void timer_dyn_reprogram(void) -{ - struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick; - unsigned long next, seq, flags; - - if (!dyn_tick) - return; - - spin_lock_irqsave(&dyn_tick->lock, flags); - if (dyn_tick->state & DYN_TICK_ENABLED) { - next = next_timer_interrupt(); - do { - seq = read_seqbegin(&xtime_lock); - dyn_tick->reprogram(next - jiffies); - } while (read_seqretry(&xtime_lock, seq)); - } - spin_unlock_irqrestore(&dyn_tick->lock, flags); -} - -static ssize_t timer_show_dyn_tick(struct sys_device *dev, char *buf) -{ - return sprintf(buf, "%i\n", - (sys_timer->dyn_tick->state & DYN_TICK_ENABLED) >> 1); -} - -static ssize_t timer_set_dyn_tick(struct sys_device *dev, const char *buf, - size_t count) -{ - unsigned int enable = simple_strtoul(buf, NULL, 2); - - if (enable) - timer_dyn_tick_enable(); - else - timer_dyn_tick_disable(); - - return count; -} -static SYSDEV_ATTR(dyn_tick, 0644, timer_show_dyn_tick, timer_set_dyn_tick); - -/* - * dyntick=enable|disable - */ -static char dyntick_str[4] __initdata = ""; - -static int __init dyntick_setup(char *str) -{ - if (str) - strlcpy(dyntick_str, str, sizeof(dyntick_str)); - return 1; -} - -__setup("dyntick=", dyntick_setup); -#endif - static int __init timer_init_sysfs(void) { int ret = sysdev_class_register(&timer_sysclass); @@ -285,22 +176,7 @@ static int __init timer_init_sysfs(void) return ret; sys_timer->dev.cls = &timer_sysclass; - ret = sysdev_register(&sys_timer->dev); - -#ifdef CONFIG_NO_IDLE_HZ - if (ret == 0 && sys_timer->dyn_tick) { - ret = sysdev_create_file(&sys_timer->dev, &attr_dyn_tick); - - /* - * Turn on dynamic tick after calibrate delay - * for correct bogomips - */ - if (ret == 0 && dyntick_str[0] == 'e') - ret = timer_dyn_tick_enable(); - } -#endif - - return ret; + return sysdev_register(&sys_timer->dev); } device_initcall(timer_init_sysfs); @@ -324,11 +200,6 @@ void __init time_init(void) sys_timer = get_sys_timer(); printk(KERN_INFO "Using %s for system timer\n", sys_timer->name); -#ifdef CONFIG_NO_IDLE_HZ - if (sys_timer->dyn_tick) - spin_lock_init(&sys_timer->dyn_tick->lock); -#endif - #if defined(CONFIG_SH_KGDB) /* * Set up kgdb as requested. 
We do it here because the serial diff --git a/trunk/arch/sh/kernel/timers/Makefile b/trunk/arch/sh/kernel/timers/Makefile index bcf244ff6a12..151a6a304cec 100644 --- a/trunk/arch/sh/kernel/timers/Makefile +++ b/trunk/arch/sh/kernel/timers/Makefile @@ -5,6 +5,4 @@ obj-y := timer.o obj-$(CONFIG_SH_TMU) += timer-tmu.o -obj-$(CONFIG_SH_MTU2) += timer-mtu2.o -obj-$(CONFIG_SH_CMT) += timer-cmt.o diff --git a/trunk/arch/sh/kernel/timers/timer-cmt.c b/trunk/arch/sh/kernel/timers/timer-cmt.c deleted file mode 100644 index a574b93a4e7b..000000000000 --- a/trunk/arch/sh/kernel/timers/timer-cmt.c +++ /dev/null @@ -1,196 +0,0 @@ -/* - * arch/sh/kernel/timers/timer-cmt.c - CMT Timer Support - * - * Copyright (C) 2005 Yoshinori Sato - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if defined(CONFIG_CPU_SUBTYPE_SH7619) -#define CMT_CMSTR 0xf84a0070 -#define CMT_CMCSR_0 0xf84a0072 -#define CMT_CMCNT_0 0xf84a0074 -#define CMT_CMCOR_0 0xf84a0076 -#define CMT_CMCSR_1 0xf84a0078 -#define CMT_CMCNT_1 0xf84a007a -#define CMT_CMCOR_1 0xf84a007c - -#define STBCR3 0xf80a0000 -#define cmt_clock_enable() do { ctrl_outb(ctrl_inb(STBCR3) & ~0x10, STBCR3); } while(0) -#define CMT_CMCSR_INIT 0x0040 -#define CMT_CMCSR_CALIB 0x0000 -#elif defined(CONFIG_CPU_SUBTYPE_SH7206) -#define CMT_CMSTR 0xfffec000 -#define CMT_CMCSR_0 0xfffec002 -#define CMT_CMCNT_0 0xfffec004 -#define CMT_CMCOR_0 0xfffec006 - -#define STBCR4 0xfffe040c -#define cmt_clock_enable() do { ctrl_outb(ctrl_inb(STBCR4) & ~0x04, STBCR4); } while(0) -#define CMT_CMCSR_INIT 0x0040 -#define CMT_CMCSR_CALIB 0x0000 -#else -#error "Unknown CPU SUBTYPE" -#endif - -static unsigned long cmt_timer_get_offset(void) -{ - int count; - static unsigned short count_p = 0xffff; /* for the first call after boot */ - static unsigned long jiffies_p = 0; - - /* - * cache volatile jiffies temporarily; we have IRQs turned off. - */ - unsigned long jiffies_t; - - /* timer count may underflow right here */ - count = ctrl_inw(CMT_CMCOR_0); - count -= ctrl_inw(CMT_CMCNT_0); - - jiffies_t = jiffies; - - /* - * avoiding timer inconsistencies (they are rare, but they happen)... - * there is one kind of problem that must be avoided here: - * 1. the timer counter underflows - */ - - if (jiffies_t == jiffies_p) { - if (count > count_p) { - /* the nutcase */ - if (ctrl_inw(CMT_CMCSR_0) & 0x80) { /* Check CMF bit */ - count -= LATCH; - } else { - printk("%s (): hardware timer problem?\n", - __FUNCTION__); - } - } - } else - jiffies_p = jiffies_t; - - count_p = count; - - count = ((LATCH-1) - count) * TICK_SIZE; - count = (count + LATCH/2) / LATCH; - - return count; -} - -static irqreturn_t cmt_timer_interrupt(int irq, void *dev_id) -{ - unsigned long timer_status; - - /* Clear CMF bit */ - timer_status = ctrl_inw(CMT_CMCSR_0); - timer_status &= ~0x80; - ctrl_outw(timer_status, CMT_CMCSR_0); - - /* - * Here we are in the timer irq handler. We just have irqs locally - * disabled but we don't know if the timer_bh is running on the other - * CPU. We need to avoid to SMP race with it. NOTE: we don' t need - * the irq version of write_lock because as just said we have irq - * locally disabled. 
-arca - */ - write_seqlock(&xtime_lock); - handle_timer_tick(); - write_sequnlock(&xtime_lock); - - return IRQ_HANDLED; -} - -static struct irqaction cmt_irq = { - .name = "timer", - .handler = cmt_timer_interrupt, - .flags = IRQF_DISABLED | IRQF_TIMER, - .mask = CPU_MASK_NONE, -}; - -static void cmt_clk_init(struct clk *clk) -{ - u8 divisor = CMT_CMCSR_INIT & 0x3; - ctrl_inw(CMT_CMCSR_0); - ctrl_outw(CMT_CMCSR_INIT, CMT_CMCSR_0); - clk->parent = clk_get(NULL, "module_clk"); - clk->rate = clk->parent->rate / (8 << (divisor << 1)); -} - -static void cmt_clk_recalc(struct clk *clk) -{ - u8 divisor = ctrl_inw(CMT_CMCSR_0) & 0x3; - clk->rate = clk->parent->rate / (8 << (divisor << 1)); -} - -static struct clk_ops cmt_clk_ops = { - .init = cmt_clk_init, - .recalc = cmt_clk_recalc, -}; - -static struct clk cmt0_clk = { - .name = "cmt0_clk", - .ops = &cmt_clk_ops, -}; - -static int cmt_timer_start(void) -{ - ctrl_outw(ctrl_inw(CMT_CMSTR) | 0x01, CMT_CMSTR); - return 0; -} - -static int cmt_timer_stop(void) -{ - ctrl_outw(ctrl_inw(CMT_CMSTR) & ~0x01, CMT_CMSTR); - return 0; -} - -static int cmt_timer_init(void) -{ - unsigned long interval; - - cmt_clock_enable(); - - setup_irq(CONFIG_SH_TIMER_IRQ, &cmt_irq); - - cmt0_clk.parent = clk_get(NULL, "module_clk"); - - cmt_timer_stop(); - - interval = cmt0_clk.parent->rate / 8 / HZ; - printk(KERN_INFO "Interval = %ld\n", interval); - - ctrl_outw(interval, CMT_CMCOR_0); - - clk_register(&cmt0_clk); - clk_enable(&cmt0_clk); - - cmt_timer_start(); - - return 0; -} - -struct sys_timer_ops cmt_timer_ops = { - .init = cmt_timer_init, - .start = cmt_timer_start, - .stop = cmt_timer_stop, -#ifndef CONFIG_GENERIC_TIME - .get_offset = cmt_timer_get_offset, -#endif -}; - -struct sys_timer cmt_timer = { - .name = "cmt", - .ops = &cmt_timer_ops, -}; diff --git a/trunk/arch/sh/kernel/timers/timer-mtu2.c b/trunk/arch/sh/kernel/timers/timer-mtu2.c deleted file mode 100644 index fffcd1c09873..000000000000 --- a/trunk/arch/sh/kernel/timers/timer-mtu2.c +++ /dev/null @@ -1,200 +0,0 @@ -/* - * arch/sh/kernel/timers/timer-mtu2.c - MTU2 Timer Support - * - * Copyright (C) 2005 Paul Mundt - * - * Based off of arch/sh/kernel/timers/timer-tmu.c - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * We use channel 1 for our lowly system timer. Channel 2 would be the other - * likely candidate, but we leave it alone as it has higher divisors that - * would be of more use to other more interesting applications. - * - * TODO: Presently we only implement a 16-bit single-channel system timer. - * However, we can implement channel cascade if we go the overflow route and - * get away with using 2 MTU2 channels as a 32-bit timer. 
- */ -#define MTU2_TSTR 0xfffe4280 -#define MTU2_TCR_1 0xfffe4380 -#define MTU2_TMDR_1 0xfffe4381 -#define MTU2_TIOR_1 0xfffe4382 -#define MTU2_TIER_1 0xfffe4384 -#define MTU2_TSR_1 0xfffe4385 -#define MTU2_TCNT_1 0xfffe4386 /* 16-bit counter */ -#define MTU2_TGRA_1 0xfffe438a - -#define STBCR3 0xfffe0408 - -#define MTU2_TSTR_CST1 (1 << 1) /* Counter Start 1 */ - -#define MTU2_TSR_TGFA (1 << 0) /* GRA compare match */ - -#define MTU2_TIER_TGIEA (1 << 0) /* GRA compare match interrupt enable */ - -#define MTU2_TCR_INIT 0x22 - -#define MTU2_TCR_CALIB 0x00 - -static unsigned long mtu2_timer_get_offset(void) -{ - int count; - static int count_p = 0x7fff; /* for the first call after boot */ - static unsigned long jiffies_p = 0; - - /* - * cache volatile jiffies temporarily; we have IRQs turned off. - */ - unsigned long jiffies_t; - - /* timer count may underflow right here */ - count = ctrl_inw(MTU2_TCNT_1); /* read the latched count */ - - jiffies_t = jiffies; - - /* - * avoiding timer inconsistencies (they are rare, but they happen)... - * there is one kind of problem that must be avoided here: - * 1. the timer counter underflows - */ - - if (jiffies_t == jiffies_p) { - if (count > count_p) { - if (ctrl_inb(MTU2_TSR_1) & MTU2_TSR_TGFA) { - count -= LATCH; - } else { - printk("%s (): hardware timer problem?\n", - __FUNCTION__); - } - } - } else - jiffies_p = jiffies_t; - - count_p = count; - - count = ((LATCH-1) - count) * TICK_SIZE; - count = (count + LATCH/2) / LATCH; - - return count; -} - -static irqreturn_t mtu2_timer_interrupt(int irq, void *dev_id) -{ - unsigned long timer_status; - - /* Clear TGFA bit */ - timer_status = ctrl_inb(MTU2_TSR_1); - timer_status &= ~MTU2_TSR_TGFA; - ctrl_outb(timer_status, MTU2_TSR_1); - - /* Do timer tick */ - write_seqlock(&xtime_lock); - handle_timer_tick(); - write_sequnlock(&xtime_lock); - - return IRQ_HANDLED; -} - -static struct irqaction mtu2_irq = { - .name = "timer", - .handler = mtu2_timer_interrupt, - .flags = IRQF_DISABLED | IRQF_TIMER, - .mask = CPU_MASK_NONE, -}; - -static unsigned int divisors[] = { 1, 4, 16, 64, 1, 1, 256 }; - -static void mtu2_clk_init(struct clk *clk) -{ - u8 idx = MTU2_TCR_INIT & 0x7; - - clk->rate = clk->parent->rate / divisors[idx]; - /* Start TCNT counting */ - ctrl_outb(ctrl_inb(MTU2_TSTR) | MTU2_TSTR_CST1, MTU2_TSTR); - -} - -static void mtu2_clk_recalc(struct clk *clk) -{ - u8 idx = ctrl_inb(MTU2_TCR_1) & 0x7; - clk->rate = clk->parent->rate / divisors[idx]; -} - -static struct clk_ops mtu2_clk_ops = { - .init = mtu2_clk_init, - .recalc = mtu2_clk_recalc, -}; - -static struct clk mtu2_clk1 = { - .name = "mtu2_clk1", - .ops = &mtu2_clk_ops, -}; - -static int mtu2_timer_start(void) -{ - ctrl_outb(ctrl_inb(MTU2_TSTR) | MTU2_TSTR_CST1, MTU2_TSTR); - return 0; -} - -static int mtu2_timer_stop(void) -{ - ctrl_outb(ctrl_inb(MTU2_TSTR) & ~MTU2_TSTR_CST1, MTU2_TSTR); - return 0; -} - -static int mtu2_timer_init(void) -{ - u8 tmp; - unsigned long interval; - - setup_irq(CONFIG_SH_TIMER_IRQ, &mtu2_irq); - - mtu2_clk1.parent = clk_get(NULL, "module_clk"); - - ctrl_outb(ctrl_inb(STBCR3) & (~0x20), STBCR3); - - /* Normal operation */ - ctrl_outb(0, MTU2_TMDR_1); - ctrl_outb(MTU2_TCR_INIT, MTU2_TCR_1); - ctrl_outb(0x01, MTU2_TIOR_1); - - /* Enable underflow interrupt */ - ctrl_outb(ctrl_inb(MTU2_TIER_1) | MTU2_TIER_TGIEA, MTU2_TIER_1); - - interval = CONFIG_SH_PCLK_FREQ / 16 / HZ; - printk(KERN_INFO "Interval = %ld\n", interval); - - ctrl_outw(interval, MTU2_TGRA_1); - ctrl_outw(0, MTU2_TCNT_1); - - clk_register(&mtu2_clk1); - 
clk_enable(&mtu2_clk1); - - return 0; -} - -struct sys_timer_ops mtu2_timer_ops = { - .init = mtu2_timer_init, - .start = mtu2_timer_start, - .stop = mtu2_timer_stop, -#ifndef CONFIG_GENERIC_TIME - .get_offset = mtu2_timer_get_offset, -#endif -}; - -struct sys_timer mtu2_timer = { - .name = "mtu2", - .ops = &mtu2_timer_ops, -}; diff --git a/trunk/arch/sh/kernel/timers/timer-tmu.c b/trunk/arch/sh/kernel/timers/timer-tmu.c index e060e71d0785..24927015dc31 100644 --- a/trunk/arch/sh/kernel/timers/timer-tmu.c +++ b/trunk/arch/sh/kernel/timers/timer-tmu.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -30,9 +31,13 @@ #define TMU0_TCR_CALIB 0x0000 +static DEFINE_SPINLOCK(tmu0_lock); + static unsigned long tmu_timer_get_offset(void) { int count; + unsigned long flags; + static int count_p = 0x7fffffff; /* for the first call after boot */ static unsigned long jiffies_p = 0; @@ -41,6 +46,7 @@ static unsigned long tmu_timer_get_offset(void) */ unsigned long jiffies_t; + spin_lock_irqsave(&tmu0_lock, flags); /* timer count may underflow right here */ count = ctrl_inl(TMU0_TCNT); /* read the latched count */ @@ -66,6 +72,7 @@ static unsigned long tmu_timer_get_offset(void) jiffies_p = jiffies_t; count_p = count; + spin_unlock_irqrestore(&tmu0_lock, flags); count = ((LATCH-1) - count) * TICK_SIZE; count = (count + LATCH/2) / LATCH; @@ -99,7 +106,7 @@ static irqreturn_t tmu_timer_interrupt(int irq, void *dummy) static struct irqaction tmu_irq = { .name = "timer", .handler = tmu_timer_interrupt, - .flags = IRQF_DISABLED | IRQF_TIMER, + .flags = IRQF_DISABLED, .mask = CPU_MASK_NONE, }; @@ -142,9 +149,9 @@ static int tmu_timer_init(void) { unsigned long interval; - setup_irq(CONFIG_SH_TIMER_IRQ, &tmu_irq); + setup_irq(TIMER_IRQ, &tmu_irq); - tmu0_clk.parent = clk_get(NULL, "module_clk"); + tmu0_clk.parent = clk_get("module_clk"); /* Start TMU0 */ tmu_timer_stop(); diff --git a/trunk/arch/sh/kernel/timers/timer.c b/trunk/arch/sh/kernel/timers/timer.c index a6bcc913d25e..dc1f631053a8 100644 --- a/trunk/arch/sh/kernel/timers/timer.c +++ b/trunk/arch/sh/kernel/timers/timer.c @@ -16,12 +16,6 @@ static struct sys_timer *sys_timers[] __initdata = { #ifdef CONFIG_SH_TMU &tmu_timer, -#endif -#ifdef CONFIG_SH_MTU2 - &mtu2_timer, -#endif -#ifdef CONFIG_SH_CMT - &cmt_timer, #endif NULL, }; diff --git a/trunk/arch/sh/kernel/traps.c b/trunk/arch/sh/kernel/traps.c index 3762d9dc2046..53dfa55f3156 100644 --- a/trunk/arch/sh/kernel/traps.c +++ b/trunk/arch/sh/kernel/traps.c @@ -18,14 +18,13 @@ #include #include #include -#include #include #include #ifdef CONFIG_SH_KGDB #include -#define CHK_REMOTE_DEBUG(regs) \ -{ \ +#define CHK_REMOTE_DEBUG(regs) \ +{ \ if (kgdb_debug_hook && !user_mode(regs))\ (*kgdb_debug_hook)(regs); \ } @@ -34,13 +33,8 @@ #endif #ifdef CONFIG_CPU_SH2 -# define TRAP_RESERVED_INST 4 -# define TRAP_ILLEGAL_SLOT_INST 6 -# define TRAP_ADDRESS_ERROR 9 -# ifdef CONFIG_CPU_SH2A -# define TRAP_DIVZERO_ERROR 17 -# define TRAP_DIVOVF_ERROR 18 -# endif +#define TRAP_RESERVED_INST 4 +#define TRAP_ILLEGAL_SLOT_INST 6 #else #define TRAP_RESERVED_INST 12 #define TRAP_ILLEGAL_SLOT_INST 13 @@ -94,7 +88,7 @@ void die(const char * str, struct pt_regs * regs, long err) if (!user_mode(regs) || in_interrupt()) dump_mem("Stack: ", regs->regs[15], THREAD_SIZE + - (unsigned long)task_stack_page(current)); + (unsigned long)task_stack_page(current)); bust_spinlocks(0); spin_unlock_irq(&die_lock); @@ -108,6 +102,8 @@ static inline void die_if_kernel(const char *str, struct pt_regs *regs, 
die(str, regs, err); } +static int handle_unaligned_notify_count = 10; + /* * try and fix up kernelspace address errors * - userspace errors just cause EFAULT to be returned, resulting in SEGV @@ -202,7 +198,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) if (copy_to_user(dst,src,4)) goto fetch_fault; ret = 0; - break; + break; case 2: /* mov.[bwl] to memory, possibly with pre-decrement */ if (instruction & 4) @@ -226,7 +222,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) if (copy_from_user(dst,src,4)) goto fetch_fault; ret = 0; - break; + break; case 6: /* mov.[bwl] from memory, possibly with post-increment */ src = (unsigned char*) *rm; @@ -234,7 +230,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) *rm += count; dst = (unsigned char*) rn; *(unsigned long*)dst = 0; - + #ifdef __LITTLE_ENDIAN__ if (copy_from_user(dst, src, count)) goto fetch_fault; @@ -245,7 +241,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) } #else dst += 4-count; - + if (copy_from_user(dst, src, count)) goto fetch_fault; @@ -324,8 +320,7 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs) return -EFAULT; /* kernel */ - die("delay-slot-insn faulting in handle_unaligned_delayslot", - regs, 0); + die("delay-slot-insn faulting in handle_unaligned_delayslot", regs, 0); } return handle_unaligned_ins(instruction,regs); @@ -347,13 +342,6 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs) #define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4) #define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4) -/* - * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit - * opcodes.. - */ -#ifndef CONFIG_CPU_SH2A -static int handle_unaligned_notify_count = 10; - static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) { u_int rm; @@ -366,8 +354,7 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) if (user_mode(regs) && handle_unaligned_notify_count>0) { handle_unaligned_notify_count--; - printk(KERN_NOTICE "Fixing up unaligned userspace access " - "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", + printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", current->comm,current->pid,(u16*)regs->pc,instruction); } @@ -491,58 +478,32 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) regs->pc += 2; return ret; } -#endif /* CONFIG_CPU_SH2A */ - -#ifdef CONFIG_CPU_HAS_SR_RB -#define lookup_exception_vector(x) \ - __asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x))) -#else -#define lookup_exception_vector(x) \ - __asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x))) -#endif /* - * Handle various address error exceptions: - * - instruction address error: - * misaligned PC - * PC >= 0x80000000 in user mode - * - data address error (read and write) - * misaligned data access - * access to >= 0x80000000 is user mode - * Unfortuntaly we can't distinguish between instruction address error - * and data address errors caused by read acceses. 
+ * Handle various address error exceptions */ -asmlinkage void do_address_error(struct pt_regs *regs, +asmlinkage void do_address_error(struct pt_regs *regs, unsigned long writeaccess, unsigned long address) { - unsigned long error_code = 0; + unsigned long error_code; mm_segment_t oldfs; - siginfo_t info; -#ifndef CONFIG_CPU_SH2A u16 instruction; int tmp; -#endif - /* Intentional ifdef */ -#ifdef CONFIG_CPU_HAS_SR_RB - lookup_exception_vector(error_code); -#endif + asm volatile("stc r2_bank,%0": "=r" (error_code)); oldfs = get_fs(); if (user_mode(regs)) { - int si_code = BUS_ADRERR; - local_irq_enable(); + current->thread.error_code = error_code; + current->thread.trap_no = (writeaccess) ? 8 : 7; /* bad PC is not something we can fix */ - if (regs->pc & 1) { - si_code = BUS_ADRALN; + if (regs->pc & 1) goto uspace_segv; - } -#ifndef CONFIG_CPU_SH2A set_fs(USER_DS); if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) { /* Argh. Fault on the instruction itself. @@ -557,23 +518,14 @@ asmlinkage void do_address_error(struct pt_regs *regs, if (tmp==0) return; /* sorted */ -#endif - -uspace_segv: - printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned " - "access (PC %lx PR %lx)\n", current->comm, regs->pc, - regs->pr); - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = si_code; - info.si_addr = (void *) address; - force_sig_info(SIGBUS, &info, current); + uspace_segv: + printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm); + force_sig(SIGSEGV, current); } else { if (regs->pc & 1) die("unaligned program counter", regs, error_code); -#ifndef CONFIG_CPU_SH2A set_fs(KERNEL_DS); if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) { /* Argh. Fault on the instruction itself. @@ -585,12 +537,6 @@ asmlinkage void do_address_error(struct pt_regs *regs, handle_unaligned_access(instruction, regs); set_fs(oldfs); -#else - printk(KERN_NOTICE "Killing process \"%s\" due to unaligned " - "access\n", current->comm); - - force_sig(SIGSEGV, current); -#endif } } @@ -602,7 +548,7 @@ int is_dsp_inst(struct pt_regs *regs) { unsigned short inst; - /* + /* * Safe guard if DSP mode is already enabled or we're lacking * the DSP altogether. 
*/ @@ -623,49 +569,27 @@ int is_dsp_inst(struct pt_regs *regs) #define is_dsp_inst(regs) (0) #endif /* CONFIG_SH_DSP */ -#ifdef CONFIG_CPU_SH2A -asmlinkage void do_divide_error(unsigned long r4, unsigned long r5, - unsigned long r6, unsigned long r7, - struct pt_regs __regs) -{ - struct pt_regs *regs = RELOC_HIDE(&__regs, 0); - siginfo_t info; - - switch (r4) { - case TRAP_DIVZERO_ERROR: - info.si_code = FPE_INTDIV; - break; - case TRAP_DIVOVF_ERROR: - info.si_code = FPE_INTOVF; - break; - } - - force_sig_info(SIGFPE, &info, current); -} -#endif - /* arch/sh/kernel/cpu/sh4/fpu.c */ extern int do_fpu_inst(unsigned short, struct pt_regs *); extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5, - unsigned long r6, unsigned long r7, struct pt_regs __regs); + unsigned long r6, unsigned long r7, struct pt_regs regs); asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs __regs) + struct pt_regs regs) { - struct pt_regs *regs = RELOC_HIDE(&__regs, 0); unsigned long error_code; struct task_struct *tsk = current; #ifdef CONFIG_SH_FPU_EMU - unsigned short inst = 0; + unsigned short inst; int err; - get_user(inst, (unsigned short*)regs->pc); + get_user(inst, (unsigned short*)regs.pc); - err = do_fpu_inst(inst, regs); + err = do_fpu_inst(inst, ®s); if (!err) { - regs->pc += 2; + regs.pc += 2; return; } /* not a FPU inst. */ @@ -673,19 +597,20 @@ asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5, #ifdef CONFIG_SH_DSP /* Check if it's a DSP instruction */ - if (is_dsp_inst(regs)) { + if (is_dsp_inst(®s)) { /* Enable DSP mode, and restart instruction. */ - regs->sr |= SR_DSP; + regs.sr |= SR_DSP; return; } #endif - lookup_exception_vector(error_code); - + asm volatile("stc r2_bank, %0": "=r" (error_code)); local_irq_enable(); - CHK_REMOTE_DEBUG(regs); + tsk->thread.error_code = error_code; + tsk->thread.trap_no = TRAP_RESERVED_INST; + CHK_REMOTE_DEBUG(®s); force_sig(SIGILL, tsk); - die_if_no_fixup("reserved instruction", regs, error_code); + die_if_no_fixup("reserved instruction", ®s, error_code); } #ifdef CONFIG_SH_FPU_EMU @@ -733,41 +658,39 @@ static int emulate_branch(unsigned short inst, struct pt_regs* regs) asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs __regs) + struct pt_regs regs) { - struct pt_regs *regs = RELOC_HIDE(&__regs, 0); unsigned long error_code; struct task_struct *tsk = current; #ifdef CONFIG_SH_FPU_EMU - unsigned short inst = 0; + unsigned short inst; - get_user(inst, (unsigned short *)regs->pc + 1); - if (!do_fpu_inst(inst, regs)) { - get_user(inst, (unsigned short *)regs->pc); - if (!emulate_branch(inst, regs)) + get_user(inst, (unsigned short *)regs.pc + 1); + if (!do_fpu_inst(inst, ®s)) { + get_user(inst, (unsigned short *)regs.pc); + if (!emulate_branch(inst, ®s)) return; /* fault in branch.*/ } /* not a FPU inst. 
*/ #endif - lookup_exception_vector(error_code); - + asm volatile("stc r2_bank, %0": "=r" (error_code)); local_irq_enable(); - CHK_REMOTE_DEBUG(regs); + tsk->thread.error_code = error_code; + tsk->thread.trap_no = TRAP_RESERVED_INST; + CHK_REMOTE_DEBUG(®s); force_sig(SIGILL, tsk); - die_if_no_fixup("illegal slot instruction", regs, error_code); + die_if_no_fixup("illegal slot instruction", ®s, error_code); } asmlinkage void do_exception_error(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs __regs) + struct pt_regs regs) { - struct pt_regs *regs = RELOC_HIDE(&__regs, 0); long ex; - - lookup_exception_vector(ex); - die_if_kernel("exception", regs, ex); + asm volatile("stc r2_bank, %0" : "=r" (ex)); + die_if_kernel("exception", ®s, ex); } #if defined(CONFIG_SH_STANDARD_BIOS) @@ -812,16 +735,12 @@ void *set_exception_table_vec(unsigned int vec, void *handler) { extern void *exception_handling_table[]; void *old_handler; - + old_handler = exception_handling_table[vec]; exception_handling_table[vec] = handler; return old_handler; } -extern asmlinkage void address_error_handler(unsigned long r4, unsigned long r5, - unsigned long r6, unsigned long r7, - struct pt_regs __regs); - void __init trap_init(void) { set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst); @@ -840,15 +759,7 @@ void __init trap_init(void) set_exception_table_evt(0x800, do_fpu_state_restore); set_exception_table_evt(0x820, do_fpu_state_restore); #endif - -#ifdef CONFIG_CPU_SH2 - set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_handler); -#endif -#ifdef CONFIG_CPU_SH2A - set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error); - set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error); -#endif - + /* Setup VBR for boot cpu */ per_cpu_trap_init(); } @@ -873,11 +784,6 @@ void show_trace(struct task_struct *tsk, unsigned long *sp, } printk("\n"); - - if (!tsk) - tsk = current; - - debug_show_held_locks(tsk); } void show_stack(struct task_struct *tsk, unsigned long *sp) diff --git a/trunk/arch/sh/kernel/vsyscall/vsyscall.c b/trunk/arch/sh/kernel/vsyscall/vsyscall.c index deb46941f315..075d6cc1a2d7 100644 --- a/trunk/arch/sh/kernel/vsyscall/vsyscall.c +++ b/trunk/arch/sh/kernel/vsyscall/vsyscall.c @@ -97,7 +97,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, goto up_fail; } - vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL); if (!vma) { ret = -ENOMEM; goto up_fail; diff --git a/trunk/arch/sh/mm/Kconfig b/trunk/arch/sh/mm/Kconfig index 4e0362f50384..9dd606464d23 100644 --- a/trunk/arch/sh/mm/Kconfig +++ b/trunk/arch/sh/mm/Kconfig @@ -4,12 +4,8 @@ menu "Processor selection" # Processor families # config CPU_SH2 - select SH_WRITETHROUGH if !CPU_SH2A bool - -config CPU_SH2A - bool - select CPU_SH2 + select SH_WRITETHROUGH config CPU_SH3 bool @@ -20,7 +16,6 @@ config CPU_SH4 bool select CPU_HAS_INTEVT select CPU_HAS_SR_RB - select CPU_HAS_PTEA if !CPU_SUBTYPE_ST40 config CPU_SH4A bool @@ -45,16 +40,6 @@ config CPU_SUBTYPE_SH7604 bool "Support SH7604 processor" select CPU_SH2 -config CPU_SUBTYPE_SH7619 - bool "Support SH7619 processor" - select CPU_SH2 - -comment "SH-2A Processor Support" - -config CPU_SUBTYPE_SH7206 - bool "Support SH7206 processor" - select CPU_SH2A - comment "SH-3 Processor Support" config CPU_SUBTYPE_SH7300 @@ -104,7 +89,6 @@ comment "SH-4 Processor Support" config CPU_SUBTYPE_SH7750 bool "Support SH7750 processor" select CPU_SH4 - select CPU_HAS_IPR_IRQ help Select SH7750 if you 
have a 200 Mhz SH-4 HD6417750 CPU. @@ -120,18 +104,15 @@ config CPU_SUBTYPE_SH7750R bool "Support SH7750R processor" select CPU_SH4 select CPU_SUBTYPE_SH7750 - select CPU_HAS_IPR_IRQ config CPU_SUBTYPE_SH7750S bool "Support SH7750S processor" select CPU_SH4 select CPU_SUBTYPE_SH7750 - select CPU_HAS_IPR_IRQ config CPU_SUBTYPE_SH7751 bool "Support SH7751 processor" select CPU_SH4 - select CPU_HAS_IPR_IRQ help Select SH7751 if you have a 166 Mhz SH-4 HD6417751 CPU, or if you have a HD6417751R CPU. @@ -140,7 +121,6 @@ config CPU_SUBTYPE_SH7751R bool "Support SH7751R processor" select CPU_SH4 select CPU_SUBTYPE_SH7751 - select CPU_HAS_IPR_IRQ config CPU_SUBTYPE_SH7760 bool "Support SH7760 processor" @@ -177,11 +157,6 @@ config CPU_SUBTYPE_SH7780 select CPU_SH4A select CPU_HAS_INTC2_IRQ -config CPU_SUBTYPE_SH7785 - bool "Support SH7785 processor" - select CPU_SH4A - select CPU_HAS_INTC2_IRQ - comment "SH4AL-DSP Processor Support" config CPU_SUBTYPE_SH73180 @@ -241,22 +216,13 @@ config MEMORY_SIZE config 32BIT bool "Support 32-bit physical addressing through PMB" - depends on CPU_SH4A && MMU && (!X2TLB || BROKEN) + depends on CPU_SH4A && MMU default y help If you say Y here, physical addressing will be extended to 32-bits through the SH-4A PMB. If this is not set, legacy 29-bit physical addressing will be used. -config X2TLB - bool "Enable extended TLB mode" - depends on CPU_SUBTYPE_SH7785 && MMU && EXPERIMENTAL - help - Selecting this option will enable the extended mode of the SH-X2 - TLB. For legacy SH-X behaviour and interoperability, say N. For - all of the fun new features and a willingless to submit bug reports, - say Y. - config VSYSCALL bool "Support vsyscall page" depends on MMU @@ -270,53 +236,17 @@ config VSYSCALL For systems with an MMU that can afford to give up a page, (the default value) say Y. -choice - prompt "Kernel page size" - default PAGE_SIZE_4KB - -config PAGE_SIZE_4KB - bool "4kB" - help - This is the default page size used by all SuperH CPUs. - -config PAGE_SIZE_8KB - bool "8kB" - depends on EXPERIMENTAL && X2TLB - help - This enables 8kB pages as supported by SH-X2 and later MMUs. - -config PAGE_SIZE_64KB - bool "64kB" - depends on EXPERIMENTAL && CPU_SH4 - help - This enables support for 64kB pages, possible on all SH-4 - CPUs and later. Highly experimental, not recommended. - -endchoice - choice prompt "HugeTLB page size" depends on HUGETLB_PAGE && CPU_SH4 && MMU default HUGETLB_PAGE_SIZE_64K config HUGETLB_PAGE_SIZE_64K - bool "64kB" - -config HUGETLB_PAGE_SIZE_256K - bool "256kB" - depends on X2TLB + bool "64K" config HUGETLB_PAGE_SIZE_1MB bool "1MB" -config HUGETLB_PAGE_SIZE_4MB - bool "4MB" - depends on X2TLB - -config HUGETLB_PAGE_SIZE_64MB - bool "64MB" - depends on X2TLB - endchoice source "mm/Kconfig" @@ -344,6 +274,7 @@ config SH_DIRECT_MAPPED config SH_WRITETHROUGH bool "Use write-through caching" + default y if CPU_SH2 help Selecting this option will configure the caches in write-through mode, as opposed to the default write-back configuration. diff --git a/trunk/arch/sh/mm/cache-sh2.c b/trunk/arch/sh/mm/cache-sh2.c index 6614033f6be9..2689cb24ea2b 100644 --- a/trunk/arch/sh/mm/cache-sh2.c +++ b/trunk/arch/sh/mm/cache-sh2.c @@ -5,7 +5,6 @@ * * Released under the terms of the GNU GPL v2.0. 
*/ - #include #include @@ -15,43 +14,37 @@ #include #include -void __flush_wback_region(void *start, int size) -{ - unsigned long v; - unsigned long begin, end; - - begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); - end = ((unsigned long)start + size + L1_CACHE_BYTES-1) - & ~(L1_CACHE_BYTES-1); - for (v = begin; v < end; v+=L1_CACHE_BYTES) { - /* FIXME cache purge */ - ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008); - } -} - -void __flush_purge_region(void *start, int size) -{ - unsigned long v; - unsigned long begin, end; - - begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); - end = ((unsigned long)start + size + L1_CACHE_BYTES-1) - & ~(L1_CACHE_BYTES-1); - for (v = begin; v < end; v+=L1_CACHE_BYTES) { - ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008); - } -} - -void __flush_invalidate_region(void *start, int size) +/* + * Calculate the OC address and set the way bit on the SH-2. + * + * We must have already jump_to_P2()'ed prior to calling this + * function, since we rely on CCR manipulation to do the + * Right Thing(tm). + */ +unsigned long __get_oc_addr(unsigned long set, unsigned long way) { - unsigned long v; - unsigned long begin, end; - - begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); - end = ((unsigned long)start + size + L1_CACHE_BYTES-1) - & ~(L1_CACHE_BYTES-1); - for (v = begin; v < end; v+=L1_CACHE_BYTES) { - ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008); - } + unsigned long ccr; + + /* + * On SH-2 the way bit isn't tracked in the address field + * if we're doing address array access .. instead, we need + * to manually switch out the way in the CCR. + */ + ccr = ctrl_inl(CCR); + ccr &= ~0x00c0; + ccr |= way << cpu_data->dcache.way_shift; + + /* + * Despite the number of sets being halved, we end up losing + * the first 2 ways to OCRAM instead of the last 2 (if we're + * 4-way). As a result, forcibly setting the W1 bit handily + * bumps us up 2 ways. + */ + if (ccr & CCR_CACHE_ORA) + ccr |= 1 << (cpu_data->dcache.way_shift + 1); + + ctrl_outl(ccr, CCR); + + return CACHE_OC_ADDRESS_ARRAY | (set << cpu_data->dcache.entry_shift); } diff --git a/trunk/arch/sh/mm/cache-sh4.c b/trunk/arch/sh/mm/cache-sh4.c index ae531affccbd..e48cc22724d9 100644 --- a/trunk/arch/sh/mm/cache-sh4.c +++ b/trunk/arch/sh/mm/cache-sh4.c @@ -11,8 +11,12 @@ */ #include #include -#include -#include +#include +#include +#include +#include +#include +#include #include #include @@ -79,9 +83,9 @@ static void __init emit_cache_params(void) */ /* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits. 
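The three __flush_*_region() helpers dropped from cache-sh2.c all share one shape: round the region out to whole cache lines, then poke the address array once per line with ctrl_outl(). A small worked example of the rounding, with illustrative numbers that are not from the patch:

    /* With L1_CACHE_BYTES == 16, start == (void *)0x1009 and size == 0x30: */
    begin = (unsigned long)start & ~(L1_CACHE_BYTES - 1);            /* 0x1000 */
    end   = ((unsigned long)start + size + L1_CACHE_BYTES - 1)
            & ~(L1_CACHE_BYTES - 1);                                  /* 0x1040 */
    /* The 48 bytes at 0x1009..0x1038 grow to the line-aligned range
     * 0x1000..0x1040, so the loop issues one ctrl_outl() for each of the
     * four 16-byte lines it covers. */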
*/ -#define MAX_P3_MUTEXES 16 +#define MAX_P3_SEMAPHORES 16 -struct mutex p3map_mutex[MAX_P3_MUTEXES]; +struct semaphore p3map_sem[MAX_P3_SEMAPHORES]; void __init p3_cache_init(void) { @@ -111,7 +115,7 @@ void __init p3_cache_init(void) panic("%s failed.", __FUNCTION__); for (i = 0; i < cpu_data->dcache.n_aliases; i++) - mutex_init(&p3map_mutex[i]); + sema_init(&p3map_sem[i], 1); } /* @@ -225,7 +229,7 @@ static inline void flush_cache_4096(unsigned long start, */ if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) || (start < CACHE_OC_ADDRESS_ARRAY)) - exec_offset = 0x20000000; + exec_offset = 0x20000000; local_irq_save(flags); __flush_cache_4096(start | SH_CACHE_ASSOC, @@ -246,7 +250,7 @@ void flush_dcache_page(struct page *page) /* Loop all the D-cache */ n = cpu_data->dcache.n_aliases; - for (i = 0; i < n; i++, addr += 4096) + for (i = 0; i < n; i++, addr += PAGE_SIZE) flush_cache_4096(addr, phys); } diff --git a/trunk/arch/sh/mm/clear_page.S b/trunk/arch/sh/mm/clear_page.S index 8a706131e521..7b96425ae270 100644 --- a/trunk/arch/sh/mm/clear_page.S +++ b/trunk/arch/sh/mm/clear_page.S @@ -1,12 +1,12 @@ -/* +/* $Id: clear_page.S,v 1.13 2003/08/25 17:03:10 lethal Exp $ + * * __clear_user_page, __clear_user, clear_page implementation of SuperH * * Copyright (C) 2001 Kaz Kojima * Copyright (C) 2001, 2002 Niibe Yutaka - * Copyright (C) 2006 Paul Mundt + * */ #include -#include /* * clear_page_slow @@ -18,11 +18,11 @@ /* * r0 --- scratch * r4 --- to - * r5 --- to + PAGE_SIZE + * r5 --- to + 4096 */ ENTRY(clear_page_slow) mov r4,r5 - mov.l .Llimit,r0 + mov.w .Llimit,r0 add r0,r5 mov #0,r0 ! @@ -50,7 +50,7 @@ ENTRY(clear_page_slow) ! rts nop -.Llimit: .long (PAGE_SIZE-28) +.Llimit: .word (4096-28) ENTRY(__clear_user) ! @@ -164,10 +164,10 @@ ENTRY(__clear_user) * r0 --- scratch * r4 --- to * r5 --- orig_to - * r6 --- to + PAGE_SIZE + * r6 --- to + 4096 */ ENTRY(__clear_user_page) - mov.l .Lpsz,r0 + mov.w .L4096,r0 mov r4,r6 add r0,r6 mov #0,r0 @@ -191,7 +191,7 @@ ENTRY(__clear_user_page) ! rts nop -.Lpsz: .long PAGE_SIZE +.L4096: .word 4096 #endif diff --git a/trunk/arch/sh/mm/copy_page.S b/trunk/arch/sh/mm/copy_page.S index 397c94c97315..1addffe117c3 100644 --- a/trunk/arch/sh/mm/copy_page.S +++ b/trunk/arch/sh/mm/copy_page.S @@ -1,12 +1,12 @@ -/* +/* $Id: copy_page.S,v 1.8 2003/08/25 17:03:10 lethal Exp $ + * * copy_page, __copy_user_page, __copy_user implementation of SuperH * * Copyright (C) 2001 Niibe Yutaka & Kaz Kojima * Copyright (C) 2002 Toshinobu Sugioka - * Copyright (C) 2006 Paul Mundt + * */ #include -#include /* * copy_page_slow @@ -18,7 +18,7 @@ /* * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch - * r8 --- from + PAGE_SIZE + * r8 --- from + 4096 * r9 --- not used * r10 --- to * r11 --- from @@ -30,7 +30,7 @@ ENTRY(copy_page_slow) mov r4,r10 mov r5,r11 mov r5,r8 - mov.l .Lpsz,r0 + mov.w .L4096,r0 add r0,r8 ! 1: mov.l @r11+,r0 @@ -80,7 +80,7 @@ ENTRY(copy_page_slow) /* * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch - * r8 --- from + PAGE_SIZE + * r8 --- from + 4096 * r9 --- orig_to * r10 --- to * r11 --- from @@ -94,7 +94,7 @@ ENTRY(__copy_user_page) mov r5,r11 mov r6,r9 mov r5,r8 - mov.l .Lpsz,r0 + mov.w .L4096,r0 add r0,r8 ! 
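The p3map change here, together with the matching down()/up() calls later in pg-sh4.c, is the usual swap between a struct mutex and a counting semaphore initialised to 1. A sketch of the two lock flavours, not patch text:

    /* '-' side: dedicated mutex API. */
    struct mutex lock;
    mutex_init(&lock);
    mutex_lock(&lock);
    /* ... touch the per-alias P3 mapping ... */
    mutex_unlock(&lock);

    /* '+' side: a semaphore whose count starts at 1 behaves as a binary lock. */
    struct semaphore sem;
    sema_init(&sem, 1);
    down(&sem);
    /* ... */
    up(&sem);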
1: ocbi @r9 @@ -129,7 +129,7 @@ ENTRY(__copy_user_page) rts nop #endif -.Lpsz: .long PAGE_SIZE +.L4096: .word 4096 /* * __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); * Return the number of bytes NOT copied diff --git a/trunk/arch/sh/mm/fault.c b/trunk/arch/sh/mm/fault.c index 716ebf568af2..68663b8f99ae 100644 --- a/trunk/arch/sh/mm/fault.c +++ b/trunk/arch/sh/mm/fault.c @@ -26,19 +26,13 @@ extern void die(const char *,struct pt_regs *,long); * and the problem, and then passes it off to one of the appropriate * routines. */ -asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, - unsigned long writeaccess, - unsigned long address) +asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess, + unsigned long address) { struct task_struct *tsk; struct mm_struct *mm; struct vm_area_struct * vma; unsigned long page; - int si_code; - siginfo_t info; - - trace_hardirqs_on(); - local_irq_enable(); #ifdef CONFIG_SH_KGDB if (kgdb_nofault && kgdb_bus_err_hook) @@ -47,46 +41,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, tsk = current; mm = tsk->mm; - si_code = SEGV_MAPERR; - - if (unlikely(address >= TASK_SIZE)) { - /* - * Synchronize this task's top level page-table - * with the 'reference' page table. - * - * Do _not_ use "tsk" here. We might be inside - * an interrupt in the middle of a task switch.. - */ - int offset = pgd_index(address); - pgd_t *pgd, *pgd_k; - pud_t *pud, *pud_k; - pmd_t *pmd, *pmd_k; - - pgd = get_TTB() + offset; - pgd_k = swapper_pg_dir + offset; - - /* This will never happen with the folded page table. */ - if (!pgd_present(*pgd)) { - if (!pgd_present(*pgd_k)) - goto bad_area_nosemaphore; - set_pgd(pgd, *pgd_k); - return; - } - - pud = pud_offset(pgd, address); - pud_k = pud_offset(pgd_k, address); - if (pud_present(*pud) || !pud_present(*pud_k)) - goto bad_area_nosemaphore; - set_pud(pud, *pud_k); - - pmd = pmd_offset(pud, address); - pmd_k = pmd_offset(pud_k, address); - if (pmd_present(*pmd) || !pmd_present(*pmd_k)) - goto bad_area_nosemaphore; - set_pmd(pmd, *pmd_k); - - return; - } /* * If we're in an interrupt or have no user @@ -111,7 +65,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, * we can handle it.. */ good_area: - si_code = SEGV_ACCERR; if (writeaccess) { if (!(vma->vm_flags & VM_WRITE)) goto bad_area; @@ -151,13 +104,10 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, bad_area: up_read(&mm->mmap_sem); -bad_area_nosemaphore: if (user_mode(regs)) { - info.si_signo = SIGSEGV; - info.si_errno = 0; - info.si_code = si_code; - info.si_addr = (void *) address; - force_sig_info(SIGSEGV, &info, tsk); + tsk->thread.address = address; + tsk->thread.error_code = writeaccess; + force_sig(SIGSEGV, tsk); return; } @@ -177,9 +127,11 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, printk(KERN_ALERT "Unable to handle kernel paging request"); printk(" at virtual address %08lx\n", address); printk(KERN_ALERT "pc = %08lx\n", regs->pc); - page = (unsigned long)get_TTB(); + asm volatile("mov.l %1, %0" + : "=r" (page) + : "m" (__m(MMU_TTB))); if (page) { - page = ((unsigned long *) page)[address >> PGDIR_SHIFT]; + page = ((unsigned long *) page)[address >> 22]; printk(KERN_ALERT "*pde = %08lx\n", page); if (page & _PAGE_PRESENT) { page &= PAGE_MASK; @@ -214,13 +166,98 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, * Send a sigbus, regardless of whether we were in kernel * or user mode. 
*/ - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRERR; - info.si_addr = (void *)address; - force_sig_info(SIGBUS, &info, tsk); + tsk->thread.address = address; + tsk->thread.error_code = writeaccess; + tsk->thread.trap_no = 14; + force_sig(SIGBUS, tsk); /* Kernel mode? Handle exceptions or die */ if (!user_mode(regs)) goto no_context; } + +#ifdef CONFIG_SH_STORE_QUEUES +/* + * This is a special case for the SH-4 store queues, as pages for this + * space still need to be faulted in before it's possible to flush the + * store queue cache for writeout to the remapped region. + */ +#define P3_ADDR_MAX (P4SEG_STORE_QUE + 0x04000000) +#else +#define P3_ADDR_MAX P4SEG +#endif + +/* + * Called with interrupts disabled. + */ +asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs, + unsigned long writeaccess, + unsigned long address) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + pte_t entry; + struct mm_struct *mm = current->mm; + spinlock_t *ptl; + int ret = 1; + +#ifdef CONFIG_SH_KGDB + if (kgdb_nofault && kgdb_bus_err_hook) + kgdb_bus_err_hook(); +#endif + + /* + * We don't take page faults for P1, P2, and parts of P4, these + * are always mapped, whether it be due to legacy behaviour in + * 29-bit mode, or due to PMB configuration in 32-bit mode. + */ + if (address >= P3SEG && address < P3_ADDR_MAX) { + pgd = pgd_offset_k(address); + mm = NULL; + } else { + if (unlikely(address >= TASK_SIZE || !mm)) + return 1; + + pgd = pgd_offset(mm, address); + } + + pud = pud_offset(pgd, address); + if (pud_none_or_clear_bad(pud)) + return 1; + pmd = pmd_offset(pud, address); + if (pmd_none_or_clear_bad(pmd)) + return 1; + + if (mm) + pte = pte_offset_map_lock(mm, pmd, address, &ptl); + else + pte = pte_offset_kernel(pmd, address); + + entry = *pte; + if (unlikely(pte_none(entry) || pte_not_present(entry))) + goto unlock; + if (unlikely(writeaccess && !pte_write(entry))) + goto unlock; + + if (writeaccess) + entry = pte_mkdirty(entry); + entry = pte_mkyoung(entry); + +#ifdef CONFIG_CPU_SH4 + /* + * ITLB is not affected by "ldtlb" instruction. + * So, we need to flush the entry by ourselves. 
+ */ + __flush_tlb_page(get_asid(), address & PAGE_MASK); +#endif + + set_pte(pte, entry); + update_mmu_cache(NULL, address, entry); + ret = 0; +unlock: + if (mm) + pte_unmap_unlock(pte, ptl); + return ret; +} diff --git a/trunk/arch/sh/mm/hugetlbpage.c b/trunk/arch/sh/mm/hugetlbpage.c index cf2c2ee35a37..329059d6b54a 100644 --- a/trunk/arch/sh/mm/hugetlbpage.c +++ b/trunk/arch/sh/mm/hugetlbpage.c @@ -63,11 +63,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) return pte; } -int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) -{ - return 0; -} - struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) { diff --git a/trunk/arch/sh/mm/init.c b/trunk/arch/sh/mm/init.c index 59f4cc18235b..7154d1ce9785 100644 --- a/trunk/arch/sh/mm/init.c +++ b/trunk/arch/sh/mm/init.c @@ -84,22 +84,30 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) pmd_t *pmd; pte_t *pte; - pgd = pgd_offset_k(addr); + pgd = swapper_pg_dir + pgd_index(addr); if (pgd_none(*pgd)) { pgd_ERROR(*pgd); return; } - pud = pud_alloc(NULL, pgd, addr); - if (unlikely(!pud)) { - pud_ERROR(*pud); - return; + pud = pud_offset(pgd, addr); + if (pud_none(*pud)) { + pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC); + set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER)); + if (pmd != pmd_offset(pud, 0)) { + pud_ERROR(*pud); + return; + } } - pmd = pmd_alloc(NULL, pud, addr); - if (unlikely(!pmd)) { - pmd_ERROR(*pmd); - return; + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) { + pte = (pte_t *)get_zeroed_page(GFP_ATOMIC); + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER)); + if (pte != pte_offset_kernel(pmd, 0)) { + pmd_ERROR(*pmd); + return; + } } pte = pte_offset_kernel(pmd, addr); @@ -147,6 +155,9 @@ extern char __init_begin, __init_end; /* * paging_init() sets up the page tables + * + * This routines also unmaps the page at virtual kernel address 0, so + * that we can trap those pesky NULL-reference errors in the kernel. */ void __init paging_init(void) { @@ -169,11 +180,14 @@ void __init paging_init(void) */ { unsigned long max_dma, low, start_pfn; + pgd_t *pg_dir; + int i; + + /* We don't need kernel mapping as hardware support that. */ + pg_dir = swapper_pg_dir; - /* We don't need to map the kernel through the TLB, as - * it is permanatly mapped using P1. So clear the - * entire pgd. */ - memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir)); + for (i = 0; i < PTRS_PER_PGD; i++) + pgd_val(pg_dir[i]) = 0; /* Turn on the MMU */ enable_mmu(); @@ -192,10 +206,6 @@ void __init paging_init(void) } } - /* Set an initial value for the MMU.TTB so we don't have to - * check for a null value. 
*/ - set_TTB(swapper_pg_dir); - #elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4) /* * If we don't have CONFIG_MMU set and the processor in question @@ -217,6 +227,7 @@ static struct kcore_list kcore_mem, kcore_vmalloc; void __init mem_init(void) { + extern unsigned long empty_zero_page[1024]; int codesize, reservedpages, datasize, initsize; int tmp; extern unsigned long memory_start; diff --git a/trunk/arch/sh/mm/ioremap.c b/trunk/arch/sh/mm/ioremap.c index 11d54c149821..a9fe80cfc233 100644 --- a/trunk/arch/sh/mm/ioremap.c +++ b/trunk/arch/sh/mm/ioremap.c @@ -28,7 +28,9 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, { unsigned long end; unsigned long pfn; - pgprot_t pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags); + pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | + _PAGE_DIRTY | _PAGE_ACCESSED | + _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | flags); address &= ~PMD_MASK; end = address + size; diff --git a/trunk/arch/sh/mm/pg-dma.c b/trunk/arch/sh/mm/pg-dma.c index bb23679369d6..1406d2e348ca 100644 --- a/trunk/arch/sh/mm/pg-dma.c +++ b/trunk/arch/sh/mm/pg-dma.c @@ -39,6 +39,8 @@ static void copy_page_dma(void *to, void *from) static void clear_page_dma(void *to) { + extern unsigned long empty_zero_page[1024]; + /* * We get invoked quite early on, if the DMAC hasn't been initialized * yet, fall back on the slow manual implementation. diff --git a/trunk/arch/sh/mm/pg-sh4.c b/trunk/arch/sh/mm/pg-sh4.c index 3f98d2a4f936..07371ed7a313 100644 --- a/trunk/arch/sh/mm/pg-sh4.c +++ b/trunk/arch/sh/mm/pg-sh4.c @@ -6,12 +6,22 @@ * * Released under the terms of the GNU GPL v2.0. */ +#include +#include #include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include -extern struct mutex p3map_mutex[]; +extern struct semaphore p3map_sem[]; #define CACHE_ALIAS (cpu_data->dcache.alias_mask) @@ -27,6 +37,10 @@ void clear_user_page(void *to, unsigned long address, struct page *page) if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) clear_page(to); else { + pgprot_t pgprot = __pgprot(_PAGE_PRESENT | + _PAGE_RW | _PAGE_CACHABLE | + _PAGE_DIRTY | _PAGE_ACCESSED | + _PAGE_HW_SHARED | _PAGE_FLAGS_HARD); unsigned long phys_addr = PHYSADDR(to); unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS); pgd_t *pgd = pgd_offset_k(p3_addr); @@ -36,8 +50,8 @@ void clear_user_page(void *to, unsigned long address, struct page *page) pte_t entry; unsigned long flags; - entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL); - mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]); + entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot); + down(&p3map_sem[(address & CACHE_ALIAS)>>12]); set_pte(pte, entry); local_irq_save(flags); __flush_tlb_page(get_asid(), p3_addr); @@ -45,7 +59,7 @@ void clear_user_page(void *to, unsigned long address, struct page *page) update_mmu_cache(NULL, p3_addr, entry); __clear_user_page((void *)p3_addr, to); pte_clear(&init_mm, p3_addr, pte); - mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]); + up(&p3map_sem[(address & CACHE_ALIAS)>>12]); } } @@ -63,6 +77,10 @@ void copy_user_page(void *to, void *from, unsigned long address, if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) copy_page(to, from); else { + pgprot_t pgprot = __pgprot(_PAGE_PRESENT | + _PAGE_RW | _PAGE_CACHABLE | + _PAGE_DIRTY | _PAGE_ACCESSED | + _PAGE_HW_SHARED | _PAGE_FLAGS_HARD); unsigned long phys_addr = PHYSADDR(to); unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS); pgd_t *pgd = pgd_offset_k(p3_addr); @@ 
-72,8 +90,8 @@ void copy_user_page(void *to, void *from, unsigned long address, pte_t entry; unsigned long flags; - entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL); - mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]); + entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot); + down(&p3map_sem[(address & CACHE_ALIAS)>>12]); set_pte(pte, entry); local_irq_save(flags); __flush_tlb_page(get_asid(), p3_addr); @@ -81,7 +99,7 @@ void copy_user_page(void *to, void *from, unsigned long address, update_mmu_cache(NULL, p3_addr, entry); __copy_user_page((void *)p3_addr, from, to); pte_clear(&init_mm, p3_addr, pte); - mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]); + up(&p3map_sem[(address & CACHE_ALIAS)>>12]); } } @@ -104,3 +122,4 @@ inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t } return pte; } + diff --git a/trunk/arch/sh/mm/pmb.c b/trunk/arch/sh/mm/pmb.c index b60ad83a7635..92e745341e4d 100644 --- a/trunk/arch/sh/mm/pmb.c +++ b/trunk/arch/sh/mm/pmb.c @@ -30,7 +30,7 @@ #define NR_PMB_ENTRIES 16 -static struct kmem_cache *pmb_cache; +static kmem_cache_t *pmb_cache; static unsigned long pmb_map; static struct pmb_entry pmb_init_map[] = { @@ -283,7 +283,7 @@ void pmb_unmap(unsigned long addr) } while (pmbe); } -static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep, unsigned long flags) +static void pmb_cache_ctor(void *pmb, kmem_cache_t *cachep, unsigned long flags) { struct pmb_entry *pmbe = pmb; @@ -297,7 +297,7 @@ static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep, unsigned long f spin_unlock_irq(&pmb_list_lock); } -static void pmb_cache_dtor(void *pmb, struct kmem_cache *cachep, unsigned long flags) +static void pmb_cache_dtor(void *pmb, kmem_cache_t *cachep, unsigned long flags) { spin_lock_irq(&pmb_list_lock); pmb_list_del(pmb); diff --git a/trunk/arch/sh/tools/mach-types b/trunk/arch/sh/tools/mach-types index 0571755e9a84..ac57638977ee 100644 --- a/trunk/arch/sh/tools/mach-types +++ b/trunk/arch/sh/tools/mach-types @@ -30,5 +30,3 @@ R7780MP SH_R7780MP TITAN SH_TITAN SHMIN SH_SHMIN 7710VOIPGW SH_7710VOIPGW -7206SE SH_7206_SOLUTION_ENGINE -7619SE SH_7619_SOLUTION_ENGINE diff --git a/trunk/arch/sh64/kernel/setup.c b/trunk/arch/sh64/kernel/setup.c index b9e7d54d7b85..ffb310e33cef 100644 --- a/trunk/arch/sh64/kernel/setup.c +++ b/trunk/arch/sh64/kernel/setup.c @@ -243,7 +243,9 @@ void __init setup_arch(char **cmdline_p) if (INITRD_START + INITRD_SIZE <= (PFN_PHYS(last_pfn))) { reserve_bootmem_node(NODE_DATA(0), INITRD_START + __MEMORY_START, INITRD_SIZE); - initrd_start = (long) INITRD_START + PAGE_OFFSET + __MEMORY_START; + initrd_start = + (long) INITRD_START ? INITRD_START + PAGE_OFFSET + __MEMORY_START : 0; + initrd_end = initrd_start + INITRD_SIZE; } else { printk("initrd extends beyond end of memory " diff --git a/trunk/arch/sh64/kernel/signal.c b/trunk/arch/sh64/kernel/signal.c index 1666d3efb52e..9e2ffc45c0e0 100644 --- a/trunk/arch/sh64/kernel/signal.c +++ b/trunk/arch/sh64/kernel/signal.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/trunk/arch/sh64/mm/fault.c b/trunk/arch/sh64/mm/fault.c index 4f72ab33bb2b..8e2f6c28b739 100644 --- a/trunk/arch/sh64/mm/fault.c +++ b/trunk/arch/sh64/mm/fault.c @@ -154,7 +154,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess, * If we're in an interrupt or have no user * context, we must not take the fault.. 
*/ - if (in_atomic() || !mm) + if (in_interrupt() || !mm) goto no_context; /* TLB misses upon some cache flushes get done under cli() */ diff --git a/trunk/arch/sh64/mm/hugetlbpage.c b/trunk/arch/sh64/mm/hugetlbpage.c index 4b455f611146..187cf01750b8 100644 --- a/trunk/arch/sh64/mm/hugetlbpage.c +++ b/trunk/arch/sh64/mm/hugetlbpage.c @@ -53,11 +53,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) return pte; } -int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) -{ - return 0; -} - void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t entry) { diff --git a/trunk/arch/sparc/mm/highmem.c b/trunk/arch/sparc/mm/highmem.c index 01fc6c254292..4d8ed9c65182 100644 --- a/trunk/arch/sparc/mm/highmem.c +++ b/trunk/arch/sparc/mm/highmem.c @@ -35,7 +35,7 @@ void *kmap_atomic(struct page *page, enum km_type type) unsigned long vaddr; /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ - pagefault_disable(); + inc_preempt_count(); if (!PageHighMem(page)) return page_address(page); @@ -70,7 +70,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type) unsigned long idx = type + KM_TYPE_NR*smp_processor_id(); if (vaddr < FIXADDR_START) { // FIXME - pagefault_enable(); + dec_preempt_count(); + preempt_check_resched(); return; } @@ -96,7 +97,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type) #endif #endif - pagefault_enable(); + dec_preempt_count(); + preempt_check_resched(); } /* We may be fed a pagetable here by ptep_to_xxx and others. */ diff --git a/trunk/arch/sparc64/kernel/binfmt_elf32.c b/trunk/arch/sparc64/kernel/binfmt_elf32.c index 9ad84ff10a17..a98f3ae175a3 100644 --- a/trunk/arch/sparc64/kernel/binfmt_elf32.c +++ b/trunk/arch/sparc64/kernel/binfmt_elf32.c @@ -141,6 +141,7 @@ cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) value->tv_sec = jiffies / HZ; } +#define elf_addr_t u32 #undef start_thread #define start_thread start_thread32 #define init_elf_binfmt init_elf32_binfmt diff --git a/trunk/arch/sparc64/mm/hugetlbpage.c b/trunk/arch/sparc64/mm/hugetlbpage.c index 33fd0b265e70..53b9b1f528e5 100644 --- a/trunk/arch/sparc64/mm/hugetlbpage.c +++ b/trunk/arch/sparc64/mm/hugetlbpage.c @@ -235,11 +235,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) return pte; } -int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) -{ - return 0; -} - void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t entry) { diff --git a/trunk/arch/sparc64/mm/init.c b/trunk/arch/sparc64/mm/init.c index a8e8802eed4d..09cb7fccc03a 100644 --- a/trunk/arch/sparc64/mm/init.c +++ b/trunk/arch/sparc64/mm/init.c @@ -176,9 +176,9 @@ unsigned long sparc64_kern_sec_context __read_mostly; int bigkernel = 0; -struct kmem_cache *pgtable_cache __read_mostly; +kmem_cache_t *pgtable_cache __read_mostly; -static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags) +static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags) { clear_page(addr); } diff --git a/trunk/arch/sparc64/mm/tsb.c b/trunk/arch/sparc64/mm/tsb.c index 236d02f41a01..beaa02810f0e 100644 --- a/trunk/arch/sparc64/mm/tsb.c +++ b/trunk/arch/sparc64/mm/tsb.c @@ -239,7 +239,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign } } -static struct kmem_cache *tsb_caches[8] __read_mostly; +static kmem_cache_t *tsb_caches[8] __read_mostly; static const char *tsb_cache_names[8] = { "tsb_8KB", diff --git 
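The sparc highmem hunk swaps the pagefault_disable()/pagefault_enable() wrappers for their open-coded preempt-count equivalents, which is exactly what the wrappers amount to. A sketch of a caller using the two-argument km_type era API; buf and len are illustrative names:

    /* pagefault_disable() is essentially inc_preempt_count() plus a barrier;
     * pagefault_enable() is dec_preempt_count() plus preempt_check_resched().
     * Either way preempt_count is raised, so in_atomic() is true and
     * do_page_fault() refuses to sleep, per the comment in kmap_atomic(). */
    void *vaddr = kmap_atomic(page, KM_USER0);   /* implies pagefault_disable() */
    memcpy(vaddr, buf, len);                     /* must not sleep in here */
    kunmap_atomic(vaddr, KM_USER0);              /* implies pagefault_enable() */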
a/trunk/arch/um/drivers/chan_kern.c b/trunk/arch/um/drivers/chan_kern.c index 7d4190e55654..3576b3cc505e 100644 --- a/trunk/arch/um/drivers/chan_kern.c +++ b/trunk/arch/um/drivers/chan_kern.c @@ -638,7 +638,7 @@ int chan_out_fd(struct list_head *chans) return -1; } -void chan_interrupt(struct list_head *chans, struct delayed_work *task, +void chan_interrupt(struct list_head *chans, struct work_struct *task, struct tty_struct *tty, int irq) { struct list_head *ele, *next; diff --git a/trunk/arch/um/drivers/daemon_kern.c b/trunk/arch/um/drivers/daemon_kern.c index 9c2e7a758f21..824386974f88 100644 --- a/trunk/arch/um/drivers/daemon_kern.c +++ b/trunk/arch/um/drivers/daemon_kern.c @@ -98,4 +98,4 @@ static int register_daemon(void) return 0; } -late_initcall(register_daemon); +__initcall(register_daemon); diff --git a/trunk/arch/um/drivers/line.c b/trunk/arch/um/drivers/line.c index aa3090d05a8f..426633e5d6e3 100644 --- a/trunk/arch/um/drivers/line.c +++ b/trunk/arch/um/drivers/line.c @@ -31,9 +31,9 @@ static irqreturn_t line_interrupt(int irq, void *data) return IRQ_HANDLED; } -static void line_timer_cb(struct work_struct *work) +static void line_timer_cb(void *arg) { - struct line *line = container_of(work, struct line, task.work); + struct line *line = arg; if(!line->throttled) chan_interrupt(&line->chan_list, &line->task, line->tty, @@ -443,7 +443,7 @@ int line_open(struct line *lines, struct tty_struct *tty) * is registered. */ enable_chan(line); - INIT_DELAYED_WORK(&line->task, line_timer_cb); + INIT_WORK(&line->task, line_timer_cb, line); if(!line->sigio){ chan_enable_winch(&line->chan_list, tty); diff --git a/trunk/arch/um/drivers/mcast_kern.c b/trunk/arch/um/drivers/mcast_kern.c index 52ccb7b53cd2..c090fbd464e7 100644 --- a/trunk/arch/um/drivers/mcast_kern.c +++ b/trunk/arch/um/drivers/mcast_kern.c @@ -127,4 +127,4 @@ static int register_mcast(void) return 0; } -late_initcall(register_mcast); +__initcall(register_mcast); diff --git a/trunk/arch/um/drivers/mconsole_kern.c b/trunk/arch/um/drivers/mconsole_kern.c index 96f0189327af..7b172160fe04 100644 --- a/trunk/arch/um/drivers/mconsole_kern.c +++ b/trunk/arch/um/drivers/mconsole_kern.c @@ -56,7 +56,7 @@ static struct notifier_block reboot_notifier = { static LIST_HEAD(mc_requests); -static void mc_work_proc(struct work_struct *unused) +static void mc_work_proc(void *unused) { struct mconsole_entry *req; unsigned long flags; @@ -72,7 +72,7 @@ static void mc_work_proc(struct work_struct *unused) } } -static DECLARE_WORK(mconsole_work, mc_work_proc); +static DECLARE_WORK(mconsole_work, mc_work_proc, NULL); static irqreturn_t mconsole_interrupt(int irq, void *dev_id) { diff --git a/trunk/arch/um/drivers/net_kern.c b/trunk/arch/um/drivers/net_kern.c index 286bc0b3207f..ec9eb8bd9432 100644 --- a/trunk/arch/um/drivers/net_kern.c +++ b/trunk/arch/um/drivers/net_kern.c @@ -99,7 +99,6 @@ irqreturn_t uml_net_interrupt(int irq, void *dev_id) * same device, since it tests for (dev->flags & IFF_UP). So * there's no harm in delaying the device shutdown. 
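Most of the remaining churn in this patch is the same workqueue conversion repeated per driver. The two callback conventions being toggled look roughly like the sketch below; the struct mirrors the UML line driver, the two handler definitions are mutually exclusive alternatives, and none of it is patch text.

    struct line {
            struct delayed_work task;   /* '-' side; plain struct work_struct on the '+' side */
            int throttled;
            /* ... */
    };

    /* '-' side: the handler receives the work item and recovers its owner. */
    static void line_timer_cb(struct work_struct *work)
    {
            struct line *line = container_of(work, struct line, task.work);
            /* ... */
    }
    /* set up with: INIT_DELAYED_WORK(&line->task, line_timer_cb); */

    /* '+' side: the handler receives whatever pointer was passed at init. */
    static void line_timer_cb(void *arg)
    {
            struct line *line = arg;
            /* ... */
    }
    /* set up with: INIT_WORK(&line->task, line_timer_cb, line);
     * the same three-argument form sits behind DECLARE_WORK(name, fn, data)
     * and PREPARE_WORK(w, fn, data) in the other hunks of this patch. */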
*/ schedule_work(&close_work); -#error this is not permitted - close_work will go out of scope goto out; } reactivate_fd(lp->fd, UM_ETH_IRQ); diff --git a/trunk/arch/um/drivers/pcap_kern.c b/trunk/arch/um/drivers/pcap_kern.c index e67362acf0e7..6e1ef8558283 100644 --- a/trunk/arch/um/drivers/pcap_kern.c +++ b/trunk/arch/um/drivers/pcap_kern.c @@ -109,4 +109,4 @@ static int register_pcap(void) return 0; } -late_initcall(register_pcap); +__initcall(register_pcap); diff --git a/trunk/arch/um/drivers/port_kern.c b/trunk/arch/um/drivers/port_kern.c index 6dfe632f1c14..ce9f3733f73e 100644 --- a/trunk/arch/um/drivers/port_kern.c +++ b/trunk/arch/um/drivers/port_kern.c @@ -132,7 +132,7 @@ static int port_accept(struct port_list *port) DECLARE_MUTEX(ports_sem); struct list_head ports = LIST_HEAD_INIT(ports); -void port_work_proc(struct work_struct *unused) +void port_work_proc(void *unused) { struct port_list *port; struct list_head *ele; @@ -150,7 +150,7 @@ void port_work_proc(struct work_struct *unused) local_irq_restore(flags); } -DECLARE_WORK(port_work, port_work_proc); +DECLARE_WORK(port_work, port_work_proc, NULL); static irqreturn_t port_interrupt(int irq, void *data) { diff --git a/trunk/arch/um/drivers/slip_kern.c b/trunk/arch/um/drivers/slip_kern.c index 25634bd1f585..788da5439a2d 100644 --- a/trunk/arch/um/drivers/slip_kern.c +++ b/trunk/arch/um/drivers/slip_kern.c @@ -95,4 +95,4 @@ static int register_slip(void) return 0; } -late_initcall(register_slip); +__initcall(register_slip); diff --git a/trunk/arch/um/drivers/slirp_kern.c b/trunk/arch/um/drivers/slirp_kern.c index b3ed8fb874ab..ae322e1c8a87 100644 --- a/trunk/arch/um/drivers/slirp_kern.c +++ b/trunk/arch/um/drivers/slirp_kern.c @@ -119,4 +119,4 @@ static int register_slirp(void) return 0; } -late_initcall(register_slirp); +__initcall(register_slirp); diff --git a/trunk/arch/um/include/chan_kern.h b/trunk/arch/um/include/chan_kern.h index 9003a343e148..572d286ed2c6 100644 --- a/trunk/arch/um/include/chan_kern.h +++ b/trunk/arch/um/include/chan_kern.h @@ -27,7 +27,7 @@ struct chan { void *data; }; -extern void chan_interrupt(struct list_head *chans, struct delayed_work *task, +extern void chan_interrupt(struct list_head *chans, struct work_struct *task, struct tty_struct *tty, int irq); extern int parse_chan_pair(char *str, struct line *line, int device, const struct chan_opts *opts); diff --git a/trunk/arch/um/include/line.h b/trunk/arch/um/include/line.h index 214ee76c40df..7be24811bb30 100644 --- a/trunk/arch/um/include/line.h +++ b/trunk/arch/um/include/line.h @@ -51,7 +51,7 @@ struct line { char *tail; int sigio; - struct delayed_work task; + struct work_struct task; const struct line_driver *driver; int have_irq; }; diff --git a/trunk/arch/um/include/sysdep-i386/ptrace.h b/trunk/arch/um/include/sysdep-i386/ptrace.h index 52b398bcafcf..6670cc992ecb 100644 --- a/trunk/arch/um/include/sysdep-i386/ptrace.h +++ b/trunk/arch/um/include/sysdep-i386/ptrace.h @@ -75,7 +75,7 @@ union uml_pt_regs { #endif #ifdef UML_CONFIG_MODE_SKAS struct skas_regs { - unsigned long regs[MAX_REG_NR]; + unsigned long regs[HOST_FRAME_SIZE]; unsigned long fp[HOST_FP_SIZE]; unsigned long xfp[HOST_XFP_SIZE]; struct faultinfo faultinfo; diff --git a/trunk/arch/um/include/sysdep-i386/stub.h b/trunk/arch/um/include/sysdep-i386/stub.h index 4fffae75ba53..b492b12b4a10 100644 --- a/trunk/arch/um/include/sysdep-i386/stub.h +++ b/trunk/arch/um/include/sysdep-i386/stub.h @@ -9,7 +9,6 @@ #include #include #include -#include #include "stub-data.h" #include 
"kern_constants.h" #include "uml-config.h" diff --git a/trunk/arch/um/include/sysdep-x86_64/ptrace.h b/trunk/arch/um/include/sysdep-x86_64/ptrace.h index 66cb400c2c92..617bb9efc934 100644 --- a/trunk/arch/um/include/sysdep-x86_64/ptrace.h +++ b/trunk/arch/um/include/sysdep-x86_64/ptrace.h @@ -108,7 +108,7 @@ union uml_pt_regs { * file size, while i386 uses FRAME_SIZE. Therefore, we need * to use UM_FRAME_SIZE here instead of HOST_FRAME_SIZE. */ - unsigned long regs[MAX_REG_NR]; + unsigned long regs[UM_FRAME_SIZE]; unsigned long fp[HOST_FP_SIZE]; struct faultinfo faultinfo; long syscall; diff --git a/trunk/arch/um/os-Linux/drivers/ethertap_kern.c b/trunk/arch/um/os-Linux/drivers/ethertap_kern.c index 70541821775f..16385e2ada85 100644 --- a/trunk/arch/um/os-Linux/drivers/ethertap_kern.c +++ b/trunk/arch/um/os-Linux/drivers/ethertap_kern.c @@ -105,4 +105,4 @@ static int register_ethertap(void) return 0; } -late_initcall(register_ethertap); +__initcall(register_ethertap); diff --git a/trunk/arch/um/os-Linux/drivers/tuntap_kern.c b/trunk/arch/um/os-Linux/drivers/tuntap_kern.c index 76570a2c25c3..0edbac63c527 100644 --- a/trunk/arch/um/os-Linux/drivers/tuntap_kern.c +++ b/trunk/arch/um/os-Linux/drivers/tuntap_kern.c @@ -90,4 +90,4 @@ static int register_tuntap(void) return 0; } -late_initcall(register_tuntap); +__initcall(register_tuntap); diff --git a/trunk/arch/um/sys-i386/ldt.c b/trunk/arch/um/sys-i386/ldt.c index 49057d8bc668..e299ee5a753d 100644 --- a/trunk/arch/um/sys-i386/ldt.c +++ b/trunk/arch/um/sys-i386/ldt.c @@ -3,6 +3,7 @@ * Licensed under the GPL */ +#include "linux/stddef.h" #include "linux/sched.h" #include "linux/slab.h" #include "linux/types.h" diff --git a/trunk/arch/um/sys-i386/ptrace_user.c b/trunk/arch/um/sys-i386/ptrace_user.c index 01212c88fcc4..5f3cc6685820 100644 --- a/trunk/arch/um/sys-i386/ptrace_user.c +++ b/trunk/arch/um/sys-i386/ptrace_user.c @@ -4,9 +4,9 @@ */ #include -#include #include #include +#include #include "ptrace_user.h" /* Grr, asm/user.h includes asm/ptrace.h, so has to follow ptrace_user.h */ #include diff --git a/trunk/arch/um/sys-i386/user-offsets.c b/trunk/arch/um/sys-i386/user-offsets.c index 447306b20aea..6f4ef2b7fa4a 100644 --- a/trunk/arch/um/sys-i386/user-offsets.c +++ b/trunk/arch/um/sys-i386/user-offsets.c @@ -2,7 +2,7 @@ #include #include #include -#include +#include #include #define DEFINE(sym, val) \ diff --git a/trunk/arch/x86_64/defconfig b/trunk/arch/x86_64/defconfig index 0f5d44e86be5..96f226cfb339 100644 --- a/trunk/arch/x86_64/defconfig +++ b/trunk/arch/x86_64/defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.19-rc2-git4 -# Sat Oct 21 03:38:52 2006 +# Linux kernel version: 2.6.19-git7 +# Wed Dec 6 23:50:47 2006 # CONFIG_X86_64=y CONFIG_64BIT=y @@ -47,13 +47,14 @@ CONFIG_POSIX_MQUEUE=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y # CONFIG_CPUSETS is not set +CONFIG_SYSFS_DEPRECATED=y # CONFIG_RELAY is not set CONFIG_INITRAMFS_SOURCE="" CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_SYSCTL=y # CONFIG_EMBEDDED is not set CONFIG_UID16=y -# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_SYSCTL_SYSCALL=y CONFIG_KALLSYMS=y CONFIG_KALLSYMS_ALL=y # CONFIG_KALLSYMS_EXTRA_PASS is not set @@ -87,9 +88,7 @@ CONFIG_STOP_MACHINE=y # Block layer # CONFIG_BLOCK=y -CONFIG_LBD=y # CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_LSF is not set # # IO Schedulers @@ -111,10 +110,11 @@ CONFIG_X86_PC=y # CONFIG_X86_VSMP is not set # CONFIG_MK8 is not set # CONFIG_MPSC is not set -CONFIG_GENERIC_CPU=y 
-CONFIG_X86_L1_CACHE_BYTES=128 -CONFIG_X86_L1_CACHE_SHIFT=7 -CONFIG_X86_INTERNODE_CACHE_BYTES=128 +CONFIG_MCORE2=y +# CONFIG_GENERIC_CPU is not set +CONFIG_X86_L1_CACHE_BYTES=64 +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_INTERNODE_CACHE_BYTES=64 CONFIG_X86_TSC=y CONFIG_X86_GOOD_APIC=y # CONFIG_MICROCODE is not set @@ -322,6 +322,7 @@ CONFIG_INET_TCP_DIAG=y # CONFIG_TCP_CONG_ADVANCED is not set CONFIG_TCP_CONG_CUBIC=y CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set CONFIG_IPV6=y # CONFIG_IPV6_PRIVACY is not set # CONFIG_IPV6_ROUTER_PREF is not set @@ -624,6 +625,7 @@ CONFIG_SATA_INTEL_COMBINED=y # CONFIG_PATA_IT821X is not set # CONFIG_PATA_JMICRON is not set # CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_MARVELL is not set # CONFIG_PATA_MPIIX is not set # CONFIG_PATA_OLDPIIX is not set # CONFIG_PATA_NETCELL is not set @@ -795,6 +797,7 @@ CONFIG_BNX2=y CONFIG_S2IO=m # CONFIG_S2IO_NAPI is not set # CONFIG_MYRI10GE is not set +# CONFIG_NETXEN_NIC is not set # # Token Ring devices @@ -927,10 +930,6 @@ CONFIG_RTC=y # CONFIG_DTLK is not set # CONFIG_R3964 is not set # CONFIG_APPLICOM is not set - -# -# Ftape, the floppy tape device driver -# CONFIG_AGP=y CONFIG_AGP_AMD64=y CONFIG_AGP_INTEL=y @@ -1135,6 +1134,7 @@ CONFIG_USB_DEVICEFS=y # CONFIG_USB_BANDWIDTH is not set # CONFIG_USB_DYNAMIC_MINORS is not set # CONFIG_USB_SUSPEND is not set +# CONFIG_USB_MULTITHREAD_PROBE is not set # CONFIG_USB_OTG is not set # @@ -1212,6 +1212,7 @@ CONFIG_USB_HIDINPUT=y # CONFIG_USB_KAWETH is not set # CONFIG_USB_PEGASUS is not set # CONFIG_USB_RTL8150 is not set +# CONFIG_USB_USBNET_MII is not set # CONFIG_USB_USBNET is not set CONFIG_USB_MON=y diff --git a/trunk/arch/x86_64/ia32/ia32_binfmt.c b/trunk/arch/x86_64/ia32/ia32_binfmt.c index 543ef4f405e9..82ef182de6ae 100644 --- a/trunk/arch/x86_64/ia32/ia32_binfmt.c +++ b/trunk/arch/x86_64/ia32/ia32_binfmt.c @@ -305,6 +305,8 @@ MODULE_AUTHOR("Eric Youngdale, Andi Kleen"); #undef MODULE_DESCRIPTION #undef MODULE_AUTHOR +#define elf_addr_t __u32 + static void elf32_init(struct pt_regs *); #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 @@ -349,7 +351,7 @@ int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top, bprm->loader += stack_base; bprm->exec += stack_base; - mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); if (!mpnt) return -ENOMEM; diff --git a/trunk/arch/x86_64/ia32/syscall32.c b/trunk/arch/x86_64/ia32/syscall32.c index 3e5ed20cba45..3a01329473ab 100644 --- a/trunk/arch/x86_64/ia32/syscall32.c +++ b/trunk/arch/x86_64/ia32/syscall32.c @@ -49,7 +49,7 @@ int syscall32_setup_pages(struct linux_binprm *bprm, int exstack) struct mm_struct *mm = current->mm; int ret; - vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); if (!vma) return -ENOMEM; diff --git a/trunk/arch/x86_64/kernel/crash.c b/trunk/arch/x86_64/kernel/crash.c index 95a7a2c13131..3525f884af82 100644 --- a/trunk/arch/x86_64/kernel/crash.c +++ b/trunk/arch/x86_64/kernel/crash.c @@ -28,6 +28,71 @@ /* This keeps a track of which one is crashing cpu. 
*/ static int crashing_cpu; +static u32 *append_elf_note(u32 *buf, char *name, unsigned type, + void *data, size_t data_len) +{ + struct elf_note note; + + note.n_namesz = strlen(name) + 1; + note.n_descsz = data_len; + note.n_type = type; + memcpy(buf, ¬e, sizeof(note)); + buf += (sizeof(note) +3)/4; + memcpy(buf, name, note.n_namesz); + buf += (note.n_namesz + 3)/4; + memcpy(buf, data, note.n_descsz); + buf += (note.n_descsz + 3)/4; + + return buf; +} + +static void final_note(u32 *buf) +{ + struct elf_note note; + + note.n_namesz = 0; + note.n_descsz = 0; + note.n_type = 0; + memcpy(buf, ¬e, sizeof(note)); +} + +static void crash_save_this_cpu(struct pt_regs *regs, int cpu) +{ + struct elf_prstatus prstatus; + u32 *buf; + + if ((cpu < 0) || (cpu >= NR_CPUS)) + return; + + /* Using ELF notes here is opportunistic. + * I need a well defined structure format + * for the data I pass, and I need tags + * on the data to indicate what information I have + * squirrelled away. ELF notes happen to provide + * all of that, no need to invent something new. + */ + + buf = (u32*)per_cpu_ptr(crash_notes, cpu); + + if (!buf) + return; + + memset(&prstatus, 0, sizeof(prstatus)); + prstatus.pr_pid = current->pid; + elf_core_copy_regs(&prstatus.pr_reg, regs); + buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus, + sizeof(prstatus)); + final_note(buf); +} + +static void crash_save_self(struct pt_regs *regs) +{ + int cpu; + + cpu = smp_processor_id(); + crash_save_this_cpu(regs, cpu); +} + #ifdef CONFIG_SMP static atomic_t waiting_for_crash_ipi; @@ -52,7 +117,7 @@ static int crash_nmi_callback(struct notifier_block *self, return NOTIFY_STOP; local_irq_disable(); - crash_save_cpu(regs, cpu); + crash_save_this_cpu(regs, cpu); disable_local_APIC(); atomic_dec(&waiting_for_crash_ipi); /* Assume hlt works */ @@ -131,5 +196,5 @@ void machine_crash_shutdown(struct pt_regs *regs) disable_IO_APIC(); - crash_save_cpu(regs, smp_processor_id()); + crash_save_self(regs); } diff --git a/trunk/arch/x86_64/kernel/kprobes.c b/trunk/arch/x86_64/kernel/kprobes.c index 209c8c0bec71..ac241567e682 100644 --- a/trunk/arch/x86_64/kernel/kprobes.c +++ b/trunk/arch/x86_64/kernel/kprobes.c @@ -224,7 +224,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { mutex_lock(&kprobe_mutex); - free_insn_slot(p->ainsn.insn, 0); + free_insn_slot(p->ainsn.insn); mutex_unlock(&kprobe_mutex); } diff --git a/trunk/arch/x86_64/kernel/mce.c b/trunk/arch/x86_64/kernel/mce.c index bc863c464a1f..bbea88801d88 100644 --- a/trunk/arch/x86_64/kernel/mce.c +++ b/trunk/arch/x86_64/kernel/mce.c @@ -306,8 +306,8 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status) */ static int check_interval = 5 * 60; /* 5 minutes */ -static void mcheck_timer(struct work_struct *work); -static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer); +static void mcheck_timer(void *data); +static DECLARE_WORK(mcheck_work, mcheck_timer, NULL); static void mcheck_check_cpu(void *info) { @@ -315,7 +315,7 @@ static void mcheck_check_cpu(void *info) do_machine_check(NULL, 0); } -static void mcheck_timer(struct work_struct *work) +static void mcheck_timer(void *data) { on_each_cpu(mcheck_check_cpu, NULL, 1, 1); schedule_delayed_work(&mcheck_work, check_interval * HZ); @@ -641,6 +641,7 @@ static __cpuinit int mce_create_device(unsigned int cpu) return err; } +#ifdef CONFIG_HOTPLUG_CPU static void mce_remove_device(unsigned int cpu) { int i; @@ -673,6 +674,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned 
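append_elf_note() lays each note out as header, name, desc, advancing a u32 pointer, so every piece is rounded up to a whole number of 32-bit words. A hypothetical helper capturing the rounding that the hunk open-codes, with a worked example; neither is part of the patch:

    /* bytes -> number of u32 words, rounding up */
    static inline unsigned int note_words(size_t bytes)
    {
            return (bytes + 3) / 4;
    }
    /* e.g. name "CORE": n_namesz = strlen("CORE") + 1 = 5, note_words(5) = 2,
     * so the name occupies 8 bytes, 3 of them padding after the NUL.
     * A desc that is already a multiple of 4 (say 336 bytes) maps to
     * (336 + 3) / 4 = 84 words with no padding at all. */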
long action, void *hcpu) static struct notifier_block mce_cpu_notifier = { .notifier_call = mce_cpu_callback, }; +#endif static __init int mce_init_device(void) { diff --git a/trunk/arch/x86_64/kernel/mce_amd.c b/trunk/arch/x86_64/kernel/mce_amd.c index fa09debad4b7..883fe747f64c 100644 --- a/trunk/arch/x86_64/kernel/mce_amd.c +++ b/trunk/arch/x86_64/kernel/mce_amd.c @@ -551,6 +551,7 @@ static __cpuinit int threshold_create_device(unsigned int cpu) return err; } +#ifdef CONFIG_HOTPLUG_CPU /* * let's be hotplug friendly. * in case of multiple core processors, the first core always takes ownership @@ -593,14 +594,12 @@ static void threshold_remove_bank(unsigned int cpu, int bank) sprintf(name, "threshold_bank%i", bank); -#ifdef CONFIG_SMP /* sibling symlink */ if (shared_bank[bank] && b->blocks->cpu != cpu) { sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name); per_cpu(threshold_banks, cpu)[bank] = NULL; return; } -#endif /* remove all sibling symlinks before unregistering */ for_each_cpu_mask(i, b->cpus) { @@ -657,6 +656,7 @@ static int threshold_cpu_callback(struct notifier_block *nfb, static struct notifier_block threshold_cpu_notifier = { .notifier_call = threshold_cpu_callback, }; +#endif /* CONFIG_HOTPLUG_CPU */ static __init int threshold_init_device(void) { diff --git a/trunk/arch/x86_64/kernel/setup.c b/trunk/arch/x86_64/kernel/setup.c index f12f266f3e98..fc944b5e8f4a 100644 --- a/trunk/arch/x86_64/kernel/setup.c +++ b/trunk/arch/x86_64/kernel/setup.c @@ -471,7 +471,8 @@ void __init setup_arch(char **cmdline_p) if (LOADER_TYPE && INITRD_START) { if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) { reserve_bootmem_generic(INITRD_START, INITRD_SIZE); - initrd_start = INITRD_START + PAGE_OFFSET; + initrd_start = + INITRD_START ? INITRD_START + PAGE_OFFSET : 0; initrd_end = initrd_start+INITRD_SIZE; } else { diff --git a/trunk/arch/x86_64/kernel/smp.c b/trunk/arch/x86_64/kernel/smp.c index 32f4d7e2a060..9f74c883568c 100644 --- a/trunk/arch/x86_64/kernel/smp.c +++ b/trunk/arch/x86_64/kernel/smp.c @@ -379,10 +379,6 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info, put_cpu(); return 0; } - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - spin_lock_bh(&call_lock); __smp_call_function_single(cpu, func, info, nonatomic, wait); spin_unlock_bh(&call_lock); diff --git a/trunk/arch/x86_64/kernel/smpboot.c b/trunk/arch/x86_64/kernel/smpboot.c index 9800147c4c68..62c2e747af58 100644 --- a/trunk/arch/x86_64/kernel/smpboot.c +++ b/trunk/arch/x86_64/kernel/smpboot.c @@ -753,16 +753,14 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta } struct create_idle { - struct work_struct work; struct task_struct *idle; struct completion done; int cpu; }; -void do_fork_idle(struct work_struct *work) +void do_fork_idle(void *_c_idle) { - struct create_idle *c_idle = - container_of(work, struct create_idle, work); + struct create_idle *c_idle = _c_idle; c_idle->idle = fork_idle(c_idle->cpu); complete(&c_idle->done); @@ -777,10 +775,10 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid) int timeout; unsigned long start_rip; struct create_idle c_idle = { - .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle), .cpu = cpu, .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), }; + DECLARE_WORK(work, do_fork_idle, &c_idle); /* allocate memory for gdts of secondary cpus. 
Hotplug is considered */ if (!cpu_gdt_descr[cpu].address && @@ -827,9 +825,9 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid) * thread. */ if (!keventd_up() || current_is_keventd()) - c_idle.work.func(&c_idle.work); + work.func(work.data); else { - schedule_work(&c_idle.work); + schedule_work(&work); wait_for_completion(&c_idle.done); } diff --git a/trunk/arch/x86_64/kernel/time.c b/trunk/arch/x86_64/kernel/time.c index 9f05bc9b2dad..e3ef544d2cfb 100644 --- a/trunk/arch/x86_64/kernel/time.c +++ b/trunk/arch/x86_64/kernel/time.c @@ -563,7 +563,7 @@ static unsigned int cpufreq_delayed_issched = 0; static unsigned int cpufreq_init = 0; static struct work_struct cpufreq_delayed_get_work; -static void handle_cpufreq_delayed_get(struct work_struct *v) +static void handle_cpufreq_delayed_get(void *v) { unsigned int cpu; for_each_online_cpu(cpu) { @@ -639,7 +639,7 @@ static struct notifier_block time_cpufreq_notifier_block = { static int __init cpufreq_tsc(void) { - INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get); + INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL); if (!cpufreq_register_notifier(&time_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER)) cpufreq_init = 1; diff --git a/trunk/arch/x86_64/kernel/vsyscall.c b/trunk/arch/x86_64/kernel/vsyscall.c index 3785e4954734..92546c1526f1 100644 --- a/trunk/arch/x86_64/kernel/vsyscall.c +++ b/trunk/arch/x86_64/kernel/vsyscall.c @@ -42,7 +42,6 @@ #include #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr))) -#define __syscall_clobber "r11","rcx","memory" int __sysctl_vsyscall __section_sysctl_vsyscall = 1; seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED; @@ -275,6 +274,7 @@ static void __cpuinit cpu_vsyscall_init(void *arg) vsyscall_set_cpu(raw_smp_processor_id()); } +#ifdef CONFIG_HOTPLUG_CPU static int __cpuinit cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) { @@ -283,6 +283,7 @@ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1); return NOTIFY_DONE; } +#endif static void __init map_vsyscall(void) { diff --git a/trunk/block/as-iosched.c b/trunk/block/as-iosched.c index 5934c4bfd52a..00242111a457 100644 --- a/trunk/block/as-iosched.c +++ b/trunk/block/as-iosched.c @@ -1274,10 +1274,9 @@ static void as_merged_requests(request_queue_t *q, struct request *req, * * FIXME! dispatch queue is not a queue at all! 
*/ -static void as_work_handler(struct work_struct *work) +static void as_work_handler(void *data) { - struct as_data *ad = container_of(work, struct as_data, antic_work); - struct request_queue *q = ad->q; + struct request_queue *q = data; unsigned long flags; spin_lock_irqsave(q->queue_lock, flags); @@ -1333,7 +1332,7 @@ static void *as_init_queue(request_queue_t *q) ad->antic_timer.function = as_antic_timeout; ad->antic_timer.data = (unsigned long)q; init_timer(&ad->antic_timer); - INIT_WORK(&ad->antic_work, as_work_handler); + INIT_WORK(&ad->antic_work, as_work_handler, q); INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]); INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); diff --git a/trunk/block/blktrace.c b/trunk/block/blktrace.c index d3679dd1d220..74e02c04b2da 100644 --- a/trunk/block/blktrace.c +++ b/trunk/block/blktrace.c @@ -394,7 +394,8 @@ static int blk_trace_setup(request_queue_t *q, struct block_device *bdev, if (bt) { if (bt->dropped_file) debugfs_remove(bt->dropped_file); - free_percpu(bt->sequence); + if (bt->sequence) + free_percpu(bt->sequence); if (bt->rchan) relay_close(bt->rchan); kfree(bt); diff --git a/trunk/block/cfq-iosched.c b/trunk/block/cfq-iosched.c index 78c6b312bd30..e9019ed39b73 100644 --- a/trunk/block/cfq-iosched.c +++ b/trunk/block/cfq-iosched.c @@ -43,8 +43,8 @@ static int cfq_slice_idle = HZ / 125; #define RQ_CIC(rq) ((struct cfq_io_context*)(rq)->elevator_private) #define RQ_CFQQ(rq) ((rq)->elevator_private2) -static struct kmem_cache *cfq_pool; -static struct kmem_cache *cfq_ioc_pool; +static kmem_cache_t *cfq_pool; +static kmem_cache_t *cfq_ioc_pool; static DEFINE_PER_CPU(unsigned long, ioc_count); static struct completion *ioc_gone; @@ -1840,11 +1840,9 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask) return 1; } -static void cfq_kick_queue(struct work_struct *work) +static void cfq_kick_queue(void *data) { - struct cfq_data *cfqd = - container_of(work, struct cfq_data, unplug_work); - request_queue_t *q = cfqd->queue; + request_queue_t *q = data; unsigned long flags; spin_lock_irqsave(q->queue_lock, flags); @@ -1988,7 +1986,7 @@ static void *cfq_init_queue(request_queue_t *q) cfqd->idle_class_timer.function = cfq_idle_class_timer; cfqd->idle_class_timer.data = (unsigned long) cfqd; - INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); + INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q); cfqd->cfq_quantum = cfq_quantum; cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; diff --git a/trunk/block/ll_rw_blk.c b/trunk/block/ll_rw_blk.c index 31512cd9f3ad..0f82e12f7b67 100644 --- a/trunk/block/ll_rw_blk.c +++ b/trunk/block/ll_rw_blk.c @@ -34,7 +34,7 @@ */ #include -static void blk_unplug_work(struct work_struct *work); +static void blk_unplug_work(void *data); static void blk_unplug_timeout(unsigned long data); static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); static void init_request_from_bio(struct request *req, struct bio *bio); @@ -44,17 +44,17 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node); /* * For the allocated request tables */ -static struct kmem_cache *request_cachep; +static kmem_cache_t *request_cachep; /* * For queue allocation */ -static struct kmem_cache *requestq_cachep; +static kmem_cache_t *requestq_cachep; /* * For io context allocations */ -static struct kmem_cache *iocontext_cachep; +static kmem_cache_t *iocontext_cachep; /* * Controlling structure to kblockd @@ -227,7 +227,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) if (q->unplug_delay == 0) 
q->unplug_delay = 1; - INIT_WORK(&q->unplug_work, blk_unplug_work); + INIT_WORK(&q->unplug_work, blk_unplug_work, q); q->unplug_timer.function = blk_unplug_timeout; q->unplug_timer.data = (unsigned long)q; @@ -1631,9 +1631,9 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi, } } -static void blk_unplug_work(struct work_struct *work) +static void blk_unplug_work(void *data) { - request_queue_t *q = container_of(work, request_queue_t, unplug_work); + request_queue_t *q = data; blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, q->rq.count[READ] + q->rq.count[WRITE]); @@ -3459,6 +3459,8 @@ static void blk_done_softirq(struct softirq_action *h) } } +#ifdef CONFIG_HOTPLUG_CPU + static int blk_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { @@ -3484,6 +3486,8 @@ static struct notifier_block __devinitdata blk_cpu_notifier = { .notifier_call = blk_cpu_notify, }; +#endif /* CONFIG_HOTPLUG_CPU */ + /** * blk_complete_request - end I/O on a request * @req: the request being processed diff --git a/trunk/crypto/cryptomgr.c b/trunk/crypto/cryptomgr.c index 2ebffb84f1d9..9b5b15601068 100644 --- a/trunk/crypto/cryptomgr.c +++ b/trunk/crypto/cryptomgr.c @@ -40,10 +40,9 @@ struct cryptomgr_param { char template[CRYPTO_MAX_ALG_NAME]; }; -static void cryptomgr_probe(struct work_struct *work) +static void cryptomgr_probe(void *data) { - struct cryptomgr_param *param = - container_of(work, struct cryptomgr_param, work); + struct cryptomgr_param *param = data; struct crypto_template *tmpl; struct crypto_instance *inst; int err; @@ -113,7 +112,7 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) param->larval.type = larval->alg.cra_flags; param->larval.mask = larval->mask; - INIT_WORK(¶m->work, cryptomgr_probe); + INIT_WORK(¶m->work, cryptomgr_probe, param); schedule_work(¶m->work); return NOTIFY_STOP; diff --git a/trunk/drivers/acpi/osl.c b/trunk/drivers/acpi/osl.c index 02b30ae6a68e..068fe4f100b0 100644 --- a/trunk/drivers/acpi/osl.c +++ b/trunk/drivers/acpi/osl.c @@ -50,7 +50,6 @@ ACPI_MODULE_NAME("osl") struct acpi_os_dpc { acpi_osd_exec_callback function; void *context; - struct work_struct work; }; #ifdef CONFIG_ACPI_CUSTOM_DSDT @@ -565,9 +564,12 @@ void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */ acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number); } -static void acpi_os_execute_deferred(struct work_struct *work) +static void acpi_os_execute_deferred(void *context) { - struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work); + struct acpi_os_dpc *dpc = NULL; + + + dpc = (struct acpi_os_dpc *)context; if (!dpc) { printk(KERN_ERR PREFIX "Invalid (NULL) context\n"); return; @@ -600,6 +602,7 @@ acpi_status acpi_os_execute(acpi_execute_type type, { acpi_status status = AE_OK; struct acpi_os_dpc *dpc; + struct work_struct *task; ACPI_FUNCTION_TRACE("os_queue_for_execution"); @@ -612,22 +615,28 @@ acpi_status acpi_os_execute(acpi_execute_type type, /* * Allocate/initialize DPC structure. Note that this memory will be - * freed by the callee. The kernel handles the work_struct list in a + * freed by the callee. The kernel handles the tq_struct list in a * way that allows us to also free its memory inside the callee. * Because we may want to schedule several tasks with different * parameters we can't use the approach some kernel code uses of - * having a static work_struct. + * having a static tq_struct. + * We can save time and code by allocating the DPC and tq_structs + * from the same memory. 
*/ - dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC); + dpc = + kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct work_struct), + GFP_ATOMIC); if (!dpc) return_ACPI_STATUS(AE_NO_MEMORY); dpc->function = function; dpc->context = context; - INIT_WORK(&dpc->work, acpi_os_execute_deferred); - if (!queue_work(kacpid_wq, &dpc->work)) { + task = (void *)(dpc + 1); + INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc); + + if (!queue_work(kacpid_wq, task)) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Call to queue_work() failed.\n")); kfree(dpc); diff --git a/trunk/drivers/ata/libata-core.c b/trunk/drivers/ata/libata-core.c index 8816e30fb7a4..f8ec3896b793 100644 --- a/trunk/drivers/ata/libata-core.c +++ b/trunk/drivers/ata/libata-core.c @@ -1081,7 +1081,7 @@ static unsigned int ata_id_xfermask(const u16 *id) * ata_port_queue_task - Queue port_task * @ap: The ata_port to queue port_task for * @fn: workqueue function to be scheduled - * @data: data for @fn to use + * @data: data value to pass to workqueue function * @delay: delay time for workqueue function * * Schedule @fn(@data) for execution after @delay jiffies using @@ -1096,7 +1096,7 @@ static unsigned int ata_id_xfermask(const u16 *id) * LOCKING: * Inherited from caller. */ -void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data, +void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data, unsigned long delay) { int rc; @@ -1104,10 +1104,12 @@ void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data, if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK) return; - PREPARE_DELAYED_WORK(&ap->port_task, fn); - ap->port_task_data = data; + PREPARE_WORK(&ap->port_task, fn, data); - rc = queue_delayed_work(ata_wq, &ap->port_task, delay); + if (!delay) + rc = queue_work(ata_wq, &ap->port_task); + else + rc = queue_delayed_work(ata_wq, &ap->port_task, delay); /* rc == 0 means that another user is using port task */ WARN_ON(rc == 0); @@ -4586,11 +4588,10 @@ int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, return poll_next; } -static void ata_pio_task(struct work_struct *work) +static void ata_pio_task(void *_data) { - struct ata_port *ap = - container_of(work, struct ata_port, port_task.work); - struct ata_queued_cmd *qc = ap->port_task_data; + struct ata_queued_cmd *qc = _data; + struct ata_port *ap = qc->ap; u8 status; int poll_next; @@ -5634,9 +5635,9 @@ void ata_port_init(struct ata_port *ap, struct ata_host *host, ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; #endif - INIT_DELAYED_WORK(&ap->port_task, NULL); - INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); - INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); + INIT_WORK(&ap->port_task, NULL, NULL); + INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap); + INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap); INIT_LIST_HEAD(&ap->eh_done_q); init_waitqueue_head(&ap->eh_wait_q); diff --git a/trunk/drivers/ata/libata-eh.c b/trunk/drivers/ata/libata-eh.c index 08ad44b3e48f..76a85dfb7307 100644 --- a/trunk/drivers/ata/libata-eh.c +++ b/trunk/drivers/ata/libata-eh.c @@ -332,7 +332,7 @@ void ata_scsi_error(struct Scsi_Host *host) if (ap->pflags & ATA_PFLAG_LOADING) ap->pflags &= ~ATA_PFLAG_LOADING; else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) - queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0); + queue_work(ata_aux_wq, &ap->hotplug_task); if (ap->pflags & ATA_PFLAG_RECOVERED) ata_port_printk(ap, KERN_INFO, "EH complete\n"); diff --git a/trunk/drivers/ata/libata-scsi.c b/trunk/drivers/ata/libata-scsi.c index 
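The osl.c hunk above drops the work_struct from struct acpi_os_dpc and instead carves it out of the tail of the same kmalloc() as the DPC, so one allocation covers both. Condensed from the hunk (error reporting trimmed):

        struct acpi_os_dpc *dpc;
        struct work_struct *task;

        /* one allocation holds the DPC and the work item that will run it */
        dpc = kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct work_struct),
                      GFP_ATOMIC);
        if (!dpc)
                return_ACPI_STATUS(AE_NO_MEMORY);

        dpc->function = function;
        dpc->context  = context;

        task = (struct work_struct *)(dpc + 1);   /* first byte past the DPC */
        INIT_WORK(task, acpi_os_execute_deferred, dpc);

        if (!queue_work(kacpid_wq, task))
                kfree(dpc);                       /* nothing queued; drop both */

Per the comment in the hunk, the deferred handler frees dpc when it finishes, which releases the embedded work_struct at the same moment, so the work item must not be touched after the handler returns.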
664e1377b54c..8eaace94d963 100644 --- a/trunk/drivers/ata/libata-scsi.c +++ b/trunk/drivers/ata/libata-scsi.c @@ -2963,7 +2963,7 @@ static void ata_scsi_remove_dev(struct ata_device *dev) /** * ata_scsi_hotplug - SCSI part of hotplug - * @work: Pointer to ATA port to perform SCSI hotplug on + * @data: Pointer to ATA port to perform SCSI hotplug on * * Perform SCSI part of hotplug. It's executed from a separate * workqueue after EH completes. This is necessary because SCSI @@ -2973,10 +2973,9 @@ static void ata_scsi_remove_dev(struct ata_device *dev) * LOCKING: * Kernel thread context (may sleep). */ -void ata_scsi_hotplug(struct work_struct *work) +void ata_scsi_hotplug(void *data) { - struct ata_port *ap = - container_of(work, struct ata_port, hotplug_task.work); + struct ata_port *ap = data; int i; if (ap->pflags & ATA_PFLAG_UNLOADING) { @@ -3077,7 +3076,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, /** * ata_scsi_dev_rescan - initiate scsi_rescan_device() - * @work: Pointer to ATA port to perform scsi_rescan_device() + * @data: Pointer to ATA port to perform scsi_rescan_device() * * After ATA pass thru (SAT) commands are executed successfully, * libata need to propagate the changes to SCSI layer. This @@ -3087,10 +3086,9 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, * LOCKING: * Kernel thread context (may sleep). */ -void ata_scsi_dev_rescan(struct work_struct *work) +void ata_scsi_dev_rescan(void *data) { - struct ata_port *ap = - container_of(work, struct ata_port, scsi_rescan_task); + struct ata_port *ap = data; unsigned long flags; unsigned int i; diff --git a/trunk/drivers/ata/libata.h b/trunk/drivers/ata/libata.h index 81ae41d5f23f..107b2b565229 100644 --- a/trunk/drivers/ata/libata.h +++ b/trunk/drivers/ata/libata.h @@ -94,7 +94,7 @@ extern struct scsi_transport_template ata_scsi_transport_template; extern void ata_scsi_scan_host(struct ata_port *ap); extern int ata_scsi_offline_dev(struct ata_device *dev); -extern void ata_scsi_hotplug(struct work_struct *work); +extern void ata_scsi_hotplug(void *data); extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, unsigned int buflen); @@ -124,7 +124,7 @@ extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args, unsigned int (*actor) (struct ata_scsi_args *args, u8 *rbuf, unsigned int buflen)); extern void ata_schedule_scsi_eh(struct Scsi_Host *shost); -extern void ata_scsi_dev_rescan(struct work_struct *work); +extern void ata_scsi_dev_rescan(void *data); extern int ata_bus_probe(struct ata_port *ap); /* libata-eh.c */ diff --git a/trunk/drivers/atm/he.c b/trunk/drivers/atm/he.c index 7d9b4e52f0bf..c7314a79da0f 100644 --- a/trunk/drivers/atm/he.c +++ b/trunk/drivers/atm/he.c @@ -820,7 +820,7 @@ he_init_group(struct he_dev *he_dev, int group) void *cpuaddr; #ifdef USE_RBPS_POOL - cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle); + cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle); if (cpuaddr == NULL) return -ENOMEM; #else @@ -884,7 +884,7 @@ he_init_group(struct he_dev *he_dev, int group) void *cpuaddr; #ifdef USE_RBPL_POOL - cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle); + cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle); if (cpuaddr == NULL) return -ENOMEM; #else @@ -1724,7 +1724,7 @@ __alloc_tpd(struct he_dev *he_dev) struct he_tpd *tpd; dma_addr_t dma_handle; - tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, 
&dma_handle); + tpd = pci_pool_alloc(he_dev->tpd_pool, SLAB_ATOMIC|SLAB_DMA, &dma_handle); if (tpd == NULL) return NULL; diff --git a/trunk/drivers/atm/idt77252.c b/trunk/drivers/atm/idt77252.c index f40786121948..87b17c33b3f9 100644 --- a/trunk/drivers/atm/idt77252.c +++ b/trunk/drivers/atm/idt77252.c @@ -135,7 +135,7 @@ static int idt77252_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags); static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos, char *page); -static void idt77252_softint(struct work_struct *work); +static void idt77252_softint(void *dev_id); static struct atmdev_ops idt77252_ops = @@ -2866,10 +2866,9 @@ idt77252_interrupt(int irq, void *dev_id) } static void -idt77252_softint(struct work_struct *work) +idt77252_softint(void *dev_id) { - struct idt77252_dev *card = - container_of(work, struct idt77252_dev, tqueue); + struct idt77252_dev *card = dev_id; u32 stat; int done; @@ -3698,7 +3697,7 @@ idt77252_init_one(struct pci_dev *pcidev, const struct pci_device_id *id) card->pcidev = pcidev; sprintf(card->name, "idt77252-%d", card->index); - INIT_WORK(&card->tqueue, idt77252_softint); + INIT_WORK(&card->tqueue, idt77252_softint, (void *)card); membase = pci_resource_start(pcidev, 1); srambase = pci_resource_start(pcidev, 2); diff --git a/trunk/drivers/base/core.c b/trunk/drivers/base/core.c index 67b79a7592a9..e4b530ef757d 100644 --- a/trunk/drivers/base/core.c +++ b/trunk/drivers/base/core.c @@ -386,7 +386,6 @@ void device_initialize(struct device *dev) INIT_LIST_HEAD(&dev->node); init_MUTEX(&dev->sem); device_init_wakeup(dev, 0); - set_dev_node(dev, -1); } #ifdef CONFIG_SYSFS_DEPRECATED diff --git a/trunk/drivers/base/dmapool.c b/trunk/drivers/base/dmapool.c index dbe0735f8c9e..b2efbd4cf710 100644 --- a/trunk/drivers/base/dmapool.c +++ b/trunk/drivers/base/dmapool.c @@ -126,7 +126,7 @@ dma_pool_create (const char *name, struct device *dev, } else if (allocation < size) return NULL; - if (!(retval = kmalloc (sizeof *retval, GFP_KERNEL))) + if (!(retval = kmalloc (sizeof *retval, SLAB_KERNEL))) return retval; strlcpy (retval->name, name, sizeof retval->name); @@ -297,7 +297,7 @@ dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle) } } } - if (!(page = pool_alloc_page (pool, GFP_ATOMIC))) { + if (!(page = pool_alloc_page (pool, SLAB_ATOMIC))) { if (mem_flags & __GFP_WAIT) { DECLARE_WAITQUEUE (wait, current); diff --git a/trunk/drivers/base/memory.c b/trunk/drivers/base/memory.c index 74b96795d2f5..c6b7d9c4b651 100644 --- a/trunk/drivers/base/memory.c +++ b/trunk/drivers/base/memory.c @@ -290,8 +290,9 @@ static CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL); static int block_size_init(void) { - return sysfs_create_file(&memory_sysdev_class.kset.kobj, - &class_attr_block_size_bytes.attr); + sysfs_create_file(&memory_sysdev_class.kset.kobj, + &class_attr_block_size_bytes.attr); + return 0; } /* @@ -322,14 +323,12 @@ static CLASS_ATTR(probe, 0700, NULL, memory_probe_store); static int memory_probe_init(void) { - return sysfs_create_file(&memory_sysdev_class.kset.kobj, - &class_attr_probe.attr); -} -#else -static inline int memory_probe_init(void) -{ + sysfs_create_file(&memory_sysdev_class.kset.kobj, + &class_attr_probe.attr); return 0; } +#else +#define memory_probe_init(...) 
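The pci_pool_alloc() call sites above (he.c and dmapool.c, plus DAC960.c further down) go back to the SLAB_KERNEL/SLAB_ATOMIC/SLAB_DMA spellings; in kernels of this vintage those are defined as aliases for the corresponding GFP_ flags, so behaviour is unchanged. A minimal pci_pool round trip with illustrative names:

        struct pci_pool *pool;
        dma_addr_t dma;
        void *desc;

        /* 64-byte descriptors, 16-byte aligned, no boundary-crossing constraint */
        pool = pci_pool_create("mydev_desc", pdev, 64, 16, 0);
        if (!pool)
                return -ENOMEM;

        desc = pci_pool_alloc(pool, SLAB_KERNEL, &dma); /* may sleep */
        if (!desc) {
                pci_pool_destroy(pool);
                return -ENOMEM;
        }

        /* program "dma" into the device, fill "desc" from the CPU side */

        pci_pool_free(pool, desc, dma);
        pci_pool_destroy(pool);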
do {} while (0) #endif /* @@ -432,12 +431,9 @@ int __init memory_dev_init(void) { unsigned int i; int ret; - int err; memory_sysdev_class.kset.uevent_ops = &memory_uevent_ops; ret = sysdev_class_register(&memory_sysdev_class); - if (ret) - goto out; /* * Create entries for memory sections that were found @@ -446,19 +442,11 @@ int __init memory_dev_init(void) for (i = 0; i < NR_MEM_SECTIONS; i++) { if (!valid_section_nr(i)) continue; - err = add_memory_block(0, __nr_to_section(i), MEM_ONLINE, 0); - if (!ret) - ret = err; + add_memory_block(0, __nr_to_section(i), MEM_ONLINE, 0); } - err = memory_probe_init(); - if (!ret) - ret = err; - err = block_size_init(); - if (!ret) - ret = err; -out: - if (ret) - printk(KERN_ERR "%s() failed: %d\n", __FUNCTION__, ret); + memory_probe_init(); + block_size_init(); + return ret; } diff --git a/trunk/drivers/base/topology.c b/trunk/drivers/base/topology.c index 067a9e8bc377..3d12b85b0962 100644 --- a/trunk/drivers/base/topology.c +++ b/trunk/drivers/base/topology.c @@ -108,6 +108,7 @@ static int __cpuinit topology_add_dev(unsigned int cpu) return rc; } +#ifdef CONFIG_HOTPLUG_CPU static void __cpuinit topology_remove_dev(unsigned int cpu) { struct sys_device *sys_dev = get_cpu_sysdev(cpu); @@ -135,6 +136,7 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb, } return rc ? NOTIFY_BAD : NOTIFY_OK; } +#endif static int __cpuinit topology_sysfs_init(void) { diff --git a/trunk/drivers/block/DAC960.c b/trunk/drivers/block/DAC960.c index 8d81a3a64c07..742d07403101 100644 --- a/trunk/drivers/block/DAC960.c +++ b/trunk/drivers/block/DAC960.c @@ -324,13 +324,13 @@ static boolean DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller) Command->Next = Controller->FreeCommands; Controller->FreeCommands = Command; Controller->Commands[CommandIdentifier-1] = Command; - ScatterGatherCPU = pci_pool_alloc(ScatterGatherPool, GFP_ATOMIC, + ScatterGatherCPU = pci_pool_alloc(ScatterGatherPool, SLAB_ATOMIC, &ScatterGatherDMA); if (ScatterGatherCPU == NULL) return DAC960_Failure(Controller, "AUXILIARY STRUCTURE CREATION"); if (RequestSensePool != NULL) { - RequestSenseCPU = pci_pool_alloc(RequestSensePool, GFP_ATOMIC, + RequestSenseCPU = pci_pool_alloc(RequestSensePool, SLAB_ATOMIC, &RequestSenseDMA); if (RequestSenseCPU == NULL) { pci_pool_free(ScatterGatherPool, ScatterGatherCPU, diff --git a/trunk/drivers/block/Kconfig b/trunk/drivers/block/Kconfig index 432c17e1e0e6..17dc22282e14 100644 --- a/trunk/drivers/block/Kconfig +++ b/trunk/drivers/block/Kconfig @@ -168,8 +168,7 @@ config BLK_CPQ_CISS_DA config CISS_SCSI_TAPE bool "SCSI tape drive support for Smart Array 5xxx" - depends on BLK_CPQ_CISS_DA && PROC_FS - depends on SCSI=y || SCSI=BLK_CPQ_CISS_DA + depends on BLK_CPQ_CISS_DA && SCSI && PROC_FS help When enabled (Y), this option allows SCSI tape drives and SCSI medium changers (tape robots) to be accessed via a Compaq 5xxx array diff --git a/trunk/drivers/block/aoe/aoe.h b/trunk/drivers/block/aoe/aoe.h index 2308e83e5f33..6d111228cfac 100644 --- a/trunk/drivers/block/aoe/aoe.h +++ b/trunk/drivers/block/aoe/aoe.h @@ -159,7 +159,7 @@ void aoecmd_work(struct aoedev *d); void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor); void aoecmd_ata_rsp(struct sk_buff *); void aoecmd_cfg_rsp(struct sk_buff *); -void aoecmd_sleepwork(struct work_struct *); +void aoecmd_sleepwork(void *vp); struct sk_buff *new_skb(ulong); int aoedev_init(void); diff --git a/trunk/drivers/block/aoe/aoeblk.c b/trunk/drivers/block/aoe/aoeblk.c index 
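topology.c above (like vsyscall.c and ll_rw_blk.c earlier) brackets its CPU notifier in #ifdef CONFIG_HOTPLUG_CPU so the callback and its notifier_block compile out on kernels that cannot offline CPUs. The shape of that pattern, with an illustrative notifier:

#ifdef CONFIG_HOTPLUG_CPU
static int __cpuinit my_cpu_notify(struct notifier_block *self,
                                   unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        if (action == CPU_DEAD)
                my_percpu_teardown(cpu);        /* illustrative per-CPU cleanup */
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata my_cpu_notifier = {
        .notifier_call = my_cpu_notify,
};
#endif /* CONFIG_HOTPLUG_CPU */

static int __init my_subsys_init(void)
{
#ifdef CONFIG_HOTPLUG_CPU
        register_cpu_notifier(&my_cpu_notifier);
#endif
        return 0;
}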
478489c568a4..aa25f8b09fe3 100644 --- a/trunk/drivers/block/aoe/aoeblk.c +++ b/trunk/drivers/block/aoe/aoeblk.c @@ -12,7 +12,7 @@ #include #include "aoe.h" -static struct kmem_cache *buf_pool_cache; +static kmem_cache_t *buf_pool_cache; static ssize_t aoedisk_show_state(struct gendisk * disk, char *page) { diff --git a/trunk/drivers/block/aoe/aoecmd.c b/trunk/drivers/block/aoe/aoecmd.c index 97f7f535f412..8a13b1af8bab 100644 --- a/trunk/drivers/block/aoe/aoecmd.c +++ b/trunk/drivers/block/aoe/aoecmd.c @@ -408,9 +408,9 @@ rexmit_timer(ulong vp) /* this function performs work that has been deferred until sleeping is OK */ void -aoecmd_sleepwork(struct work_struct *work) +aoecmd_sleepwork(void *vp) { - struct aoedev *d = container_of(work, struct aoedev, work); + struct aoedev *d = (struct aoedev *) vp; if (d->flags & DEVFL_GDALLOC) aoeblk_gdalloc(d); diff --git a/trunk/drivers/block/aoe/aoedev.c b/trunk/drivers/block/aoe/aoedev.c index 05a97197c918..6125921bbec4 100644 --- a/trunk/drivers/block/aoe/aoedev.c +++ b/trunk/drivers/block/aoe/aoedev.c @@ -88,7 +88,7 @@ aoedev_newdev(ulong nframes) kfree(d); return NULL; } - INIT_WORK(&d->work, aoecmd_sleepwork); + INIT_WORK(&d->work, aoecmd_sleepwork, d); spin_lock_init(&d->lock); init_timer(&d->timer); d->timer.data = (ulong) d; diff --git a/trunk/drivers/block/cciss.c b/trunk/drivers/block/cciss.c index 892e092afe9a..4105c3bf3476 100644 --- a/trunk/drivers/block/cciss.c +++ b/trunk/drivers/block/cciss.c @@ -47,15 +47,14 @@ #include #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin)) -#define DRIVER_NAME "HP CISS Driver (v 3.6.14)" -#define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,14) +#define DRIVER_NAME "HP CISS Driver (v 3.6.10)" +#define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,10) /* Embedded module documentation macros - see modules.h */ MODULE_AUTHOR("Hewlett-Packard Company"); -MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.14"); +MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.10"); MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400" " SA6i P600 P800 P400 P400i E200 E200i E500"); -MODULE_VERSION("3.6.14"); MODULE_LICENSE("GPL"); #include "cciss_cmd.h" @@ -82,9 +81,7 @@ static const struct pci_device_id cciss_pci_device_id[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237}, - {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3233}, {0,} }; @@ -93,29 +90,27 @@ MODULE_DEVICE_TABLE(pci, cciss_pci_device_id); /* board_id = Subsystem Device ID & Vendor ID * product = Marketing Name for the board * access = Address of the struct of function pointers - * nr_cmds = Number of commands supported by controller */ static struct board_type products[] = { - {0x40700E11, "Smart Array 5300", &SA5_access, 512}, - {0x40800E11, "Smart Array 5i", &SA5B_access, 512}, - {0x40820E11, "Smart Array 532", &SA5B_access, 512}, - {0x40830E11, "Smart Array 5312", &SA5B_access, 512}, - {0x409A0E11, "Smart Array 641", &SA5_access, 512}, - {0x409B0E11, "Smart Array 642", &SA5_access, 512}, - {0x409C0E11, "Smart Array 6400", &SA5_access, 512}, - {0x409D0E11, "Smart Array 6400 EM", &SA5_access, 512}, - {0x40910E11, "Smart Array 6i", &SA5_access, 512}, - {0x3225103C, "Smart Array 
P600", &SA5_access, 512}, - {0x3223103C, "Smart Array P800", &SA5_access, 512}, - {0x3234103C, "Smart Array P400", &SA5_access, 512}, - {0x3235103C, "Smart Array P400i", &SA5_access, 512}, - {0x3211103C, "Smart Array E200i", &SA5_access, 120}, - {0x3212103C, "Smart Array E200", &SA5_access, 120}, - {0x3213103C, "Smart Array E200i", &SA5_access, 120}, - {0x3214103C, "Smart Array E200i", &SA5_access, 120}, - {0x3215103C, "Smart Array E200i", &SA5_access, 120}, - {0x3237103C, "Smart Array E500", &SA5_access, 512}, - {0xFFFF103C, "Unknown Smart Array", &SA5_access, 120}, + {0x40700E11, "Smart Array 5300", &SA5_access}, + {0x40800E11, "Smart Array 5i", &SA5B_access}, + {0x40820E11, "Smart Array 532", &SA5B_access}, + {0x40830E11, "Smart Array 5312", &SA5B_access}, + {0x409A0E11, "Smart Array 641", &SA5_access}, + {0x409B0E11, "Smart Array 642", &SA5_access}, + {0x409C0E11, "Smart Array 6400", &SA5_access}, + {0x409D0E11, "Smart Array 6400 EM", &SA5_access}, + {0x40910E11, "Smart Array 6i", &SA5_access}, + {0x3225103C, "Smart Array P600", &SA5_access}, + {0x3223103C, "Smart Array P800", &SA5_access}, + {0x3234103C, "Smart Array P400", &SA5_access}, + {0x3235103C, "Smart Array P400i", &SA5_access}, + {0x3211103C, "Smart Array E200i", &SA5_access}, + {0x3212103C, "Smart Array E200", &SA5_access}, + {0x3213103C, "Smart Array E200i", &SA5_access}, + {0x3214103C, "Smart Array E200i", &SA5_access}, + {0x3215103C, "Smart Array E200i", &SA5_access}, + {0x3233103C, "Smart Array E500", &SA5_access}, }; /* How long to wait (in milliseconds) for board to go into simple mode */ @@ -126,6 +121,7 @@ static struct board_type products[] = { #define MAX_CMD_RETRIES 3 #define READ_AHEAD 1024 +#define NR_CMDS 384 /* #commands that can be outstanding */ #define MAX_CTLR 32 /* Originally cciss driver only supports 8 major numbers */ @@ -141,6 +137,7 @@ static int cciss_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg); static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); +static int revalidate_allvol(ctlr_info_t *host); static int cciss_revalidate(struct gendisk *disk); static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk); static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, @@ -268,7 +265,6 @@ static int cciss_proc_get_info(char *buffer, char **start, off_t offset, "Firmware Version: %c%c%c%c\n" "IRQ: %d\n" "Logical drives: %d\n" - "Max sectors: %d\n" "Current Q depth: %d\n" "Current # commands on controller: %d\n" "Max Q depth since init: %d\n" @@ -279,9 +275,7 @@ static int cciss_proc_get_info(char *buffer, char **start, off_t offset, (unsigned long)h->board_id, h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT], - h->num_luns, - h->cciss_max_sectors, - h->Qdepth, h->commands_outstanding, + h->num_luns, h->Qdepth, h->commands_outstanding, h->maxQsinceinit, h->max_outstanding, h->maxSG); pos += size; @@ -406,8 +400,8 @@ static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool) } else { /* get it out of the controllers pool */ do { - i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); - if (i == h->nr_cmds) + i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS); + if (i == NR_CMDS) return NULL; } while (test_and_set_bit (i & (BITS_PER_LONG - 1), @@ -493,7 +487,7 @@ static int cciss_open(struct inode *inode, struct file *filep) * but I'm already using way to many device nodes to claim another one * for "raw controller". 
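With the per-board nr_cmds field gone, cmd_alloc() above sizes the controller's command pool with the compile-time NR_CMDS again and hands out slots by scanning a bitmap. The retry loop matters: find_first_zero_bit() is not atomic, so test_and_set_bit() confirms the claim and the scan restarts if another CPU grabbed the slot first. Condensed from the hunk:

#define NR_CMDS 384     /* commands that can be outstanding per controller */

        int i;

        do {
                i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
                if (i == NR_CMDS)
                        return NULL;            /* pool exhausted */
        } while (test_and_set_bit(i & (BITS_PER_LONG - 1),
                                  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);

        c = h->cmd_pool + i;                    /* i-th preallocated CommandList_struct */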
*/ - if (drv->heads == 0) { + if (drv->nr_blocks == 0) { if (iminor(inode) != 0) { /* not node 0? */ /* if not node 0 make sure it is a partition = 0 */ if (iminor(inode) & 0x0f) { @@ -856,7 +850,9 @@ static int cciss_ioctl(struct inode *inode, struct file *filep, } case CCISS_REVALIDVOLS: - return rebuild_lun_table(host, NULL); + if (bdev != bdev->bd_contains || drv != host->drv) + return -ENXIO; + return revalidate_allvol(host); case CCISS_GETLUNINFO:{ LogvolInfo_struct luninfo; @@ -1156,6 +1152,75 @@ static int cciss_ioctl(struct inode *inode, struct file *filep, } } +/* + * revalidate_allvol is for online array config utilities. After a + * utility reconfigures the drives in the array, it can use this function + * (through an ioctl) to make the driver zap any previous disk structs for + * that controller and get new ones. + * + * Right now I'm using the getgeometry() function to do this, but this + * function should probably be finer grained and allow you to revalidate one + * particular logical volume (instead of all of them on a particular + * controller). + */ +static int revalidate_allvol(ctlr_info_t *host) +{ + int ctlr = host->ctlr, i; + unsigned long flags; + + spin_lock_irqsave(CCISS_LOCK(ctlr), flags); + if (host->usage_count > 1) { + spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); + printk(KERN_WARNING "cciss: Device busy for volume" + " revalidation (usage=%d)\n", host->usage_count); + return -EBUSY; + } + host->usage_count++; + spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); + + for (i = 0; i < NWD; i++) { + struct gendisk *disk = host->gendisk[i]; + if (disk) { + request_queue_t *q = disk->queue; + + if (disk->flags & GENHD_FL_UP) + del_gendisk(disk); + if (q) + blk_cleanup_queue(q); + } + } + + /* + * Set the partition and block size structures for all volumes + * on this controller to zero. We will reread all of this data + */ + memset(host->drv, 0, sizeof(drive_info_struct) + * CISS_MAX_LUN); + /* + * Tell the array controller not to give us any interrupts while + * we check the new geometry. Then turn interrupts back on when + * we're done. + */ + host->access.set_intr_mask(host, CCISS_INTR_OFF); + cciss_getgeometry(ctlr); + host->access.set_intr_mask(host, CCISS_INTR_ON); + + /* Loop through each real device */ + for (i = 0; i < NWD; i++) { + struct gendisk *disk = host->gendisk[i]; + drive_info_struct *drv = &(host->drv[i]); + /* we must register the controller even if no disks exist */ + /* this is for the online array utilities */ + if (!drv->heads && i) + continue; + blk_queue_hardsect_size(drv->queue, drv->block_size); + set_capacity(disk, drv->nr_blocks); + add_disk(disk); + } + host->usage_count--; + return 0; +} + static inline void complete_buffers(struct bio *bio, int status) { while (bio) { @@ -1178,7 +1243,7 @@ static void cciss_check_queues(ctlr_info_t *h) * in case the interrupt we serviced was from an ioctl and did not * free any new commands. */ - if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) + if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) return; /* We have room on the queue for more commands. Now we need to queue @@ -1197,7 +1262,7 @@ static void cciss_check_queues(ctlr_info_t *h) /* check to see if we have maxed out the number of commands * that can be placed on the queue. 
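revalidate_allvol(), reinstated above, refuses to rebuild the logical-drive structures while anyone else has the controller open: the usage count is checked and bumped under the controller lock, and the geometry is re-read with board interrupts masked so no completions land while the tables are being rewritten. The guard, condensed from the hunk:

        spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
        if (host->usage_count > 1) {
                spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
                return -EBUSY;                  /* someone still holds the device open */
        }
        host->usage_count++;                    /* pin the controller while we rebuild */
        spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);

        /* ... delete gendisks and queues, zero the drive info ... */

        host->access.set_intr_mask(host, CCISS_INTR_OFF);
        cciss_getgeometry(ctlr);                /* no interrupts during the re-read */
        host->access.set_intr_mask(host, CCISS_INTR_ON);

        /* ... re-register the disks ... */
        host->usage_count--;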
*/ - if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) { + if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) { if (curr_queue == start_queue) { h->next_to_run = (start_queue + 1) % (h->highest_lun + 1); @@ -1315,11 +1380,6 @@ static void cciss_update_drive_info(int ctlr, int drv_index) /* if it's the controller it's already added */ if (drv_index) { disk->queue = blk_init_queue(do_cciss_request, &h->lock); - sprintf(disk->disk_name, "cciss/c%dd%d", ctlr, drv_index); - disk->major = h->major; - disk->first_minor = drv_index << NWD_SHIFT; - disk->fops = &cciss_fops; - disk->private_data = &h->drv[drv_index]; /* Set up queue information */ disk->queue->backing_dev_info.ra_pages = READ_AHEAD; @@ -1331,7 +1391,7 @@ static void cciss_update_drive_info(int ctlr, int drv_index) /* This is a limit in the driver and could be eliminated. */ blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES); - blk_queue_max_sectors(disk->queue, h->cciss_max_sectors); + blk_queue_max_sectors(disk->queue, 512); blk_queue_softirq_done(disk->queue, cciss_softirq_done); @@ -1398,6 +1458,11 @@ static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk) /* Set busy_configuring flag for this operation */ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); + if (h->num_luns >= CISS_MAX_LUN) { + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); + return -EINVAL; + } + if (h->busy_configuring) { spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); return -EBUSY; @@ -1430,8 +1495,17 @@ static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk) 0, 0, TYPE_CMD); if (return_code == IO_OK) { - listlength = - be32_to_cpu(*(__u32 *) ld_buff->LUNListLength); + listlength |= + (0xff & (unsigned int)(ld_buff->LUNListLength[0])) + << 24; + listlength |= + (0xff & (unsigned int)(ld_buff->LUNListLength[1])) + << 16; + listlength |= + (0xff & (unsigned int)(ld_buff->LUNListLength[2])) + << 8; + listlength |= + 0xff & (unsigned int)(ld_buff->LUNListLength[3]); } else { /* reading number of logical volumes failed */ printk(KERN_WARNING "cciss: report logical volume" " command failed\n"); @@ -1482,14 +1556,6 @@ static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk) if (drv_index == -1) goto freeret; - /*Check if the gendisk needs to be allocated */ - if (!h->gendisk[drv_index]){ - h->gendisk[drv_index] = alloc_disk(1 << NWD_SHIFT); - if (!h->gendisk[drv_index]){ - printk(KERN_ERR "cciss: could not allocate new disk %d\n", drv_index); - goto mem_msg; - } - } } h->drv[drv_index].LunID = lunid; cciss_update_drive_info(ctlr, drv_index); @@ -1527,7 +1593,6 @@ static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk) static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, int clear_all) { - int i; ctlr_info_t *h = get_host(disk); if (!capable(CAP_SYS_RAWIO)) @@ -1551,35 +1616,9 @@ static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, del_gendisk(disk); if (q) { blk_cleanup_queue(q); - /* Set drv->queue to NULL so that we do not try - * to call blk_start_queue on this queue in the - * interrupt handler - */ drv->queue = NULL; } - /* If clear_all is set then we are deleting the logical - * drive, not just refreshing its info. For drives - * other than disk 0 we will call put_disk. We do not - * do this for disk 0 as we need it to be able to - * configure the controller. - */ - if (clear_all){ - /* This isn't pretty, but we need to find the - * disk in our array and NULL our the pointer. 
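rebuild_lun_table() above assembles the big-endian LUNListLength field byte by byte rather than through be32_to_cpu(). For a four-byte wire field sent most-significant byte first the two forms are equivalent; a small illustration with an assumed lenbuf[4]:

        unsigned char lenbuf[4];        /* wire order: MSB first */
        unsigned int listlength = 0;

        /* open-coded, as in the hunk */
        listlength |= (0xff & (unsigned int)lenbuf[0]) << 24;
        listlength |= (0xff & (unsigned int)lenbuf[1]) << 16;
        listlength |= (0xff & (unsigned int)lenbuf[2]) << 8;
        listlength |=  0xff & (unsigned int)lenbuf[3];

        /* equivalent helper form, provided the field can be read as a __be32 */
        listlength = be32_to_cpu(*(__be32 *)lenbuf);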
- * This is so that we will call alloc_disk if - * this index is used again later. - */ - for (i=0; i < CISS_MAX_LUN; i++){ - if(h->gendisk[i] == disk){ - h->gendisk[i] = NULL; - break; - } - } - put_disk(disk); - } } - } else { - set_capacity(disk, 0); } --h->num_luns; @@ -2097,7 +2136,7 @@ static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete) /* We've sent down an abort or reset, but something else has completed */ - if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) { + if (srl->ncompletions >= (NR_CMDS + 2)) { /* Uh oh. No room to save it for later... */ printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, " "reject list overflow, command lost!\n", ctlr); @@ -2634,7 +2673,7 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id) a1 = a; if ((a & 0x04)) { a2 = (a >> 3); - if (a2 >= h->nr_cmds) { + if (a2 >= NR_CMDS) { printk(KERN_WARNING "cciss: controller cciss%d failed, stopping.\n", h->ctlr); @@ -2788,21 +2827,23 @@ static void __devinit cciss_interrupt_mode(ctlr_info_t *c, if (err > 0) { printk(KERN_WARNING "cciss: only %d MSI-X vectors " "available\n", err); - goto default_int_mode; } else { printk(KERN_WARNING "cciss: MSI-X init failed %d\n", err); - goto default_int_mode; } } if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) { if (!pci_enable_msi(pdev)) { + c->intr[SIMPLE_MODE_INT] = pdev->irq; c->msi_vector = 1; + return; } else { printk(KERN_WARNING "cciss: MSI init failed\n"); + c->intr[SIMPLE_MODE_INT] = pdev->irq; + return; } } -default_int_mode: + default_int_mode: #endif /* CONFIG_PCI_MSI */ /* if we get here we're going to use the default interrupt mode */ c->intr[SIMPLE_MODE_INT] = pdev->irq; @@ -2915,10 +2956,16 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) if (board_id == products[i].board_id) { c->product_name = products[i].product_name; c->access = *(products[i].access); - c->nr_cmds = products[i].nr_cmds; break; } } + if (i == ARRAY_SIZE(products)) { + printk(KERN_WARNING "cciss: Sorry, I don't know how" + " to access the Smart Array controller %08lx\n", + (unsigned long)board_id); + err = -ENODEV; + goto err_out_free_res; + } if ((readb(&c->cfgtable->Signature[0]) != 'C') || (readb(&c->cfgtable->Signature[1]) != 'I') || (readb(&c->cfgtable->Signature[2]) != 'S') || @@ -2927,27 +2974,6 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) err = -ENODEV; goto err_out_free_res; } - /* We didn't find the controller in our list. We know the - * signature is valid. If it's an HP device let's try to - * bind to the device and fire it up. Otherwise we bail. - */ - if (i == ARRAY_SIZE(products)) { - if (subsystem_vendor_id == PCI_VENDOR_ID_HP) { - c->product_name = products[i-1].product_name; - c->access = *(products[i-1].access); - c->nr_cmds = products[i-1].nr_cmds; - printk(KERN_WARNING "cciss: This is an unknown " - "Smart Array controller.\n" - "cciss: Please update to the latest driver " - "available from www.hp.com.\n"); - } else { - printk(KERN_WARNING "cciss: Sorry, I don't know how" - " to access the Smart Array controller %08lx\n" - , (unsigned long)board_id); - err = -ENODEV; - goto err_out_free_res; - } - } #ifdef CONFIG_X86 { /* Need to enable prefetch in the SCSI core for 6400 in x86 */ @@ -2958,17 +2984,6 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) } #endif - /* Disabling DMA prefetch for the P600 - * An ASIC bug may result in a prefetch beyond - * physical memory. 
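cciss_interrupt_mode() above tries MSI-X, then plain MSI, and otherwise falls back to the legacy INTx vector. One detail worth noting: once pci_enable_msi() succeeds, pdev->irq already holds the MSI vector, so the same assignment serves both cases. Trimmed to the MSI-with-fallback part:

        if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
                if (!pci_enable_msi(pdev))
                        c->msi_vector = 1;      /* remember to pci_disable_msi() on teardown */
                else
                        printk(KERN_WARNING "cciss: MSI init failed\n");
        }
        /* with or without MSI, request_irq() uses whatever is in pdev->irq now */
        c->intr[SIMPLE_MODE_INT] = pdev->irq;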
- */ - if(board_id == 0x3225103C) { - __u32 dma_prefetch; - dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG); - dma_prefetch |= 0x8000; - writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG); - } - #ifdef CCISS_DEBUG printk("Trying to put board into Simple mode\n"); #endif /* CCISS_DEBUG */ @@ -3143,7 +3158,13 @@ static void cciss_getgeometry(int cntl_num) /* Returns -1 if no free entries are left. */ static int alloc_cciss_hba(void) { - int i; + struct gendisk *disk[NWD]; + int i, n; + for (n = 0; n < NWD; n++) { + disk[n] = alloc_disk(1 << NWD_SHIFT); + if (!disk[n]) + goto out; + } for (i = 0; i < MAX_CTLR; i++) { if (!hba[i]) { @@ -3151,18 +3172,20 @@ static int alloc_cciss_hba(void) p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL); if (!p) goto Enomem; - p->gendisk[0] = alloc_disk(1 << NWD_SHIFT); - if (!p->gendisk[0]) - goto Enomem; + for (n = 0; n < NWD; n++) + p->gendisk[n] = disk[n]; hba[i] = p; return i; } } printk(KERN_WARNING "cciss: This driver supports a maximum" " of %d controllers.\n", MAX_CTLR); - return -1; -Enomem: + goto out; + Enomem: printk(KERN_ERR "cciss: out of memory.\n"); + out: + while (n--) + put_disk(disk[n]); return -1; } @@ -3172,7 +3195,7 @@ static void free_hba(int i) int n; hba[i] = NULL; - for (n = 0; n < CISS_MAX_LUN; n++) + for (n = 0; n < NWD; n++) put_disk(p->gendisk[n]); kfree(p); } @@ -3185,8 +3208,9 @@ static void free_hba(int i) static int __devinit cciss_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { + request_queue_t *q; int i; - int j = 0; + int j; int rc; int dac; @@ -3245,15 +3269,15 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not"); hba[i]->cmd_pool_bits = - kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG - + kmalloc(((NR_CMDS + BITS_PER_LONG - 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL); hba[i]->cmd_pool = (CommandList_struct *) pci_alloc_consistent(hba[i]->pdev, - hba[i]->nr_cmds * sizeof(CommandList_struct), + NR_CMDS * sizeof(CommandList_struct), &(hba[i]->cmd_pool_dhandle)); hba[i]->errinfo_pool = (ErrorInfo_struct *) pci_alloc_consistent(hba[i]->pdev, - hba[i]->nr_cmds * sizeof(ErrorInfo_struct), + NR_CMDS * sizeof(ErrorInfo_struct), &(hba[i]->errinfo_pool_dhandle)); if ((hba[i]->cmd_pool_bits == NULL) || (hba[i]->cmd_pool == NULL) @@ -3264,7 +3288,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, #ifdef CONFIG_CISS_SCSI_TAPE hba[i]->scsi_rejects.complete = kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) * - (hba[i]->nr_cmds + 5), GFP_KERNEL); + (NR_CMDS + 5), GFP_KERNEL); if (hba[i]->scsi_rejects.complete == NULL) { printk(KERN_ERR "cciss: out of memory"); goto clean4; @@ -3278,7 +3302,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, /* command and error info recs zeroed out before they are used */ memset(hba[i]->cmd_pool_bits, 0, - ((hba[i]->nr_cmds + BITS_PER_LONG - + ((NR_CMDS + BITS_PER_LONG - 1) / BITS_PER_LONG) * sizeof(unsigned long)); #ifdef CCISS_DEBUG @@ -3293,34 +3317,18 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON); cciss_procinit(i); - - hba[i]->cciss_max_sectors = 2048; - hba[i]->busy_initializing = 0; - do { + for (j = 0; j < NWD; j++) { /* mfm */ drive_info_struct *drv = &(hba[i]->drv[j]); struct gendisk *disk = hba[i]->gendisk[j]; - request_queue_t *q; - - /* Check if the disk was allocated already */ - if (!disk){ - hba[i]->gendisk[j] = alloc_disk(1 << NWD_SHIFT); - disk = hba[i]->gendisk[j]; - } - - /* Check that the disk was able to be allocated */ 
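alloc_cciss_hba() above now allocates all NWD gendisks up front and, if any allocation fails, unwinds with put_disk() on the ones it did get before giving up. The unwind idiom, condensed from the hunk:

        struct gendisk *disk[NWD];
        int n;

        for (n = 0; n < NWD; n++) {
                disk[n] = alloc_disk(1 << NWD_SHIFT);   /* MAX_PART minors per drive */
                if (!disk[n])
                        goto out;
        }
        /* success: hand the disks to the freshly allocated ctlr_info_t */
        return i;

out:
        while (n--)
                put_disk(disk[n]);      /* releases only what was actually allocated */
        return -1;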
- if (!disk) { - printk(KERN_ERR "cciss: unable to allocate memory for disk %d\n", j); - goto clean4; - } q = blk_init_queue(do_cciss_request, &hba[i]->lock); if (!q) { printk(KERN_ERR "cciss: unable to allocate queue for disk %d\n", j); - goto clean4; + break; } drv->queue = q; @@ -3333,7 +3341,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, /* This is a limit in the driver and could be eliminated. */ blk_queue_max_phys_segments(q, MAXSGENTRIES); - blk_queue_max_sectors(q, hba[i]->cciss_max_sectors); + blk_queue_max_sectors(q, 512); blk_queue_softirq_done(q, cciss_softirq_done); @@ -3352,8 +3360,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, blk_queue_hardsect_size(q, drv->block_size); set_capacity(disk, drv->nr_blocks); add_disk(disk); - j++; - } while (j <= hba[i]->highest_lun); + } return 1; @@ -3364,11 +3371,11 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, kfree(hba[i]->cmd_pool_bits); if (hba[i]->cmd_pool) pci_free_consistent(hba[i]->pdev, - hba[i]->nr_cmds * sizeof(CommandList_struct), + NR_CMDS * sizeof(CommandList_struct), hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle); if (hba[i]->errinfo_pool) pci_free_consistent(hba[i]->pdev, - hba[i]->nr_cmds * sizeof(ErrorInfo_struct), + NR_CMDS * sizeof(ErrorInfo_struct), hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle); free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]); @@ -3376,15 +3383,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, unregister_blkdev(hba[i]->major, hba[i]->devname); clean1: hba[i]->busy_initializing = 0; - /* cleanup any queues that may have been initialized */ - for (j=0; j <= hba[i]->highest_lun; j++){ - drive_info_struct *drv = &(hba[i]->drv[j]); - if (drv->queue) - blk_cleanup_queue(drv->queue); - } - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); free_hba(i); return -1; } @@ -3432,7 +3430,7 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev) remove_proc_entry(hba[i]->devname, proc_cciss); /* remove it from the disk list */ - for (j = 0; j < CISS_MAX_LUN; j++) { + for (j = 0; j < NWD; j++) { struct gendisk *disk = hba[i]->gendisk[j]; if (disk) { request_queue_t *q = disk->queue; @@ -3444,9 +3442,9 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev) } } - pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct), + pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct), hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle); - pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct), + pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(ErrorInfo_struct), hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle); kfree(hba[i]->cmd_pool_bits); #ifdef CONFIG_CISS_SCSI_TAPE diff --git a/trunk/drivers/block/cciss.h b/trunk/drivers/block/cciss.h index b70988dd33ec..562235c1445a 100644 --- a/trunk/drivers/block/cciss.h +++ b/trunk/drivers/block/cciss.h @@ -6,6 +6,7 @@ #include "cciss_cmd.h" +#define NWD 16 #define NWD_SHIFT 4 #define MAX_PART (1 << NWD_SHIFT) @@ -59,7 +60,6 @@ struct ctlr_info __u32 board_id; void __iomem *vaddr; unsigned long paddr; - int nr_cmds; /* Number of commands allowed on this controller */ CfgTable_struct __iomem *cfgtable; int interrupts_enabled; int major; @@ -76,7 +76,6 @@ struct ctlr_info unsigned int intr[4]; unsigned int msix_vector; unsigned int msi_vector; - int cciss_max_sectors; BYTE cciss_read; BYTE cciss_write; BYTE cciss_read_capacity; @@ -111,7 +110,7 @@ struct ctlr_info int next_to_run; // Disk structures we need to pass back 
- struct gendisk *gendisk[CISS_MAX_LUN]; + struct gendisk *gendisk[NWD]; #ifdef CONFIG_CISS_SCSI_TAPE void *scsi_ctlr; /* ptr to structure containing scsi related stuff */ /* list of block side commands the scsi error handling sucked up */ @@ -283,7 +282,6 @@ struct board_type { __u32 board_id; char *product_name; struct access_method *access; - int nr_cmds; /* Max cmds this kind of ctlr can handle. */ }; #define CCISS_LOCK(i) (&hba[i]->lock) diff --git a/trunk/drivers/block/cciss_cmd.h b/trunk/drivers/block/cciss_cmd.h index 43bf5593b59b..4af7c4c0c7af 100644 --- a/trunk/drivers/block/cciss_cmd.h +++ b/trunk/drivers/block/cciss_cmd.h @@ -55,7 +55,6 @@ #define I2O_INT_MASK 0x34 #define I2O_IBPOST_Q 0x40 #define I2O_OBPOST_Q 0x44 -#define I2O_DMA1_CFG 0x214 //Configuration Table #define CFGTBL_ChangeReq 0x00000001l @@ -89,7 +88,7 @@ typedef union _u64bit //########################################################################### //STRUCTURES //########################################################################### -#define CISS_MAX_LUN 1024 +#define CISS_MAX_LUN 16 #define CISS_MAX_PHYS_LUN 1024 // SCSI-3 Cmmands diff --git a/trunk/drivers/block/floppy.c b/trunk/drivers/block/floppy.c index 3f1b38276e96..9e6d3a87cbe3 100644 --- a/trunk/drivers/block/floppy.c +++ b/trunk/drivers/block/floppy.c @@ -992,11 +992,11 @@ static void empty(void) { } -static DECLARE_WORK(floppy_work, NULL); +static DECLARE_WORK(floppy_work, NULL, NULL); static void schedule_bh(void (*handler) (void)) { - PREPARE_WORK(&floppy_work, (work_func_t)handler); + PREPARE_WORK(&floppy_work, (void (*)(void *))handler, NULL); schedule_work(&floppy_work); } @@ -1008,7 +1008,7 @@ static void cancel_activity(void) spin_lock_irqsave(&floppy_lock, flags); do_floppy = NULL; - PREPARE_WORK(&floppy_work, (work_func_t)empty); + PREPARE_WORK(&floppy_work, (void *)empty, NULL); del_timer(&fd_timer); spin_unlock_irqrestore(&floppy_lock, flags); } @@ -1868,7 +1868,7 @@ static void show_floppy(void) printk("fdc_busy=%lu\n", fdc_busy); if (do_floppy) printk("do_floppy=%p\n", do_floppy); - if (work_pending(&floppy_work)) + if (floppy_work.pending) printk("floppy_work.func=%p\n", floppy_work.func); if (timer_pending(&fd_timer)) printk("fd_timer.function=%p\n", fd_timer.function); @@ -4498,7 +4498,7 @@ static void floppy_release_irq_and_dma(void) printk("floppy timer still active:%s\n", timeout_message); if (timer_pending(&fd_timer)) printk("auxiliary floppy timer still active\n"); - if (work_pending(&floppy_work)) + if (floppy_work.pending) printk("work still pending\n"); #endif old_fdc = fdc; diff --git a/trunk/drivers/block/nbd.c b/trunk/drivers/block/nbd.c index 7bf2cfbd6285..9d1035e8d9d8 100644 --- a/trunk/drivers/block/nbd.c +++ b/trunk/drivers/block/nbd.c @@ -355,30 +355,14 @@ static struct request *nbd_read_stat(struct nbd_device *lo) return NULL; } -static ssize_t pid_show(struct gendisk *disk, char *page) -{ - return sprintf(page, "%ld\n", - (long) ((struct nbd_device *)disk->private_data)->pid); -} - -static struct disk_attribute pid_attr = { - .attr = { .name = "pid", .mode = S_IRUGO }, - .show = pid_show, -}; - static void nbd_do_it(struct nbd_device *lo) { struct request *req; BUG_ON(lo->magic != LO_MAGIC); - lo->pid = current->pid; - sysfs_create_file(&lo->disk->kobj, &pid_attr.attr); - while ((req = nbd_read_stat(lo)) != NULL) nbd_end_request(req); - - sysfs_remove_file(&lo->disk->kobj, &pid_attr.attr); return; } diff --git a/trunk/drivers/block/paride/aten.c b/trunk/drivers/block/paride/aten.c index 
2695465568ad..c4d696d43dc1 100644 --- a/trunk/drivers/block/paride/aten.c +++ b/trunk/drivers/block/paride/aten.c @@ -149,12 +149,12 @@ static struct pi_protocol aten = { static int __init aten_init(void) { - return paride_register(&aten); + return pi_register(&aten)-1; } static void __exit aten_exit(void) { - paride_unregister( &aten ); + pi_unregister( &aten ); } MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/block/paride/bpck.c b/trunk/drivers/block/paride/bpck.c index 4f27e7392e38..d462ff6b139d 100644 --- a/trunk/drivers/block/paride/bpck.c +++ b/trunk/drivers/block/paride/bpck.c @@ -464,12 +464,12 @@ static struct pi_protocol bpck = { static int __init bpck_init(void) { - return paride_register(&bpck); + return pi_register(&bpck)-1; } static void __exit bpck_exit(void) { - paride_unregister(&bpck); + pi_unregister(&bpck); } MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/block/paride/bpck6.c b/trunk/drivers/block/paride/bpck6.c index ad124525ac23..41a237c5957d 100644 --- a/trunk/drivers/block/paride/bpck6.c +++ b/trunk/drivers/block/paride/bpck6.c @@ -31,7 +31,10 @@ static int verbose; /* set this to 1 to see debugging messages and whatnot */ #include #include #include + +#if defined(CONFIG_PARPORT_MODULE)||defined(CONFIG_PARPORT) #include +#endif #include "ppc6lnx.c" #include "paride.h" @@ -136,6 +139,11 @@ static int bpck6_test_port ( PIA *pi ) /* check for 8-bit port */ PPCSTRUCT(pi)->ppc_id=pi->unit; PPCSTRUCT(pi)->lpt_addr=pi->port; +#ifdef CONFIG_PARPORT_PC_MODULE +#define CONFIG_PARPORT_PC +#endif + +#ifdef CONFIG_PARPORT_PC /* look at the parport device to see if what modes we can use */ if(((struct pardevice *)(pi->pardev))->port->modes & (PARPORT_MODE_EPP) @@ -153,6 +161,11 @@ static int bpck6_test_port ( PIA *pi ) /* check for 8-bit port */ { return 1; } +#else + /* there is no way of knowing what kind of port we have + default to the highest mode possible */ + return 5; +#endif } static int bpck6_probe_unit ( PIA *pi ) @@ -252,12 +265,12 @@ static int __init bpck6_init(void) printk(KERN_INFO "bpck6: Copyright 2001 by Micro Solutions, Inc., DeKalb IL. 
USA\n"); if(verbose) printk(KERN_DEBUG "bpck6: verbose debug enabled.\n"); - return paride_register(&bpck6); + return pi_register(&bpck6) - 1; } static void __exit bpck6_exit(void) { - paride_unregister(&bpck6); + pi_unregister(&bpck6); } MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/block/paride/comm.c b/trunk/drivers/block/paride/comm.c index 9bcd35495323..43d61359d8ec 100644 --- a/trunk/drivers/block/paride/comm.c +++ b/trunk/drivers/block/paride/comm.c @@ -205,12 +205,12 @@ static struct pi_protocol comm = { static int __init comm_init(void) { - return paride_register(&comm); + return pi_register(&comm)-1; } static void __exit comm_exit(void) { - paride_unregister(&comm); + pi_unregister(&comm); } MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/block/paride/dstr.c b/trunk/drivers/block/paride/dstr.c index accc5c777cbb..04d53bf58e8c 100644 --- a/trunk/drivers/block/paride/dstr.c +++ b/trunk/drivers/block/paride/dstr.c @@ -220,12 +220,12 @@ static struct pi_protocol dstr = { static int __init dstr_init(void) { - return paride_register(&dstr); + return pi_register(&dstr)-1; } static void __exit dstr_exit(void) { - paride_unregister(&dstr); + pi_unregister(&dstr); } MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/block/paride/epat.c b/trunk/drivers/block/paride/epat.c index 1bcdff77322e..55d1c0a1fb90 100644 --- a/trunk/drivers/block/paride/epat.c +++ b/trunk/drivers/block/paride/epat.c @@ -327,12 +327,12 @@ static int __init epat_init(void) #ifdef CONFIG_PARIDE_EPATC8 epatc8 = 1; #endif - return paride_register(&epat); + return pi_register(&epat)-1; } static void __exit epat_exit(void) { - paride_unregister(&epat); + pi_unregister(&epat); } MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/block/paride/epia.c b/trunk/drivers/block/paride/epia.c index fb0e782d055e..0f2e0c292d82 100644 --- a/trunk/drivers/block/paride/epia.c +++ b/trunk/drivers/block/paride/epia.c @@ -303,12 +303,12 @@ static struct pi_protocol epia = { static int __init epia_init(void) { - return paride_register(&epia); + return pi_register(&epia)-1; } static void __exit epia_exit(void) { - paride_unregister(&epia); + pi_unregister(&epia); } MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/block/paride/fit2.c b/trunk/drivers/block/paride/fit2.c index 381283753ae4..e0f0691d8bc2 100644 --- a/trunk/drivers/block/paride/fit2.c +++ b/trunk/drivers/block/paride/fit2.c @@ -138,12 +138,12 @@ static struct pi_protocol fit2 = { static int __init fit2_init(void) { - return paride_register(&fit2); + return pi_register(&fit2)-1; } static void __exit fit2_exit(void) { - paride_unregister(&fit2); + pi_unregister(&fit2); } MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/block/paride/fit3.c b/trunk/drivers/block/paride/fit3.c index 275d269458eb..15400e7bc666 100644 --- a/trunk/drivers/block/paride/fit3.c +++ b/trunk/drivers/block/paride/fit3.c @@ -198,12 +198,12 @@ static struct pi_protocol fit3 = { static int __init fit3_init(void) { - return paride_register(&fit3); + return pi_register(&fit3)-1; } static void __exit fit3_exit(void) { - paride_unregister(&fit3); + pi_unregister(&fit3); } MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/block/paride/friq.c b/trunk/drivers/block/paride/friq.c index 4f2ba244689b..5ea2904d2815 100644 --- a/trunk/drivers/block/paride/friq.c +++ b/trunk/drivers/block/paride/friq.c @@ -263,12 +263,12 @@ static struct pi_protocol friq = { static int __init friq_init(void) { - return paride_register(&friq); + return pi_register(&friq)-1; } static void __exit friq_exit(void) { - paride_unregister(&friq); 
+ pi_unregister(&friq); } MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/block/paride/frpw.c b/trunk/drivers/block/paride/frpw.c index c3cde364603a..56b3824b1538 100644 --- a/trunk/drivers/block/paride/frpw.c +++ b/trunk/drivers/block/paride/frpw.c @@ -300,12 +300,12 @@ static struct pi_protocol frpw = { static int __init frpw_init(void) { - return paride_register(&frpw); + return pi_register(&frpw)-1; } static void __exit frpw_exit(void) { - paride_unregister(&frpw); + pi_unregister(&frpw); } MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/block/paride/jumbo b/trunk/drivers/block/paride/jumbo new file mode 100644 index 000000000000..e793b9cb7e72 --- /dev/null +++ b/trunk/drivers/block/paride/jumbo @@ -0,0 +1,70 @@ +#!/bin/sh +# +# This script can be used to build "jumbo" modules that contain the +# base PARIDE support, one protocol module and one high-level driver. +# +echo -n "High level driver [pcd] : " +read X +HLD=${X:-pcd} +# +echo -n "Protocol module [bpck] : " +read X +PROTO=${X:-bpck} +# +echo -n "Use MODVERSIONS [y] ? " +read X +UMODV=${X:-y} +# +echo -n "For SMP kernel [n] ? " +read X +USMP=${X:-n} +# +echo -n "Support PARPORT [n] ? " +read X +UPARP=${X:-n} +# +echo +# +case $USMP in + y* | Y* ) FSMP="-DCONFIG_SMP" + ;; + *) FSMP="" + ;; +esac +# +MODI="-include ../../../include/linux/modversions.h" +# +case $UMODV in + y* | Y* ) FMODV="-DMODVERSIONS $MODI" + ;; + *) FMODV="" + ;; +esac +# +case $UPARP in + y* | Y* ) FPARP="-DCONFIG_PARPORT" + ;; + *) FPARP="" + ;; +esac +# +TARG=$HLD-$PROTO.o +FPROTO=-DCONFIG_PARIDE_`echo "$PROTO" | tr [a-z] [A-Z]` +FK="-D__KERNEL__ -I ../../../include" +FLCH=-D_LINUX_CONFIG_H +# +echo cc $FK $FSMP $FLCH $FPARP $FPROTO $FMODV -Wall -O2 -o Jb.o -c paride.c +cc $FK $FSMP $FLCH $FPARP $FPROTO $FMODV -Wall -O2 -o Jb.o -c paride.c +# +echo cc $FK $FSMP $FMODV -Wall -O2 -o Jp.o -c $PROTO.c +cc $FK $FSMP $FMODV -Wall -O2 -o Jp.o -c $PROTO.c +# +echo cc $FK $FSMP $FMODV -DMODULE -DPARIDE_JUMBO -Wall -O2 -o Jd.o -c $HLD.c +cc $FK $FSMP $FMODV -DMODULE -DPARIDE_JUMBO -Wall -O2 -o Jd.o -c $HLD.c +# +echo ld -r -o $TARG Jp.o Jb.o Jd.o +ld -r -o $TARG Jp.o Jb.o Jd.o +# +# +rm Jp.o Jb.o Jd.o +# diff --git a/trunk/drivers/block/paride/kbic.c b/trunk/drivers/block/paride/kbic.c index 35999c415ee3..d983bcea76fe 100644 --- a/trunk/drivers/block/paride/kbic.c +++ b/trunk/drivers/block/paride/kbic.c @@ -283,21 +283,13 @@ static struct pi_protocol k971 = { static int __init kbic_init(void) { - int rv; - - rv = paride_register(&k951); - if (rv < 0) - return rv; - rv = paride_register(&k971); - if (rv < 0) - paride_unregister(&k951); - return rv; + return (pi_register(&k951)||pi_register(&k971))-1; } static void __exit kbic_exit(void) { - paride_unregister(&k951); - paride_unregister(&k971); + pi_unregister(&k951); + pi_unregister(&k971); } MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/block/paride/ktti.c b/trunk/drivers/block/paride/ktti.c index 117ab0e8ccf0..6c7edbfba9a0 100644 --- a/trunk/drivers/block/paride/ktti.c +++ b/trunk/drivers/block/paride/ktti.c @@ -115,12 +115,12 @@ static struct pi_protocol ktti = { static int __init ktti_init(void) { - return paride_register(&ktti); + return pi_register(&ktti)-1; } static void __exit ktti_exit(void) { - paride_unregister(&ktti); + pi_unregister(&ktti); } MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/block/paride/on20.c b/trunk/drivers/block/paride/on20.c index 0173697a1a4d..9f8e01096809 100644 --- a/trunk/drivers/block/paride/on20.c +++ b/trunk/drivers/block/paride/on20.c @@ -140,12 +140,12 @@ static 
struct pi_protocol on20 = { static int __init on20_init(void) { - return paride_register(&on20); + return pi_register(&on20)-1; } static void __exit on20_exit(void) { - paride_unregister(&on20); + pi_unregister(&on20); } MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/block/paride/on26.c b/trunk/drivers/block/paride/on26.c index 95ba256921f2..0f833caa2101 100644 --- a/trunk/drivers/block/paride/on26.c +++ b/trunk/drivers/block/paride/on26.c @@ -306,12 +306,12 @@ static struct pi_protocol on26 = { static int __init on26_init(void) { - return paride_register(&on26); + return pi_register(&on26)-1; } static void __exit on26_exit(void) { - paride_unregister(&on26); + pi_unregister(&on26); } MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/block/paride/paride.c b/trunk/drivers/block/paride/paride.c index 48c50f11f63b..4b258f7836f3 100644 --- a/trunk/drivers/block/paride/paride.c +++ b/trunk/drivers/block/paride/paride.c @@ -29,7 +29,14 @@ #include #include #include /* TASK_* */ + +#ifdef CONFIG_PARPORT_MODULE +#define CONFIG_PARPORT +#endif + +#ifdef CONFIG_PARPORT #include +#endif #include "paride.h" @@ -69,6 +76,8 @@ void pi_read_block(PIA * pi, char *buf, int count) EXPORT_SYMBOL(pi_read_block); +#ifdef CONFIG_PARPORT + static void pi_wake_up(void *p) { PIA *pi = (PIA *) p; @@ -91,8 +100,11 @@ static void pi_wake_up(void *p) cont(); } +#endif + int pi_schedule_claimed(PIA * pi, void (*cont) (void)) { +#ifdef CONFIG_PARPORT unsigned long flags; spin_lock_irqsave(&pi_spinlock, flags); @@ -103,6 +115,7 @@ int pi_schedule_claimed(PIA * pi, void (*cont) (void)) } pi->claimed = 1; spin_unlock_irqrestore(&pi_spinlock, flags); +#endif return 1; } EXPORT_SYMBOL(pi_schedule_claimed); @@ -120,16 +133,20 @@ static void pi_claim(PIA * pi) if (pi->claimed) return; pi->claimed = 1; +#ifdef CONFIG_PARPORT if (pi->pardev) wait_event(pi->parq, !parport_claim((struct pardevice *) pi->pardev)); +#endif } static void pi_unclaim(PIA * pi) { pi->claimed = 0; +#ifdef CONFIG_PARPORT if (pi->pardev) parport_release((struct pardevice *) (pi->pardev)); +#endif } void pi_connect(PIA * pi) @@ -150,15 +167,21 @@ EXPORT_SYMBOL(pi_disconnect); static void pi_unregister_parport(PIA * pi) { +#ifdef CONFIG_PARPORT if (pi->pardev) { parport_unregister_device((struct pardevice *) (pi->pardev)); pi->pardev = NULL; } +#endif } void pi_release(PIA * pi) { pi_unregister_parport(pi); +#ifndef CONFIG_PARPORT + if (pi->reserved) + release_region(pi->port, pi->reserved); +#endif /* !CONFIG_PARPORT */ if (pi->proto->release_proto) pi->proto->release_proto(pi); module_put(pi->proto->owner); @@ -206,7 +229,7 @@ static int pi_test_proto(PIA * pi, char *scratch, int verbose) return res; } -int paride_register(PIP * pr) +int pi_register(PIP * pr) { int k; @@ -214,24 +237,24 @@ int paride_register(PIP * pr) if (protocols[k] && !strcmp(pr->name, protocols[k]->name)) { printk("paride: %s protocol already registered\n", pr->name); - return -1; + return 0; } k = 0; while ((k < MAX_PROTOS) && (protocols[k])) k++; if (k == MAX_PROTOS) { printk("paride: protocol table full\n"); - return -1; + return 0; } protocols[k] = pr; pr->index = k; printk("paride: %s registered as protocol %d\n", pr->name, k); - return 0; + return 1; } -EXPORT_SYMBOL(paride_register); +EXPORT_SYMBOL(pi_register); -void paride_unregister(PIP * pr) +void pi_unregister(PIP * pr) { if (!pr) return; @@ -242,10 +265,12 @@ void paride_unregister(PIP * pr) protocols[pr->index] = NULL; } -EXPORT_SYMBOL(paride_unregister); +EXPORT_SYMBOL(pi_unregister); static int 
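In this tree the paride protocol hooks are pi_register()/pi_unregister(); pi_register() returns 1 on success and 0 on failure (duplicate name or full protocol table), which is why every protocol module's init maps it onto the usual 0/-1 convention with "return pi_register(&proto) - 1;". A minimal protocol module built around that convention, with an illustrative protocol:

static struct pi_protocol myproto = {
        .owner = THIS_MODULE,
        .name  = "myproto",
        /* .connect, .disconnect, .read_regr, .write_regr, ... as needed */
};

static int __init myproto_init(void)
{
        return pi_register(&myproto) - 1;       /* 1 -> 0 (ok), 0 -> -1 (failed) */
}

static void __exit myproto_exit(void)
{
        pi_unregister(&myproto);
}

MODULE_LICENSE("GPL");
module_init(myproto_init);
module_exit(myproto_exit);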
pi_register_parport(PIA * pi, int verbose) { +#ifdef CONFIG_PARPORT + struct parport *port; port = parport_find_base(pi->port); @@ -265,6 +290,7 @@ static int pi_register_parport(PIA * pi, int verbose) printk("%s: 0x%x is %s\n", pi->device, pi->port, port->name); pi->parname = (char *) port->name; +#endif return 1; } @@ -421,6 +447,13 @@ int pi_init(PIA * pi, int autoprobe, int port, int mode, printk("%s: Adapter not found\n", device); return 0; } +#ifndef CONFIG_PARPORT + if (!request_region(pi->port, pi->reserved, pi->device)) { + printk(KERN_WARNING "paride: Unable to request region 0x%x\n", + pi->port); + return 0; + } +#endif /* !CONFIG_PARPORT */ if (pi->parname) printk("%s: Sharing %s at 0x%x\n", pi->device, diff --git a/trunk/drivers/block/paride/paride.h b/trunk/drivers/block/paride/paride.h index 2bddbf45518b..c6d98ef09e48 100644 --- a/trunk/drivers/block/paride/paride.h +++ b/trunk/drivers/block/paride/paride.h @@ -163,8 +163,8 @@ struct pi_protocol { typedef struct pi_protocol PIP; -extern int paride_register( PIP * ); -extern void paride_unregister ( PIP * ); +extern int pi_register( PIP * ); +extern void pi_unregister ( PIP * ); #endif /* __DRIVERS_PARIDE_H__ */ /* end of paride.h */ diff --git a/trunk/drivers/block/paride/pcd.c b/trunk/drivers/block/paride/pcd.c index c852eed91e4b..ac5ba462710b 100644 --- a/trunk/drivers/block/paride/pcd.c +++ b/trunk/drivers/block/paride/pcd.c @@ -912,12 +912,12 @@ static int __init pcd_init(void) int unit; if (disable) - return -EINVAL; + return -1; pcd_init_units(); if (pcd_detect()) - return -ENODEV; + return -1; /* get the atapi capabilities page */ pcd_probe_capabilities(); @@ -925,7 +925,7 @@ static int __init pcd_init(void) if (register_blkdev(major, name)) { for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) put_disk(cd->disk); - return -EBUSY; + return -1; } pcd_queue = blk_init_queue(do_pcd_request, &pcd_lock); @@ -933,7 +933,7 @@ static int __init pcd_init(void) unregister_blkdev(major, name); for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) put_disk(cd->disk); - return -ENOMEM; + return -1; } for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { diff --git a/trunk/drivers/block/paride/pd.c b/trunk/drivers/block/paride/pd.c index 9d9bff23f426..40a11e567970 100644 --- a/trunk/drivers/block/paride/pd.c +++ b/trunk/drivers/block/paride/pd.c @@ -352,19 +352,19 @@ static enum action (*phase)(void); static void run_fsm(void); -static void ps_tq_int(struct work_struct *work); +static void ps_tq_int( void *data); -static DECLARE_DELAYED_WORK(fsm_tq, ps_tq_int); +static DECLARE_WORK(fsm_tq, ps_tq_int, NULL); static void schedule_fsm(void) { if (!nice) - schedule_delayed_work(&fsm_tq, 0); + schedule_work(&fsm_tq); else schedule_delayed_work(&fsm_tq, nice-1); } -static void ps_tq_int(struct work_struct *work) +static void ps_tq_int(void *data) { run_fsm(); } diff --git a/trunk/drivers/block/paride/pf.c b/trunk/drivers/block/paride/pf.c index 7cdaa1951260..1a9dee19efcf 100644 --- a/trunk/drivers/block/paride/pf.c +++ b/trunk/drivers/block/paride/pf.c @@ -933,25 +933,25 @@ static int __init pf_init(void) int unit; if (disable) - return -EINVAL; + return -1; pf_init_units(); if (pf_detect()) - return -ENODEV; + return -1; pf_busy = 0; if (register_blkdev(major, name)) { for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) put_disk(pf->disk); - return -EBUSY; + return -1; } pf_queue = blk_init_queue(do_pf_request, &pf_spin_lock); if (!pf_queue) { unregister_blkdev(major, name); for (pf = units, unit = 0; unit < PF_UNITS; 
pf++, unit++) put_disk(pf->disk); - return -ENOMEM; + return -1; } blk_queue_max_phys_segments(pf_queue, cluster); diff --git a/trunk/drivers/block/paride/pg.c b/trunk/drivers/block/paride/pg.c index 9970aedbb5d9..13f998aa1cd3 100644 --- a/trunk/drivers/block/paride/pg.c +++ b/trunk/drivers/block/paride/pg.c @@ -646,14 +646,14 @@ static int __init pg_init(void) int err; if (disable){ - err = -EINVAL; + err = -1; goto out; } pg_init_units(); if (pg_detect()) { - err = -ENODEV; + err = -1; goto out; } diff --git a/trunk/drivers/block/paride/pseudo.h b/trunk/drivers/block/paride/pseudo.h index bc3703294143..932342d7a8eb 100644 --- a/trunk/drivers/block/paride/pseudo.h +++ b/trunk/drivers/block/paride/pseudo.h @@ -35,7 +35,7 @@ #include #include -static void ps_tq_int(struct work_struct *work); +static void ps_tq_int( void *data); static void (* ps_continuation)(void); static int (* ps_ready)(void); @@ -45,7 +45,7 @@ static int ps_nice = 0; static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused))); -static DECLARE_DELAYED_WORK(ps_tq, ps_tq_int); +static DECLARE_WORK(ps_tq, ps_tq_int, NULL); static void ps_set_intr(void (*continuation)(void), int (*ready)(void), @@ -63,14 +63,14 @@ static void ps_set_intr(void (*continuation)(void), if (!ps_tq_active) { ps_tq_active = 1; if (!ps_nice) - schedule_delayed_work(&ps_tq, 0); + schedule_work(&ps_tq); else schedule_delayed_work(&ps_tq, ps_nice-1); } spin_unlock_irqrestore(&ps_spinlock,flags); } -static void ps_tq_int(struct work_struct *work) +static void ps_tq_int(void *data) { void (*con)(void); unsigned long flags; @@ -92,7 +92,7 @@ static void ps_tq_int(struct work_struct *work) } ps_tq_active = 1; if (!ps_nice) - schedule_delayed_work(&ps_tq, 0); + schedule_work(&ps_tq); else schedule_delayed_work(&ps_tq, ps_nice-1); spin_unlock_irqrestore(&ps_spinlock,flags); diff --git a/trunk/drivers/block/paride/pt.c b/trunk/drivers/block/paride/pt.c index c902b25e4869..35fb26636721 100644 --- a/trunk/drivers/block/paride/pt.c +++ b/trunk/drivers/block/paride/pt.c @@ -946,12 +946,12 @@ static int __init pt_init(void) int err; if (disable) { - err = -EINVAL; + err = -1; goto out; } if (pt_detect()) { - err = -ENODEV; + err = -1; goto out; } diff --git a/trunk/drivers/block/pktcdvd.c b/trunk/drivers/block/pktcdvd.c index e45eaa264119..f2904f67af47 100644 --- a/trunk/drivers/block/pktcdvd.c +++ b/trunk/drivers/block/pktcdvd.c @@ -54,7 +54,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/trunk/drivers/block/sx8.c b/trunk/drivers/block/sx8.c index 54509eb3391b..47d6975268ff 100644 --- a/trunk/drivers/block/sx8.c +++ b/trunk/drivers/block/sx8.c @@ -1244,10 +1244,9 @@ static irqreturn_t carm_interrupt(int irq, void *__host) return IRQ_RETVAL(handled); } -static void carm_fsm_task (struct work_struct *work) +static void carm_fsm_task (void *_data) { - struct carm_host *host = - container_of(work, struct carm_host, fsm_task); + struct carm_host *host = _data; unsigned long flags; unsigned int state; int rc, i, next_dev; @@ -1620,7 +1619,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) host->pdev = pdev; host->flags = pci_dac ? 
FL_DAC : 0; spin_lock_init(&host->lock); - INIT_WORK(&host->fsm_task, carm_fsm_task); + INIT_WORK(&host->fsm_task, carm_fsm_task, host); init_completion(&host->probe_comp); for (i = 0; i < ARRAY_SIZE(host->req); i++) diff --git a/trunk/drivers/block/ub.c b/trunk/drivers/block/ub.c index 2098eff91e14..0d5c73f07265 100644 --- a/trunk/drivers/block/ub.c +++ b/trunk/drivers/block/ub.c @@ -376,7 +376,7 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int stalled_pipe); static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd); static void ub_reset_enter(struct ub_dev *sc, int try); -static void ub_reset_task(struct work_struct *work); +static void ub_reset_task(void *arg); static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun); static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, struct ub_capacity *ret); @@ -1558,9 +1558,9 @@ static void ub_reset_enter(struct ub_dev *sc, int try) schedule_work(&sc->reset_work); } -static void ub_reset_task(struct work_struct *work) +static void ub_reset_task(void *arg) { - struct ub_dev *sc = container_of(work, struct ub_dev, reset_work); + struct ub_dev *sc = arg; unsigned long flags; struct list_head *p; struct ub_lun *lun; @@ -2179,7 +2179,7 @@ static int ub_probe(struct usb_interface *intf, usb_init_urb(&sc->work_urb); tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc); atomic_set(&sc->poison, 0); - INIT_WORK(&sc->reset_work, ub_reset_task); + INIT_WORK(&sc->reset_work, ub_reset_task, sc); init_waitqueue_head(&sc->reset_wait); init_timer(&sc->work_timer); diff --git a/trunk/drivers/bluetooth/bcm203x.c b/trunk/drivers/bluetooth/bcm203x.c index 9256985cbe36..516751754aa9 100644 --- a/trunk/drivers/bluetooth/bcm203x.c +++ b/trunk/drivers/bluetooth/bcm203x.c @@ -157,10 +157,9 @@ static void bcm203x_complete(struct urb *urb) } } -static void bcm203x_work(struct work_struct *work) +static void bcm203x_work(void *user_data) { - struct bcm203x_data *data = - container_of(work, struct bcm203x_data, work); + struct bcm203x_data *data = user_data; if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0) BT_ERR("Can't submit URB"); @@ -247,7 +246,7 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id release_firmware(firmware); - INIT_WORK(&data->work, bcm203x_work); + INIT_WORK(&data->work, bcm203x_work, (void *) data); usb_set_intfdata(intf, data); diff --git a/trunk/drivers/bluetooth/hci_bcsp.c b/trunk/drivers/bluetooth/hci_bcsp.c index 5e2c31882003..d0cface535fb 100644 --- a/trunk/drivers/bluetooth/hci_bcsp.c +++ b/trunk/drivers/bluetooth/hci_bcsp.c @@ -330,7 +330,7 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu) reliable packet if the number of packets sent but not yet ack'ed is < than the winsize */ - spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING); + spin_lock_irqsave(&bcsp->unack.lock, flags); if (bcsp->unack.qlen < BCSP_TXWINSIZE && (skb = skb_dequeue(&bcsp->rel)) != NULL) { struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type); @@ -696,7 +696,7 @@ static void bcsp_timed_event(unsigned long arg) BT_DBG("hu %p retransmitting %u pkts", hu, bcsp->unack.qlen); - spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING); + spin_lock_irqsave(&bcsp->unack.lock, flags); while ((skb = __skb_dequeue_tail(&bcsp->unack)) != NULL) { bcsp->msgq_txseq = (bcsp->msgq_txseq - 1) & 0x07; diff --git a/trunk/drivers/cdrom/optcd.c b/trunk/drivers/cdrom/optcd.c index 3541690a77d4..25032d7edc55 
100644 --- a/trunk/drivers/cdrom/optcd.c +++ b/trunk/drivers/cdrom/optcd.c @@ -101,7 +101,7 @@ static void debug(int debug_this, const char* fmt, ...) return; va_start(args, fmt); - vsnprintf(s, sizeof(s), fmt, args); + vsprintf(s, fmt, args); printk(KERN_DEBUG "optcd: %s\n", s); va_end(args); } diff --git a/trunk/drivers/cdrom/sbpcd.c b/trunk/drivers/cdrom/sbpcd.c index a1283b1ef989..ba50e5a712f2 100644 --- a/trunk/drivers/cdrom/sbpcd.c +++ b/trunk/drivers/cdrom/sbpcd.c @@ -770,10 +770,11 @@ static void msg(int level, const char *fmt, ...) msgnum++; if (msgnum>99) msgnum=0; + sprintf(buf, MSG_LEVEL "%s-%d [%02d]: ", major_name, current_drive - D_S, msgnum); va_start(args, fmt); - vsnprintf(buf, sizeof(buf), fmt, args); + vsprintf(&buf[18], fmt, args); va_end(args); - printk(MSG_LEVEL "%s-%d [%02d]: %s", major_name, current_drive - D_S, msgnum, buf); + printk(buf); #if KLOGD_PAUSE sbp_sleep(KLOGD_PAUSE); /* else messages get lost */ #endif /* KLOGD_PAUSE */ diff --git a/trunk/drivers/char/agp/amd64-agp.c b/trunk/drivers/char/agp/amd64-agp.c index 2f2c4efff8a3..00b17ae39736 100644 --- a/trunk/drivers/char/agp/amd64-agp.c +++ b/trunk/drivers/char/agp/amd64-agp.c @@ -459,7 +459,7 @@ static const struct aper_size_info_32 nforce3_sizes[5] = /* Handle shadow device of the Nvidia NForce3 */ /* CHECK-ME original 2.4 version set up some IORRs. Check if that is needed. */ -static int nforce3_agp_init(struct pci_dev *pdev) +static int __devinit nforce3_agp_init(struct pci_dev *pdev) { u32 tmp, apbase, apbar, aplimit; struct pci_dev *dev1; diff --git a/trunk/drivers/char/cyclades.c b/trunk/drivers/char/cyclades.c index acb2de5e3a98..e608dadece2f 100644 --- a/trunk/drivers/char/cyclades.c +++ b/trunk/drivers/char/cyclades.c @@ -926,10 +926,9 @@ cy_sched_event(struct cyclades_port *info, int event) * had to poll every port to see if that port needed servicing. 
*/ static void -do_softint(struct work_struct *work) +do_softint(void *private_) { - struct cyclades_port *info = - container_of(work, struct cyclades_port, tqueue); + struct cyclades_port *info = (struct cyclades_port *) private_; struct tty_struct *tty; tty = info->tty; @@ -5329,7 +5328,7 @@ cy_init(void) info->blocked_open = 0; info->default_threshold = 0; info->default_timeout = 0; - INIT_WORK(&info->tqueue, do_softint); + INIT_WORK(&info->tqueue, do_softint, info); init_waitqueue_head(&info->open_wait); init_waitqueue_head(&info->close_wait); init_waitqueue_head(&info->shutdown_wait); @@ -5404,7 +5403,7 @@ cy_init(void) info->blocked_open = 0; info->default_threshold = 0; info->default_timeout = 0; - INIT_WORK(&info->tqueue, do_softint); + INIT_WORK(&info->tqueue, do_softint, info); init_waitqueue_head(&info->open_wait); init_waitqueue_head(&info->close_wait); init_waitqueue_head(&info->shutdown_wait); diff --git a/trunk/drivers/char/decserial.c b/trunk/drivers/char/decserial.c index 8ea2bea2b183..85f404e25c73 100644 --- a/trunk/drivers/char/decserial.c +++ b/trunk/drivers/char/decserial.c @@ -23,12 +23,20 @@ extern int zs_init(void); #endif +#ifdef CONFIG_DZ +extern int dz_init(void); +#endif + #ifdef CONFIG_SERIAL_CONSOLE #ifdef CONFIG_ZS extern void zs_serial_console_init(void); #endif +#ifdef CONFIG_DZ +extern void dz_serial_console_init(void); +#endif + #endif /* rs_init - starts up the serial interface - @@ -38,11 +46,23 @@ extern void zs_serial_console_init(void); int __init rs_init(void) { -#ifdef CONFIG_ZS + +#if defined(CONFIG_ZS) && defined(CONFIG_DZ) if (IOASIC) return zs_init(); + else + return dz_init(); +#else + +#ifdef CONFIG_ZS + return zs_init(); +#endif + +#ifdef CONFIG_DZ + return dz_init(); +#endif + #endif - return -ENXIO; } __initcall(rs_init); @@ -56,9 +76,21 @@ __initcall(rs_init); */ static int __init decserial_console_init(void) { -#ifdef CONFIG_ZS +#if defined(CONFIG_ZS) && defined(CONFIG_DZ) if (IOASIC) zs_serial_console_init(); + else + dz_serial_console_init(); +#else + +#ifdef CONFIG_ZS + zs_serial_console_init(); +#endif + +#ifdef CONFIG_DZ + dz_serial_console_init(); +#endif + #endif return 0; } diff --git a/trunk/drivers/char/drm/drm_sman.c b/trunk/drivers/char/drm/drm_sman.c index 19c81d2e13d0..425c82336ee0 100644 --- a/trunk/drivers/char/drm/drm_sman.c +++ b/trunk/drivers/char/drm/drm_sman.c @@ -162,7 +162,6 @@ drm_sman_set_manager(drm_sman_t * sman, unsigned int manager, return 0; } -EXPORT_SYMBOL(drm_sman_set_manager); static drm_owner_item_t *drm_sman_get_owner_item(drm_sman_t * sman, unsigned long owner) diff --git a/trunk/drivers/char/drm/drm_vm.c b/trunk/drivers/char/drm/drm_vm.c index ae2691942ddb..b40ae438f531 100644 --- a/trunk/drivers/char/drm/drm_vm.c +++ b/trunk/drivers/char/drm/drm_vm.c @@ -147,14 +147,14 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */ if (!map) - return NOPAGE_SIGBUS; /* Nothing allocated */ + return NOPAGE_OOM; /* Nothing allocated */ offset = address - vma->vm_start; i = (unsigned long)map->handle + offset; page = (map->type == _DRM_CONSISTENT) ? 
virt_to_page((void *)i) : vmalloc_to_page((void *)i); if (!page) - return NOPAGE_SIGBUS; + return NOPAGE_OOM; get_page(page); DRM_DEBUG("shm_nopage 0x%lx\n", address); @@ -272,7 +272,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma, if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */ if (!dma->pagelist) - return NOPAGE_SIGBUS; /* Nothing allocated */ + return NOPAGE_OOM; /* Nothing allocated */ offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */ page_nr = offset >> PAGE_SHIFT; @@ -310,7 +310,7 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma, if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */ if (!entry->pagelist) - return NOPAGE_SIGBUS; /* Nothing allocated */ + return NOPAGE_OOM; /* Nothing allocated */ offset = address - vma->vm_start; map_offset = map->offset - (unsigned long)dev->sg->virtual; diff --git a/trunk/drivers/char/drm/via_dmablit.c b/trunk/drivers/char/drm/via_dmablit.c index 806f9ce5f47b..60c1695db300 100644 --- a/trunk/drivers/char/drm/via_dmablit.c +++ b/trunk/drivers/char/drm/via_dmablit.c @@ -500,9 +500,9 @@ via_dmablit_timer(unsigned long data) static void -via_dmablit_workqueue(struct work_struct *work) +via_dmablit_workqueue(void *data) { - drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq); + drm_via_blitq_t *blitq = (drm_via_blitq_t *) data; drm_device_t *dev = blitq->dev; unsigned long irqsave; drm_via_sg_info_t *cur_sg; @@ -571,7 +571,7 @@ via_init_dmablit(drm_device_t *dev) DRM_INIT_WAITQUEUE(blitq->blit_queue + j); } DRM_INIT_WAITQUEUE(&blitq->busy_queue); - INIT_WORK(&blitq->wq, via_dmablit_workqueue); + INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq); init_timer(&blitq->poll_timer); blitq->poll_timer.function = &via_dmablit_timer; blitq->poll_timer.data = (unsigned long) blitq; diff --git a/trunk/drivers/char/epca.c b/trunk/drivers/char/epca.c index 7c71eb779802..706733c0b36a 100644 --- a/trunk/drivers/char/epca.c +++ b/trunk/drivers/char/epca.c @@ -200,7 +200,7 @@ static int pc_ioctl(struct tty_struct *, struct file *, static int info_ioctl(struct tty_struct *, struct file *, unsigned int, unsigned long); static void pc_set_termios(struct tty_struct *, struct termios *); -static void do_softint(struct work_struct *work); +static void do_softint(void *); static void pc_stop(struct tty_struct *); static void pc_start(struct tty_struct *); static void pc_throttle(struct tty_struct * tty); @@ -1505,7 +1505,7 @@ static void post_fep_init(unsigned int crd) ch->brdchan = bc; ch->mailbox = gd; - INIT_WORK(&ch->tqueue, do_softint); + INIT_WORK(&ch->tqueue, do_softint, ch); ch->board = &boards[crd]; spin_lock_irqsave(&epca_lock, flags); @@ -2566,9 +2566,9 @@ static void pc_set_termios(struct tty_struct *tty, struct termios *old_termios) /* --------------------- Begin do_softint ----------------------- */ -static void do_softint(struct work_struct *work) +static void do_softint(void *private_) { /* Begin do_softint */ - struct channel *ch = container_of(work, struct channel, tqueue); + struct channel *ch = (struct channel *) private_; /* Called in response to a modem change event */ if (ch && ch->magic == EPCA_MAGIC) { /* Begin EPCA_MAGIC */ struct tty_struct *tty = ch->tty; diff --git a/trunk/drivers/char/esp.c b/trunk/drivers/char/esp.c index 93b551962513..15a4ea896328 100644 --- a/trunk/drivers/char/esp.c +++ b/trunk/drivers/char/esp.c @@ -723,10 +723,9 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id) * 
------------------------------------------------------------------- */ -static void do_softint(struct work_struct *work) +static void do_softint(void *private_) { - struct esp_struct *info = - container_of(work, struct esp_struct, tqueue); + struct esp_struct *info = (struct esp_struct *) private_; struct tty_struct *tty; tty = info->tty; @@ -747,10 +746,9 @@ static void do_softint(struct work_struct *work) * do_serial_hangup() -> tty->hangup() -> esp_hangup() * */ -static void do_serial_hangup(struct work_struct *work) +static void do_serial_hangup(void *private_) { - struct esp_struct *info = - container_of(work, struct esp_struct, tqueue_hangup); + struct esp_struct *info = (struct esp_struct *) private_; struct tty_struct *tty; tty = info->tty; @@ -2503,8 +2501,8 @@ static int __init espserial_init(void) info->magic = ESP_MAGIC; info->close_delay = 5*HZ/10; info->closing_wait = 30*HZ; - INIT_WORK(&info->tqueue, do_softint); - INIT_WORK(&info->tqueue_hangup, do_serial_hangup); + INIT_WORK(&info->tqueue, do_softint, info); + INIT_WORK(&info->tqueue_hangup, do_serial_hangup, info); info->config.rx_timeout = rx_timeout; info->config.flow_on = flow_on; info->config.flow_off = flow_off; diff --git a/trunk/drivers/char/genrtc.c b/trunk/drivers/char/genrtc.c index 23b25ada65ea..817dc409ac20 100644 --- a/trunk/drivers/char/genrtc.c +++ b/trunk/drivers/char/genrtc.c @@ -102,7 +102,7 @@ static void gen_rtc_interrupt(unsigned long arg); * Routine to poll RTC seconds field for change as often as possible, * after first RTC_UIE use timer to reduce polling */ -static void genrtc_troutine(struct work_struct *work) +static void genrtc_troutine(void *data) { unsigned int tmp = get_rtc_ss(); @@ -255,7 +255,7 @@ static inline int gen_set_rtc_irq_bit(unsigned char bit) irq_active = 1; stop_rtc_timers = 0; lostint = 0; - INIT_WORK(&genrtc_task, genrtc_troutine); + INIT_WORK(&genrtc_task, genrtc_troutine, NULL); oldsecs = get_rtc_ss(); init_timer(&timer_task); diff --git a/trunk/drivers/char/hvc_console.c b/trunk/drivers/char/hvc_console.c index cc2cd46bedc6..9902ffad3b12 100644 --- a/trunk/drivers/char/hvc_console.c +++ b/trunk/drivers/char/hvc_console.c @@ -38,7 +38,6 @@ #include #include #include -#include #include diff --git a/trunk/drivers/char/hvcs.c b/trunk/drivers/char/hvcs.c index d090622f1dea..8728255c9463 100644 --- a/trunk/drivers/char/hvcs.c +++ b/trunk/drivers/char/hvcs.c @@ -337,6 +337,11 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp); static void hvcs_close(struct tty_struct *tty, struct file *filp); static void hvcs_hangup(struct tty_struct * tty); +static void hvcs_create_device_attrs(struct hvcs_struct *hvcsd); +static void hvcs_remove_device_attrs(struct vio_dev *vdev); +static void hvcs_create_driver_attrs(void); +static void hvcs_remove_driver_attrs(void); + static int __devinit hvcs_probe(struct vio_dev *dev, const struct vio_device_id *id); static int __devexit hvcs_remove(struct vio_dev *dev); @@ -348,172 +353,6 @@ static void __exit hvcs_module_exit(void); #define HVCS_TRY_WRITE 0x00000004 #define HVCS_READ_MASK (HVCS_SCHED_READ | HVCS_QUICK_READ) -static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod) -{ - return viod->dev.driver_data; -} -/* The sysfs interface for the driver and devices */ - -static ssize_t hvcs_partner_vtys_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct vio_dev *viod = to_vio_dev(dev); - struct hvcs_struct *hvcsd = from_vio_dev(viod); - unsigned long flags; - int retval; - - 
spin_lock_irqsave(&hvcsd->lock, flags); - retval = sprintf(buf, "%X\n", hvcsd->p_unit_address); - spin_unlock_irqrestore(&hvcsd->lock, flags); - return retval; -} -static DEVICE_ATTR(partner_vtys, S_IRUGO, hvcs_partner_vtys_show, NULL); - -static ssize_t hvcs_partner_clcs_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct vio_dev *viod = to_vio_dev(dev); - struct hvcs_struct *hvcsd = from_vio_dev(viod); - unsigned long flags; - int retval; - - spin_lock_irqsave(&hvcsd->lock, flags); - retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]); - spin_unlock_irqrestore(&hvcsd->lock, flags); - return retval; -} -static DEVICE_ATTR(partner_clcs, S_IRUGO, hvcs_partner_clcs_show, NULL); - -static ssize_t hvcs_current_vty_store(struct device *dev, struct device_attribute *attr, const char * buf, - size_t count) -{ - /* - * Don't need this feature at the present time because firmware doesn't - * yet support multiple partners. - */ - printk(KERN_INFO "HVCS: Denied current_vty change: -EPERM.\n"); - return -EPERM; -} - -static ssize_t hvcs_current_vty_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct vio_dev *viod = to_vio_dev(dev); - struct hvcs_struct *hvcsd = from_vio_dev(viod); - unsigned long flags; - int retval; - - spin_lock_irqsave(&hvcsd->lock, flags); - retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]); - spin_unlock_irqrestore(&hvcsd->lock, flags); - return retval; -} - -static DEVICE_ATTR(current_vty, - S_IRUGO | S_IWUSR, hvcs_current_vty_show, hvcs_current_vty_store); - -static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribute *attr, const char *buf, - size_t count) -{ - struct vio_dev *viod = to_vio_dev(dev); - struct hvcs_struct *hvcsd = from_vio_dev(viod); - unsigned long flags; - - /* writing a '0' to this sysfs entry will result in the disconnect. */ - if (simple_strtol(buf, NULL, 0) != 0) - return -EINVAL; - - spin_lock_irqsave(&hvcsd->lock, flags); - - if (hvcsd->open_count > 0) { - spin_unlock_irqrestore(&hvcsd->lock, flags); - printk(KERN_INFO "HVCS: vterm state unchanged. " - "The hvcs device node is still in use.\n"); - return -EPERM; - } - - if (hvcsd->connected == 0) { - spin_unlock_irqrestore(&hvcsd->lock, flags); - printk(KERN_INFO "HVCS: vterm state unchanged. 
The" - " vty-server is not connected to a vty.\n"); - return -EPERM; - } - - hvcs_partner_free(hvcsd); - printk(KERN_INFO "HVCS: Closed vty-server@%X and" - " partner vty@%X:%d connection.\n", - hvcsd->vdev->unit_address, - hvcsd->p_unit_address, - (uint32_t)hvcsd->p_partition_ID); - - spin_unlock_irqrestore(&hvcsd->lock, flags); - return count; -} - -static ssize_t hvcs_vterm_state_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct vio_dev *viod = to_vio_dev(dev); - struct hvcs_struct *hvcsd = from_vio_dev(viod); - unsigned long flags; - int retval; - - spin_lock_irqsave(&hvcsd->lock, flags); - retval = sprintf(buf, "%d\n", hvcsd->connected); - spin_unlock_irqrestore(&hvcsd->lock, flags); - return retval; -} -static DEVICE_ATTR(vterm_state, S_IRUGO | S_IWUSR, - hvcs_vterm_state_show, hvcs_vterm_state_store); - -static ssize_t hvcs_index_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct vio_dev *viod = to_vio_dev(dev); - struct hvcs_struct *hvcsd = from_vio_dev(viod); - unsigned long flags; - int retval; - - spin_lock_irqsave(&hvcsd->lock, flags); - retval = sprintf(buf, "%d\n", hvcsd->index); - spin_unlock_irqrestore(&hvcsd->lock, flags); - return retval; -} - -static DEVICE_ATTR(index, S_IRUGO, hvcs_index_show, NULL); - -static struct attribute *hvcs_attrs[] = { - &dev_attr_partner_vtys.attr, - &dev_attr_partner_clcs.attr, - &dev_attr_current_vty.attr, - &dev_attr_vterm_state.attr, - &dev_attr_index.attr, - NULL, -}; - -static struct attribute_group hvcs_attr_group = { - .attrs = hvcs_attrs, -}; - -static ssize_t hvcs_rescan_show(struct device_driver *ddp, char *buf) -{ - /* A 1 means it is updating, a 0 means it is done updating */ - return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status); -} - -static ssize_t hvcs_rescan_store(struct device_driver *ddp, const char * buf, - size_t count) -{ - if ((simple_strtol(buf, NULL, 0) != 1) - && (hvcs_rescan_status != 0)) - return -EINVAL; - - hvcs_rescan_status = 1; - printk(KERN_INFO "HVCS: rescanning partner info for all" - " vty-servers.\n"); - hvcs_rescan_devices_list(); - hvcs_rescan_status = 0; - return count; -} - -static DRIVER_ATTR(rescan, - S_IRUGO | S_IWUSR, hvcs_rescan_show, hvcs_rescan_store); - static void hvcs_kick(void) { hvcs_kicked = 1; @@ -736,7 +575,7 @@ static void destroy_hvcs_struct(struct kobject *kobj) spin_unlock_irqrestore(&hvcsd->lock, flags); spin_unlock(&hvcs_structs_lock); - sysfs_remove_group(&vdev->dev.kobj, &hvcs_attr_group); + hvcs_remove_device_attrs(vdev); kfree(hvcsd); } @@ -769,7 +608,6 @@ static int __devinit hvcs_probe( { struct hvcs_struct *hvcsd; int index; - int retval; if (!dev || !id) { printk(KERN_ERR "HVCS: probed with invalid parameter.\n"); @@ -820,16 +658,14 @@ static int __devinit hvcs_probe( * the hvcs_struct has been added to the devices list then the user app * will get -ENODEV. 
*/ + spin_lock(&hvcs_structs_lock); + list_add_tail(&(hvcsd->next), &hvcs_structs); + spin_unlock(&hvcs_structs_lock); - retval = sysfs_create_group(&dev->dev.kobj, &hvcs_attr_group); - if (retval) { - printk(KERN_ERR "HVCS: Can't create sysfs attrs for vty-server@%X\n", - hvcsd->vdev->unit_address); - return retval; - } + hvcs_create_device_attrs(hvcsd); printk(KERN_INFO "HVCS: vty-server@%X added to the vio bus.\n", dev->unit_address); @@ -1518,10 +1354,8 @@ static int __init hvcs_module_init(void) if (!hvcs_tty_driver) return -ENOMEM; - if (hvcs_alloc_index_list(num_ttys_to_alloc)) { - rc = -ENOMEM; - goto index_fail; - } + if (hvcs_alloc_index_list(num_ttys_to_alloc)) + return -ENOMEM; hvcs_tty_driver->owner = THIS_MODULE; @@ -1551,57 +1385,41 @@ static int __init hvcs_module_init(void) * dynamically assigned major and minor numbers for our devices. */ if (tty_register_driver(hvcs_tty_driver)) { - printk(KERN_ERR "HVCS: registration as a tty driver failed.\n"); - rc = -EIO; - goto register_fail; + printk(KERN_ERR "HVCS: registration " + " as a tty driver failed.\n"); + hvcs_free_index_list(); + put_tty_driver(hvcs_tty_driver); + return -EIO; } hvcs_pi_buff = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!hvcs_pi_buff) { - rc = -ENOMEM; - goto buff_alloc_fail; + tty_unregister_driver(hvcs_tty_driver); + hvcs_free_index_list(); + put_tty_driver(hvcs_tty_driver); + return -ENOMEM; } hvcs_task = kthread_run(khvcsd, NULL, "khvcsd"); if (IS_ERR(hvcs_task)) { printk(KERN_ERR "HVCS: khvcsd creation failed. Driver not loaded.\n"); - rc = -EIO; - goto kthread_fail; + kfree(hvcs_pi_buff); + tty_unregister_driver(hvcs_tty_driver); + hvcs_free_index_list(); + put_tty_driver(hvcs_tty_driver); + return -EIO; } rc = vio_register_driver(&hvcs_vio_driver); - if (rc) { - printk(KERN_ERR "HVCS: can't register vio driver\n"); - goto vio_fail; - } /* * This needs to be done AFTER the vio_register_driver() call or else * the kobjects won't be initialized properly. 
*/ - rc = driver_create_file(&(hvcs_vio_driver.driver), &driver_attr_rescan); - if (rc) { - printk(KERN_ERR "HVCS: sysfs attr create failed\n"); - goto attr_fail; - } + hvcs_create_driver_attrs(); printk(KERN_INFO "HVCS: driver module inserted.\n"); - return 0; - -attr_fail: - vio_unregister_driver(&hvcs_vio_driver); -vio_fail: - kthread_stop(hvcs_task); -kthread_fail: - kfree(hvcs_pi_buff); -buff_alloc_fail: - tty_unregister_driver(hvcs_tty_driver); -register_fail: - hvcs_free_index_list(); -index_fail: - put_tty_driver(hvcs_tty_driver); - hvcs_tty_driver = NULL; return rc; } @@ -1623,7 +1441,7 @@ static void __exit hvcs_module_exit(void) hvcs_pi_buff = NULL; spin_unlock(&hvcs_pi_lock); - driver_remove_file(&hvcs_vio_driver.driver, &driver_attr_rescan); + hvcs_remove_driver_attrs(); vio_unregister_driver(&hvcs_vio_driver); @@ -1638,3 +1456,191 @@ static void __exit hvcs_module_exit(void) module_init(hvcs_module_init); module_exit(hvcs_module_exit); + +static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod) +{ + return viod->dev.driver_data; +} +/* The sysfs interface for the driver and devices */ + +static ssize_t hvcs_partner_vtys_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct vio_dev *viod = to_vio_dev(dev); + struct hvcs_struct *hvcsd = from_vio_dev(viod); + unsigned long flags; + int retval; + + spin_lock_irqsave(&hvcsd->lock, flags); + retval = sprintf(buf, "%X\n", hvcsd->p_unit_address); + spin_unlock_irqrestore(&hvcsd->lock, flags); + return retval; +} +static DEVICE_ATTR(partner_vtys, S_IRUGO, hvcs_partner_vtys_show, NULL); + +static ssize_t hvcs_partner_clcs_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct vio_dev *viod = to_vio_dev(dev); + struct hvcs_struct *hvcsd = from_vio_dev(viod); + unsigned long flags; + int retval; + + spin_lock_irqsave(&hvcsd->lock, flags); + retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]); + spin_unlock_irqrestore(&hvcsd->lock, flags); + return retval; +} +static DEVICE_ATTR(partner_clcs, S_IRUGO, hvcs_partner_clcs_show, NULL); + +static ssize_t hvcs_current_vty_store(struct device *dev, struct device_attribute *attr, const char * buf, + size_t count) +{ + /* + * Don't need this feature at the present time because firmware doesn't + * yet support multiple partners. + */ + printk(KERN_INFO "HVCS: Denied current_vty change: -EPERM.\n"); + return -EPERM; +} + +static ssize_t hvcs_current_vty_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct vio_dev *viod = to_vio_dev(dev); + struct hvcs_struct *hvcsd = from_vio_dev(viod); + unsigned long flags; + int retval; + + spin_lock_irqsave(&hvcsd->lock, flags); + retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]); + spin_unlock_irqrestore(&hvcsd->lock, flags); + return retval; +} + +static DEVICE_ATTR(current_vty, + S_IRUGO | S_IWUSR, hvcs_current_vty_show, hvcs_current_vty_store); + +static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribute *attr, const char *buf, + size_t count) +{ + struct vio_dev *viod = to_vio_dev(dev); + struct hvcs_struct *hvcsd = from_vio_dev(viod); + unsigned long flags; + + /* writing a '0' to this sysfs entry will result in the disconnect. */ + if (simple_strtol(buf, NULL, 0) != 0) + return -EINVAL; + + spin_lock_irqsave(&hvcsd->lock, flags); + + if (hvcsd->open_count > 0) { + spin_unlock_irqrestore(&hvcsd->lock, flags); + printk(KERN_INFO "HVCS: vterm state unchanged. 
" + "The hvcs device node is still in use.\n"); + return -EPERM; + } + + if (hvcsd->connected == 0) { + spin_unlock_irqrestore(&hvcsd->lock, flags); + printk(KERN_INFO "HVCS: vterm state unchanged. The" + " vty-server is not connected to a vty.\n"); + return -EPERM; + } + + hvcs_partner_free(hvcsd); + printk(KERN_INFO "HVCS: Closed vty-server@%X and" + " partner vty@%X:%d connection.\n", + hvcsd->vdev->unit_address, + hvcsd->p_unit_address, + (uint32_t)hvcsd->p_partition_ID); + + spin_unlock_irqrestore(&hvcsd->lock, flags); + return count; +} + +static ssize_t hvcs_vterm_state_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct vio_dev *viod = to_vio_dev(dev); + struct hvcs_struct *hvcsd = from_vio_dev(viod); + unsigned long flags; + int retval; + + spin_lock_irqsave(&hvcsd->lock, flags); + retval = sprintf(buf, "%d\n", hvcsd->connected); + spin_unlock_irqrestore(&hvcsd->lock, flags); + return retval; +} +static DEVICE_ATTR(vterm_state, S_IRUGO | S_IWUSR, + hvcs_vterm_state_show, hvcs_vterm_state_store); + +static ssize_t hvcs_index_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct vio_dev *viod = to_vio_dev(dev); + struct hvcs_struct *hvcsd = from_vio_dev(viod); + unsigned long flags; + int retval; + + spin_lock_irqsave(&hvcsd->lock, flags); + retval = sprintf(buf, "%d\n", hvcsd->index); + spin_unlock_irqrestore(&hvcsd->lock, flags); + return retval; +} + +static DEVICE_ATTR(index, S_IRUGO, hvcs_index_show, NULL); + +static struct attribute *hvcs_attrs[] = { + &dev_attr_partner_vtys.attr, + &dev_attr_partner_clcs.attr, + &dev_attr_current_vty.attr, + &dev_attr_vterm_state.attr, + &dev_attr_index.attr, + NULL, +}; + +static struct attribute_group hvcs_attr_group = { + .attrs = hvcs_attrs, +}; + +static void hvcs_create_device_attrs(struct hvcs_struct *hvcsd) +{ + struct vio_dev *vdev = hvcsd->vdev; + sysfs_create_group(&vdev->dev.kobj, &hvcs_attr_group); +} + +static void hvcs_remove_device_attrs(struct vio_dev *vdev) +{ + sysfs_remove_group(&vdev->dev.kobj, &hvcs_attr_group); +} + +static ssize_t hvcs_rescan_show(struct device_driver *ddp, char *buf) +{ + /* A 1 means it is updating, a 0 means it is done updating */ + return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status); +} + +static ssize_t hvcs_rescan_store(struct device_driver *ddp, const char * buf, + size_t count) +{ + if ((simple_strtol(buf, NULL, 0) != 1) + && (hvcs_rescan_status != 0)) + return -EINVAL; + + hvcs_rescan_status = 1; + printk(KERN_INFO "HVCS: rescanning partner info for all" + " vty-servers.\n"); + hvcs_rescan_devices_list(); + hvcs_rescan_status = 0; + return count; +} +static DRIVER_ATTR(rescan, + S_IRUGO | S_IWUSR, hvcs_rescan_show, hvcs_rescan_store); + +static void hvcs_create_driver_attrs(void) +{ + struct device_driver *driverfs = &(hvcs_vio_driver.driver); + driver_create_file(driverfs, &driver_attr_rescan); +} + +static void hvcs_remove_driver_attrs(void) +{ + struct device_driver *driverfs = &(hvcs_vio_driver.driver); + driver_remove_file(driverfs, &driver_attr_rescan); +} diff --git a/trunk/drivers/char/hvsi.c b/trunk/drivers/char/hvsi.c index 82a41d5b4ed0..2cf63e7305a3 100644 --- a/trunk/drivers/char/hvsi.c +++ b/trunk/drivers/char/hvsi.c @@ -69,7 +69,7 @@ #define __ALIGNED__ __attribute__((__aligned__(sizeof(long)))) struct hvsi_struct { - struct delayed_work writer; + struct work_struct writer; struct work_struct handshaker; wait_queue_head_t emptyq; /* woken when outbuf is emptied */ wait_queue_head_t stateq; /* woken when HVSI state 
changes */ @@ -744,10 +744,9 @@ static int hvsi_handshake(struct hvsi_struct *hp) return 0; } -static void hvsi_handshaker(struct work_struct *work) +static void hvsi_handshaker(void *arg) { - struct hvsi_struct *hp = - container_of(work, struct hvsi_struct, handshaker); + struct hvsi_struct *hp = (struct hvsi_struct *)arg; if (hvsi_handshake(hp) >= 0) return; @@ -952,10 +951,9 @@ static void hvsi_push(struct hvsi_struct *hp) } /* hvsi_write_worker will keep rescheduling itself until outbuf is empty */ -static void hvsi_write_worker(struct work_struct *work) +static void hvsi_write_worker(void *arg) { - struct hvsi_struct *hp = - container_of(work, struct hvsi_struct, writer.work); + struct hvsi_struct *hp = (struct hvsi_struct *)arg; unsigned long flags; #ifdef DEBUG static long start_j = 0; @@ -1289,8 +1287,8 @@ static int __init hvsi_console_init(void) } hp = &hvsi_ports[hvsi_count]; - INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker); - INIT_WORK(&hp->handshaker, hvsi_handshaker); + INIT_WORK(&hp->writer, hvsi_write_worker, hp); + INIT_WORK(&hp->handshaker, hvsi_handshaker, hp); init_waitqueue_head(&hp->emptyq); init_waitqueue_head(&hp->stateq); spin_lock_init(&hp->lock); diff --git a/trunk/drivers/char/hw_random/Kconfig b/trunk/drivers/char/hw_random/Kconfig index 5f3acd8e64b8..9f7635f75178 100644 --- a/trunk/drivers/char/hw_random/Kconfig +++ b/trunk/drivers/char/hw_random/Kconfig @@ -3,20 +3,17 @@ # config HW_RANDOM - tristate "Hardware Random Number Generator Core support" - default m + bool "Hardware Random Number Generator Core support" + default y ---help--- Hardware Random Number Generator Core infrastructure. - To compile this driver as a module, choose M here: the - module will be called rng-core. - If unsure, say Y. config HW_RANDOM_INTEL tristate "Intel HW Random Number Generator support" depends on HW_RANDOM && (X86 || IA64) && PCI - default HW_RANDOM + default y ---help--- This driver provides kernel-side support for the Random Number Generator hardware found on Intel i8xx-based motherboards. @@ -29,7 +26,7 @@ config HW_RANDOM_INTEL config HW_RANDOM_AMD tristate "AMD HW Random Number Generator support" depends on HW_RANDOM && X86 && PCI - default HW_RANDOM + default y ---help--- This driver provides kernel-side support for the Random Number Generator hardware found on AMD 76x-based motherboards. @@ -42,7 +39,7 @@ config HW_RANDOM_AMD config HW_RANDOM_GEODE tristate "AMD Geode HW Random Number Generator support" depends on HW_RANDOM && X86 && PCI - default HW_RANDOM + default y ---help--- This driver provides kernel-side support for the Random Number Generator hardware found on the AMD Geode LX. @@ -55,7 +52,7 @@ config HW_RANDOM_GEODE config HW_RANDOM_VIA tristate "VIA HW Random Number Generator support" depends on HW_RANDOM && X86_32 - default HW_RANDOM + default y ---help--- This driver provides kernel-side support for the Random Number Generator hardware found on VIA based motherboards. @@ -68,7 +65,7 @@ config HW_RANDOM_VIA config HW_RANDOM_IXP4XX tristate "Intel IXP4xx NPU HW Random Number Generator support" depends on HW_RANDOM && ARCH_IXP4XX - default HW_RANDOM + default y ---help--- This driver provides kernel-side support for the Random Number Generator hardware found on the Intel IXP4xx NPU. 
@@ -81,7 +78,7 @@ config HW_RANDOM_IXP4XX config HW_RANDOM_OMAP tristate "OMAP Random Number Generator support" depends on HW_RANDOM && (ARCH_OMAP16XX || ARCH_OMAP24XX) - default HW_RANDOM + default y ---help--- This driver provides kernel-side support for the Random Number Generator hardware found on OMAP16xx and OMAP24xx multimedia diff --git a/trunk/drivers/char/hw_random/Makefile b/trunk/drivers/char/hw_random/Makefile index c41fa19454e3..e263ae96f940 100644 --- a/trunk/drivers/char/hw_random/Makefile +++ b/trunk/drivers/char/hw_random/Makefile @@ -2,8 +2,7 @@ # Makefile for HW Random Number Generator (RNG) device drivers. # -obj-$(CONFIG_HW_RANDOM) += rng-core.o -rng-core-y := core.o +obj-$(CONFIG_HW_RANDOM) += core.o obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o diff --git a/trunk/drivers/char/ip2/i2cmd.h b/trunk/drivers/char/ip2/i2cmd.h index 29277ec6b8ed..baa4e721b758 100644 --- a/trunk/drivers/char/ip2/i2cmd.h +++ b/trunk/drivers/char/ip2/i2cmd.h @@ -367,6 +367,11 @@ static UCHAR cc02[]; #define CSE_NULL 3 // Replace with a null #define CSE_MARK 4 // Replace with a 3-character sequence (as Unix) +#define CMD_SET_REPLACEMENT(arg,ch) \ + (((cmdSyntaxPtr)(ct36a))->cmd[1] = (arg), \ + (((cmdSyntaxPtr)(ct36a))->cmd[2] = (ch), \ + (cmdSyntaxPtr)(ct36a)) + #define CSE_REPLACE 0x8 // Replace the errored character with the // replacement character defined here diff --git a/trunk/drivers/char/ip2/i2lib.c b/trunk/drivers/char/ip2/i2lib.c index 78045767ec33..54d93f0345e8 100644 --- a/trunk/drivers/char/ip2/i2lib.c +++ b/trunk/drivers/char/ip2/i2lib.c @@ -84,8 +84,8 @@ static void iiSendPendingMail(i2eBordStrPtr); static void serviceOutgoingFifo(i2eBordStrPtr); // Functions defined in ip2.c as part of interrupt handling -static void do_input(struct work_struct *); -static void do_status(struct work_struct *); +static void do_input(void *); +static void do_status(void *); //*************** //* Debug Data * @@ -331,8 +331,8 @@ i2InitChannels ( i2eBordStrPtr pB, int nChannels, i2ChanStrPtr pCh) pCh->ClosingWaitTime = 30*HZ; // Initialize task queue objects - INIT_WORK(&pCh->tqueue_input, do_input); - INIT_WORK(&pCh->tqueue_status, do_status); + INIT_WORK(&pCh->tqueue_input, do_input, pCh); + INIT_WORK(&pCh->tqueue_status, do_status, pCh); #ifdef IP2DEBUG_TRACE pCh->trace = ip2trace; @@ -1016,6 +1016,7 @@ i2Output(i2ChanStrPtr pCh, const char *pSource, int count) unsigned short channel; unsigned short stuffIndex; unsigned long flags; + int rc = 0; int bailout = 10; @@ -1572,7 +1573,7 @@ i2StripFifo(i2eBordStrPtr pB) #ifdef USE_IQ schedule_work(&pCh->tqueue_input); #else - do_input(&pCh->tqueue_input); + do_input(pCh); #endif // Note we do not need to maintain any flow-control credits at this @@ -1809,7 +1810,7 @@ i2StripFifo(i2eBordStrPtr pB) #ifdef USE_IQ schedule_work(&pCh->tqueue_status); #else - do_status(&pCh->tqueue_status); + do_status(pCh); #endif } } diff --git a/trunk/drivers/char/ip2/ip2main.c b/trunk/drivers/char/ip2/ip2main.c index cda2459c1d60..a3f32d46d2f8 100644 --- a/trunk/drivers/char/ip2/ip2main.c +++ b/trunk/drivers/char/ip2/ip2main.c @@ -189,12 +189,12 @@ static int ip2_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear); static void set_irq(int, int); -static void ip2_interrupt_bh(struct work_struct *work); +static void ip2_interrupt_bh(i2eBordStrPtr pB); static irqreturn_t ip2_interrupt(int irq, void *dev_id); static void ip2_poll(unsigned long arg); 
static inline void service_all_boards(void); -static void do_input(struct work_struct *); -static void do_status(struct work_struct *); +static void do_input(void *p); +static void do_status(void *p); static void ip2_wait_until_sent(PTTY,int); @@ -918,7 +918,7 @@ ip2_init_board( int boardnum ) pCh++; } ex_exit: - INIT_WORK(&pB->tqueue_interrupt, ip2_interrupt_bh); + INIT_WORK(&pB->tqueue_interrupt, (void(*)(void*)) ip2_interrupt_bh, pB); return; err_release_region: @@ -1125,8 +1125,8 @@ service_all_boards(void) /******************************************************************************/ -/* Function: ip2_interrupt_bh(work) */ -/* Parameters: work - pointer to the board structure */ +/* Function: ip2_interrupt_bh(pB) */ +/* Parameters: pB - pointer to the board structure */ /* Returns: Nothing */ /* */ /* Description: */ @@ -1135,9 +1135,8 @@ service_all_boards(void) /* */ /******************************************************************************/ static void -ip2_interrupt_bh(struct work_struct *work) +ip2_interrupt_bh(i2eBordStrPtr pB) { - i2eBordStrPtr pB = container_of(work, i2eBordStr, tqueue_interrupt); // pB better well be set or we have a problem! We can only get // here from the IMMEDIATE queue. Here, we process the boards. // Checking pB doesn't cost much and it saves us from the sanity checkers. @@ -1246,9 +1245,9 @@ ip2_poll(unsigned long arg) ip2trace (ITRC_NO_PORT, ITRC_INTR, ITRC_RETURN, 0 ); } -static void do_input(struct work_struct *work) +static void do_input(void *p) { - i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_input); + i2ChanStrPtr pCh = p; unsigned long flags; ip2trace(CHANN, ITRC_INPUT, 21, 0 ); @@ -1280,9 +1279,9 @@ static inline void isig(int sig, struct tty_struct *tty, int flush) } } -static void do_status(struct work_struct *work) +static void do_status(void *p) { - i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_status); + i2ChanStrPtr pCh = p; int status; status = i2GetStatus( pCh, (I2_BRK|I2_PAR|I2_FRA|I2_OVR) ); diff --git a/trunk/drivers/char/ipmi/ipmi_bt_sm.c b/trunk/drivers/char/ipmi/ipmi_bt_sm.c index 6c59baa887a8..0030cd8e2e95 100644 --- a/trunk/drivers/char/ipmi/ipmi_bt_sm.c +++ b/trunk/drivers/char/ipmi/ipmi_bt_sm.c @@ -33,13 +33,11 @@ #include /* for completion codes */ #include "ipmi_si_sm.h" -#define BT_DEBUG_OFF 0 /* Used in production */ -#define BT_DEBUG_ENABLE 1 /* Generic messages */ -#define BT_DEBUG_MSG 2 /* Prints all request/response buffers */ -#define BT_DEBUG_STATES 4 /* Verbose look at state changes */ - -static int bt_debug = BT_DEBUG_OFF; +static int bt_debug = 0x00; /* Production value 0, see following flags */ +#define BT_DEBUG_ENABLE 1 +#define BT_DEBUG_MSG 2 +#define BT_DEBUG_STATES 4 module_param(bt_debug, int, 0644); MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); @@ -49,54 +47,38 @@ MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); Since the Open IPMI architecture is single-message oriented at this stage, the queue depth of BT is of no concern. */ -#define BT_NORMAL_TIMEOUT 5 /* seconds */ -#define BT_NORMAL_RETRY_LIMIT 2 -#define BT_RESET_DELAY 6 /* seconds after warm reset */ - -/* States are written in chronological order and usually cover - multiple rows of the state table discussion in the IPMI spec. 
*/ +#define BT_NORMAL_TIMEOUT 5000000 /* seconds in microseconds */ +#define BT_RETRY_LIMIT 2 +#define BT_RESET_DELAY 6000000 /* 6 seconds after warm reset */ enum bt_states { - BT_STATE_IDLE = 0, /* Order is critical in this list */ + BT_STATE_IDLE, BT_STATE_XACTION_START, BT_STATE_WRITE_BYTES, + BT_STATE_WRITE_END, BT_STATE_WRITE_CONSUME, - BT_STATE_READ_WAIT, - BT_STATE_CLEAR_B2H, - BT_STATE_READ_BYTES, - BT_STATE_RESET1, /* These must come last */ + BT_STATE_B2H_WAIT, + BT_STATE_READ_END, + BT_STATE_RESET1, /* These must come last */ BT_STATE_RESET2, BT_STATE_RESET3, BT_STATE_RESTART, - BT_STATE_PRINTME, - BT_STATE_CAPABILITIES_BEGIN, - BT_STATE_CAPABILITIES_END, - BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */ + BT_STATE_HOSED }; -/* Macros seen at the end of state "case" blocks. They help with legibility - and debugging. */ - -#define BT_STATE_CHANGE(X,Y) { bt->state = X; return Y; } - -#define BT_SI_SM_RETURN(Y) { last_printed = BT_STATE_PRINTME; return Y; } - struct si_sm_data { enum bt_states state; + enum bt_states last_state; /* assist printing and resets */ unsigned char seq; /* BT sequence number */ struct si_sm_io *io; - unsigned char write_data[IPMI_MAX_MSG_LENGTH]; - int write_count; - unsigned char read_data[IPMI_MAX_MSG_LENGTH]; - int read_count; - int truncated; - long timeout; /* microseconds countdown */ - int error_retries; /* end of "common" fields */ + unsigned char write_data[IPMI_MAX_MSG_LENGTH]; + int write_count; + unsigned char read_data[IPMI_MAX_MSG_LENGTH]; + int read_count; + int truncated; + long timeout; + unsigned int error_retries; /* end of "common" fields */ int nonzero_status; /* hung BMCs stay all 0 */ - enum bt_states complete; /* to divert the state machine */ - int BT_CAP_outreqs; - long BT_CAP_req2rsp; - int BT_CAP_retries; /* Recommended retries */ }; #define BT_CLR_WR_PTR 0x01 /* See IPMI 1.5 table 11.6.4 */ @@ -129,118 +111,86 @@ struct si_sm_data { static char *state2txt(unsigned char state) { switch (state) { - case BT_STATE_IDLE: return("IDLE"); - case BT_STATE_XACTION_START: return("XACTION"); - case BT_STATE_WRITE_BYTES: return("WR_BYTES"); - case BT_STATE_WRITE_CONSUME: return("WR_CONSUME"); - case BT_STATE_READ_WAIT: return("RD_WAIT"); - case BT_STATE_CLEAR_B2H: return("CLEAR_B2H"); - case BT_STATE_READ_BYTES: return("RD_BYTES"); - case BT_STATE_RESET1: return("RESET1"); - case BT_STATE_RESET2: return("RESET2"); - case BT_STATE_RESET3: return("RESET3"); - case BT_STATE_RESTART: return("RESTART"); - case BT_STATE_LONG_BUSY: return("LONG_BUSY"); - case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN"); - case BT_STATE_CAPABILITIES_END: return("CAP_END"); + case BT_STATE_IDLE: return("IDLE"); + case BT_STATE_XACTION_START: return("XACTION"); + case BT_STATE_WRITE_BYTES: return("WR_BYTES"); + case BT_STATE_WRITE_END: return("WR_END"); + case BT_STATE_WRITE_CONSUME: return("WR_CONSUME"); + case BT_STATE_B2H_WAIT: return("B2H_WAIT"); + case BT_STATE_READ_END: return("RD_END"); + case BT_STATE_RESET1: return("RESET1"); + case BT_STATE_RESET2: return("RESET2"); + case BT_STATE_RESET3: return("RESET3"); + case BT_STATE_RESTART: return("RESTART"); + case BT_STATE_HOSED: return("HOSED"); } return("BAD STATE"); } #define STATE2TXT state2txt(bt->state) -static char *status2txt(unsigned char status) +static char *status2txt(unsigned char status, char *buf) { - /* - * This cannot be called by two threads at the same time and - * the buffer is always consumed immediately, so the static is - * safe to use. 
- */ - static char buf[40]; - strcpy(buf, "[ "); - if (status & BT_B_BUSY) - strcat(buf, "B_BUSY "); - if (status & BT_H_BUSY) - strcat(buf, "H_BUSY "); - if (status & BT_OEM0) - strcat(buf, "OEM0 "); - if (status & BT_SMS_ATN) - strcat(buf, "SMS "); - if (status & BT_B2H_ATN) - strcat(buf, "B2H "); - if (status & BT_H2B_ATN) - strcat(buf, "H2B "); + if (status & BT_B_BUSY) strcat(buf, "B_BUSY "); + if (status & BT_H_BUSY) strcat(buf, "H_BUSY "); + if (status & BT_OEM0) strcat(buf, "OEM0 "); + if (status & BT_SMS_ATN) strcat(buf, "SMS "); + if (status & BT_B2H_ATN) strcat(buf, "B2H "); + if (status & BT_H2B_ATN) strcat(buf, "H2B "); strcat(buf, "]"); return buf; } -#define STATUS2TXT status2txt(status) - -/* called externally at insmod time, and internally on cleanup */ +#define STATUS2TXT(buf) status2txt(status, buf) +/* This will be called from within this module on a hosed condition */ +#define FIRST_SEQ 0 static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io) { - memset(bt, 0, sizeof(struct si_sm_data)); - if (bt->io != io) { /* external: one-time only things */ - bt->io = io; - bt->seq = 0; - } - bt->state = BT_STATE_IDLE; /* start here */ - bt->complete = BT_STATE_IDLE; /* end here */ - bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * 1000000; - bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT; - /* BT_CAP_outreqs == zero is a flag to read BT Capabilities */ + bt->state = BT_STATE_IDLE; + bt->last_state = BT_STATE_IDLE; + bt->seq = FIRST_SEQ; + bt->io = io; + bt->write_count = 0; + bt->read_count = 0; + bt->error_retries = 0; + bt->nonzero_status = 0; + bt->truncated = 0; + bt->timeout = BT_NORMAL_TIMEOUT; return 3; /* We claim 3 bytes of space; ought to check SPMI table */ } -/* Jam a completion code (probably an error) into a response */ - -static void force_result(struct si_sm_data *bt, unsigned char completion_code) -{ - bt->read_data[0] = 4; /* # following bytes */ - bt->read_data[1] = bt->write_data[1] | 4; /* Odd NetFn/LUN */ - bt->read_data[2] = bt->write_data[2]; /* seq (ignored) */ - bt->read_data[3] = bt->write_data[3]; /* Command */ - bt->read_data[4] = completion_code; - bt->read_count = 5; -} - -/* The upper state machine starts here */ - static int bt_start_transaction(struct si_sm_data *bt, unsigned char *data, unsigned int size) { unsigned int i; - if (size < 2) - return IPMI_REQ_LEN_INVALID_ERR; - if (size > IPMI_MAX_MSG_LENGTH) - return IPMI_REQ_LEN_EXCEEDED_ERR; + if ((size < 2) || (size > (IPMI_MAX_MSG_LENGTH - 2))) + return -1; - if (bt->state == BT_STATE_LONG_BUSY) - return IPMI_NODE_BUSY_ERR; - - if (bt->state != BT_STATE_IDLE) - return IPMI_NOT_IN_MY_STATE_ERR; + if ((bt->state != BT_STATE_IDLE) && (bt->state != BT_STATE_HOSED)) + return -2; if (bt_debug & BT_DEBUG_MSG) { - printk(KERN_WARNING "BT: +++++++++++++++++ New command\n"); - printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2); + printk(KERN_WARNING "+++++++++++++++++++++++++++++++++++++\n"); + printk(KERN_WARNING "BT: write seq=0x%02X:", bt->seq); for (i = 0; i < size; i ++) - printk (" %02x", data[i]); + printk (" %02x", data[i]); printk("\n"); } bt->write_data[0] = size + 1; /* all data plus seq byte */ bt->write_data[1] = *data; /* NetFn/LUN */ - bt->write_data[2] = bt->seq++; + bt->write_data[2] = bt->seq; memcpy(bt->write_data + 3, data + 1, size - 1); bt->write_count = size + 2; + bt->error_retries = 0; bt->nonzero_status = 0; + bt->read_count = 0; bt->truncated = 0; bt->state = BT_STATE_XACTION_START; - bt->timeout = bt->BT_CAP_req2rsp; - force_result(bt, 
IPMI_ERR_UNSPECIFIED); + bt->last_state = BT_STATE_IDLE; + bt->timeout = BT_NORMAL_TIMEOUT; return 0; } @@ -248,30 +198,38 @@ static int bt_start_transaction(struct si_sm_data *bt, it calls this. Strip out the length and seq bytes. */ static int bt_get_result(struct si_sm_data *bt, - unsigned char *data, - unsigned int length) + unsigned char *data, + unsigned int length) { int i, msg_len; msg_len = bt->read_count - 2; /* account for length & seq */ + /* Always NetFn, Cmd, cCode */ if (msg_len < 3 || msg_len > IPMI_MAX_MSG_LENGTH) { - force_result(bt, IPMI_ERR_UNSPECIFIED); + printk(KERN_DEBUG "BT results: bad msg_len = %d\n", msg_len); + data[0] = bt->write_data[1] | 0x4; /* Kludge a response */ + data[1] = bt->write_data[3]; + data[2] = IPMI_ERR_UNSPECIFIED; msg_len = 3; - } - data[0] = bt->read_data[1]; - data[1] = bt->read_data[3]; - if (length < msg_len || bt->truncated) { - data[2] = IPMI_ERR_MSG_TRUNCATED; - msg_len = 3; - } else - memcpy(data + 2, bt->read_data + 4, msg_len - 2); + } else { + data[0] = bt->read_data[1]; + data[1] = bt->read_data[3]; + if (length < msg_len) + bt->truncated = 1; + if (bt->truncated) { /* can be set in read_all_bytes() */ + data[2] = IPMI_ERR_MSG_TRUNCATED; + msg_len = 3; + } else + memcpy(data + 2, bt->read_data + 4, msg_len - 2); - if (bt_debug & BT_DEBUG_MSG) { - printk (KERN_WARNING "BT: result %d bytes:", msg_len); - for (i = 0; i < msg_len; i++) - printk(" %02x", data[i]); - printk ("\n"); + if (bt_debug & BT_DEBUG_MSG) { + printk (KERN_WARNING "BT: res (raw)"); + for (i = 0; i < msg_len; i++) + printk(" %02x", data[i]); + printk ("\n"); + } } + bt->read_count = 0; /* paranoia */ return msg_len; } @@ -280,40 +238,22 @@ static int bt_get_result(struct si_sm_data *bt, static void reset_flags(struct si_sm_data *bt) { - if (bt_debug) - printk(KERN_WARNING "IPMI BT: flag reset %s\n", - status2txt(BT_STATUS)); if (BT_STATUS & BT_H_BUSY) - BT_CONTROL(BT_H_BUSY); /* force clear */ - BT_CONTROL(BT_CLR_WR_PTR); /* always reset */ - BT_CONTROL(BT_SMS_ATN); /* always clear */ - BT_INTMASK_W(BT_BMC_HWRST); -} - -/* Get rid of an unwanted/stale response. This should only be needed for - BMCs that support multiple outstanding requests. 
*/ - -static void drain_BMC2HOST(struct si_sm_data *bt) -{ - int i, size; - - if (!(BT_STATUS & BT_B2H_ATN)) /* Not signalling a response */ - return; - - BT_CONTROL(BT_H_BUSY); /* now set */ - BT_CONTROL(BT_B2H_ATN); /* always clear */ - BT_STATUS; /* pause */ - BT_CONTROL(BT_B2H_ATN); /* some BMCs are stubborn */ - BT_CONTROL(BT_CLR_RD_PTR); /* always reset */ - if (bt_debug) - printk(KERN_WARNING "IPMI BT: stale response %s; ", - status2txt(BT_STATUS)); - size = BMC2HOST; - for (i = 0; i < size ; i++) - BMC2HOST; - BT_CONTROL(BT_H_BUSY); /* now clear */ - if (bt_debug) - printk("drained %d bytes\n", size + 1); + BT_CONTROL(BT_H_BUSY); + if (BT_STATUS & BT_B_BUSY) + BT_CONTROL(BT_B_BUSY); + BT_CONTROL(BT_CLR_WR_PTR); + BT_CONTROL(BT_SMS_ATN); + + if (BT_STATUS & BT_B2H_ATN) { + int i; + BT_CONTROL(BT_H_BUSY); + BT_CONTROL(BT_B2H_ATN); + BT_CONTROL(BT_CLR_RD_PTR); + for (i = 0; i < IPMI_MAX_MSG_LENGTH + 2; i++) + BMC2HOST; + BT_CONTROL(BT_H_BUSY); + } } static inline void write_all_bytes(struct si_sm_data *bt) @@ -321,256 +261,201 @@ static inline void write_all_bytes(struct si_sm_data *bt) int i; if (bt_debug & BT_DEBUG_MSG) { - printk(KERN_WARNING "BT: write %d bytes seq=0x%02X", + printk(KERN_WARNING "BT: write %d bytes seq=0x%02X", bt->write_count, bt->seq); for (i = 0; i < bt->write_count; i++) printk (" %02x", bt->write_data[i]); printk ("\n"); } for (i = 0; i < bt->write_count; i++) - HOST2BMC(bt->write_data[i]); + HOST2BMC(bt->write_data[i]); } static inline int read_all_bytes(struct si_sm_data *bt) { unsigned char i; - /* length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode. - Keep layout of first four bytes aligned with write_data[] */ - bt->read_data[0] = BMC2HOST; bt->read_count = bt->read_data[0]; + if (bt_debug & BT_DEBUG_MSG) + printk(KERN_WARNING "BT: read %d bytes:", bt->read_count); + /* minimum: length, NetFn, Seq, Cmd, cCode == 5 total, or 4 more + following the length byte. */ if (bt->read_count < 4 || bt->read_count >= IPMI_MAX_MSG_LENGTH) { if (bt_debug & BT_DEBUG_MSG) - printk(KERN_WARNING "BT: bad raw rsp len=%d\n", - bt->read_count); + printk("bad length %d\n", bt->read_count); bt->truncated = 1; return 1; /* let next XACTION START clean it up */ } for (i = 1; i <= bt->read_count; i++) - bt->read_data[i] = BMC2HOST; - bt->read_count++; /* Account internally for length byte */ + bt->read_data[i] = BMC2HOST; + bt->read_count++; /* account for the length byte */ if (bt_debug & BT_DEBUG_MSG) { - int max = bt->read_count; - - printk(KERN_WARNING "BT: got %d bytes seq=0x%02X", - max, bt->read_data[2]); - if (max > 16) - max = 16; - for (i = 0; i < max; i++) + for (i = 0; i < bt->read_count; i++) printk (" %02x", bt->read_data[i]); - printk ("%s\n", bt->read_count == max ? 
"" : " ..."); + printk ("\n"); } + if (bt->seq != bt->write_data[2]) /* idiot check */ + printk(KERN_DEBUG "BT: internal error: sequence mismatch\n"); - /* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */ - if ((bt->read_data[3] == bt->write_data[3]) && - (bt->read_data[2] == bt->write_data[2]) && - ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8))) + /* per the spec, the (NetFn, Seq, Cmd) tuples should match */ + if ((bt->read_data[3] == bt->write_data[3]) && /* Cmd */ + (bt->read_data[2] == bt->write_data[2]) && /* Sequence */ + ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8))) return 1; if (bt_debug & BT_DEBUG_MSG) - printk(KERN_WARNING "IPMI BT: bad packet: " + printk(KERN_WARNING "BT: bad packet: " "want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n", - bt->write_data[1] | 0x04, bt->write_data[2], bt->write_data[3], + bt->write_data[1], bt->write_data[2], bt->write_data[3], bt->read_data[1], bt->read_data[2], bt->read_data[3]); return 0; } -/* Restart if retries are left, or return an error completion code */ +/* Modifies bt->state appropriately, need to get into the bt_event() switch */ -static enum si_sm_result error_recovery(struct si_sm_data *bt, - unsigned char status, - unsigned char cCode) +static void error_recovery(struct si_sm_data *bt, char *reason) { - char *reason; + unsigned char status; + char buf[40]; /* For getting status */ - bt->timeout = bt->BT_CAP_req2rsp; - - switch (cCode) { - case IPMI_TIMEOUT_ERR: - reason = "timeout"; - break; - default: - reason = "internal error"; - break; - } + bt->timeout = BT_NORMAL_TIMEOUT; /* various places want to retry */ - printk(KERN_WARNING "IPMI BT: %s in %s %s ", /* open-ended line */ - reason, STATE2TXT, STATUS2TXT); + status = BT_STATUS; + printk(KERN_DEBUG "BT: %s in %s %s\n", reason, STATE2TXT, + STATUS2TXT(buf)); - /* Per the IPMI spec, retries are based on the sequence number - known only to this module, so manage a restart here. */ (bt->error_retries)++; - if (bt->error_retries < bt->BT_CAP_retries) { - printk("%d retries left\n", - bt->BT_CAP_retries - bt->error_retries); - bt->state = BT_STATE_RESTART; - return SI_SM_CALL_WITHOUT_DELAY; + if (bt->error_retries > BT_RETRY_LIMIT) { + printk(KERN_DEBUG "retry limit (%d) exceeded\n", BT_RETRY_LIMIT); + bt->state = BT_STATE_HOSED; + if (!bt->nonzero_status) + printk(KERN_ERR "IPMI: BT stuck, try power cycle\n"); + else if (bt->error_retries <= BT_RETRY_LIMIT + 1) { + printk(KERN_DEBUG "IPMI: BT reset (takes 5 secs)\n"); + bt->state = BT_STATE_RESET1; + } + return; } - printk("failed %d retries, sending error response\n", - bt->BT_CAP_retries); - if (!bt->nonzero_status) - printk(KERN_ERR "IPMI BT: stuck, try power cycle\n"); - - /* this is most likely during insmod */ - else if (bt->seq <= (unsigned char)(bt->BT_CAP_retries & 0xFF)) { - printk(KERN_WARNING "IPMI: BT reset (takes 5 secs)\n"); - bt->state = BT_STATE_RESET1; - return SI_SM_CALL_WITHOUT_DELAY; + /* Sometimes the BMC queues get in an "off-by-one" state...*/ + if ((bt->state == BT_STATE_B2H_WAIT) && (status & BT_B2H_ATN)) { + printk(KERN_DEBUG "retry B2H_WAIT\n"); + return; } - /* Concoct a useful error message, set up the next state, and - be done with this sequence. 
*/ - - bt->state = BT_STATE_IDLE; - switch (cCode) { - case IPMI_TIMEOUT_ERR: - if (status & BT_B_BUSY) { - cCode = IPMI_NODE_BUSY_ERR; - bt->state = BT_STATE_LONG_BUSY; - } - break; - default: - break; - } - force_result(bt, cCode); - return SI_SM_TRANSACTION_COMPLETE; + printk(KERN_DEBUG "restart command\n"); + bt->state = BT_STATE_RESTART; } -/* Check status and (usually) take action and change this state machine. */ +/* Check the status and (possibly) advance the BT state machine. The + default return is SI_SM_CALL_WITH_DELAY. */ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) { - unsigned char status, BT_CAP[8]; - static enum bt_states last_printed = BT_STATE_PRINTME; + unsigned char status; + char buf[40]; /* For getting status */ int i; status = BT_STATUS; bt->nonzero_status |= status; - if ((bt_debug & BT_DEBUG_STATES) && (bt->state != last_printed)) { + + if ((bt_debug & BT_DEBUG_STATES) && (bt->state != bt->last_state)) printk(KERN_WARNING "BT: %s %s TO=%ld - %ld \n", STATE2TXT, - STATUS2TXT, + STATUS2TXT(buf), bt->timeout, time); - last_printed = bt->state; - } + bt->last_state = bt->state; - /* Commands that time out may still (eventually) provide a response. - This stale response will get in the way of a new response so remove - it if possible (hopefully during IDLE). Even if it comes up later - it will be rejected by its (now-forgotten) seq number. */ - - if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) { - drain_BMC2HOST(bt); - BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); - } + if (bt->state == BT_STATE_HOSED) + return SI_SM_HOSED; - if ((bt->state != BT_STATE_IDLE) && - (bt->state < BT_STATE_PRINTME)) { /* check timeout */ + if (bt->state != BT_STATE_IDLE) { /* do timeout test */ bt->timeout -= time; - if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) - return error_recovery(bt, - status, - IPMI_TIMEOUT_ERR); + if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) { + error_recovery(bt, "timed out"); + return SI_SM_CALL_WITHOUT_DELAY; + } } switch (bt->state) { - /* Idle state first checks for asynchronous messages from another - channel, then does some opportunistic housekeeping. */ - - case BT_STATE_IDLE: + case BT_STATE_IDLE: /* check for asynchronous messages */ if (status & BT_SMS_ATN) { BT_CONTROL(BT_SMS_ATN); /* clear it */ return SI_SM_ATTN; } - - if (status & BT_H_BUSY) /* clear a leftover H_BUSY */ - BT_CONTROL(BT_H_BUSY); - - /* Read BT capabilities if it hasn't been done yet */ - if (!bt->BT_CAP_outreqs) - BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN, - SI_SM_CALL_WITHOUT_DELAY); - bt->timeout = bt->BT_CAP_req2rsp; - BT_SI_SM_RETURN(SI_SM_IDLE); + return SI_SM_IDLE; case BT_STATE_XACTION_START: - if (status & (BT_B_BUSY | BT_H2B_ATN)) - BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); - if (BT_STATUS & BT_H_BUSY) - BT_CONTROL(BT_H_BUSY); /* force clear */ - BT_STATE_CHANGE(BT_STATE_WRITE_BYTES, - SI_SM_CALL_WITHOUT_DELAY); + if (status & BT_H_BUSY) { + BT_CONTROL(BT_H_BUSY); + break; + } + if (status & BT_B2H_ATN) + break; + bt->state = BT_STATE_WRITE_BYTES; + return SI_SM_CALL_WITHOUT_DELAY; /* for logging */ case BT_STATE_WRITE_BYTES: - if (status & BT_H_BUSY) - BT_CONTROL(BT_H_BUSY); /* clear */ + if (status & (BT_B_BUSY | BT_H2B_ATN)) + break; BT_CONTROL(BT_CLR_WR_PTR); write_all_bytes(bt); - BT_CONTROL(BT_H2B_ATN); /* can clear too fast to catch */ - BT_STATE_CHANGE(BT_STATE_WRITE_CONSUME, - SI_SM_CALL_WITHOUT_DELAY); + BT_CONTROL(BT_H2B_ATN); /* clears too fast to catch? 
*/ + bt->state = BT_STATE_WRITE_CONSUME; + return SI_SM_CALL_WITHOUT_DELAY; /* it MIGHT sail through */ + + case BT_STATE_WRITE_CONSUME: /* BMCs usually blow right thru here */ + if (status & (BT_H2B_ATN | BT_B_BUSY)) + break; + bt->state = BT_STATE_B2H_WAIT; + /* fall through with status */ + + /* Stay in BT_STATE_B2H_WAIT until a packet matches. However, spinning + hard here, constantly reading status, seems to hold off the + generation of B2H_ATN so ALWAYS return CALL_WITH_DELAY. */ + + case BT_STATE_B2H_WAIT: + if (!(status & BT_B2H_ATN)) + break; + + /* Assume ordered, uncached writes: no need to wait */ + if (!(status & BT_H_BUSY)) + BT_CONTROL(BT_H_BUSY); /* set */ + BT_CONTROL(BT_B2H_ATN); /* clear it, ACK to the BMC */ + BT_CONTROL(BT_CLR_RD_PTR); /* reset the queue */ + i = read_all_bytes(bt); + BT_CONTROL(BT_H_BUSY); /* clear */ + if (!i) /* Try this state again */ + break; + bt->state = BT_STATE_READ_END; + return SI_SM_CALL_WITHOUT_DELAY; /* for logging */ + + case BT_STATE_READ_END: + + /* I could wait on BT_H_BUSY to go clear for a truly clean + exit. However, this is already done in XACTION_START + and the (possible) extra loop/status/possible wait affects + performance. So, as long as it works, just ignore H_BUSY */ + +#ifdef MAKE_THIS_TRUE_IF_NECESSARY - case BT_STATE_WRITE_CONSUME: - if (status & (BT_B_BUSY | BT_H2B_ATN)) - BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); - BT_STATE_CHANGE(BT_STATE_READ_WAIT, - SI_SM_CALL_WITHOUT_DELAY); - - /* Spinning hard can suppress B2H_ATN and force a timeout */ - - case BT_STATE_READ_WAIT: - if (!(status & BT_B2H_ATN)) - BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); - BT_CONTROL(BT_H_BUSY); /* set */ - - /* Uncached, ordered writes should just proceeed serially but - some BMCs don't clear B2H_ATN with one hit. Fast-path a - workaround without too much penalty to the general case. */ - - BT_CONTROL(BT_B2H_ATN); /* clear it to ACK the BMC */ - BT_STATE_CHANGE(BT_STATE_CLEAR_B2H, - SI_SM_CALL_WITHOUT_DELAY); - - case BT_STATE_CLEAR_B2H: - if (status & BT_B2H_ATN) { /* keep hitting it */ - BT_CONTROL(BT_B2H_ATN); - BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); - } - BT_STATE_CHANGE(BT_STATE_READ_BYTES, - SI_SM_CALL_WITHOUT_DELAY); - - case BT_STATE_READ_BYTES: - if (!(status & BT_H_BUSY)) /* check in case of retry */ - BT_CONTROL(BT_H_BUSY); - BT_CONTROL(BT_CLR_RD_PTR); /* start of BMC2HOST buffer */ - i = read_all_bytes(bt); /* true == packet seq match */ - BT_CONTROL(BT_H_BUSY); /* NOW clear */ - if (!i) /* Not my message */ - BT_STATE_CHANGE(BT_STATE_READ_WAIT, - SI_SM_CALL_WITHOUT_DELAY); - bt->state = bt->complete; - return bt->state == BT_STATE_IDLE ? /* where to next? 
*/ - SI_SM_TRANSACTION_COMPLETE : /* normal */ - SI_SM_CALL_WITHOUT_DELAY; /* Startup magic */ - - case BT_STATE_LONG_BUSY: /* For example: after FW update */ - if (!(status & BT_B_BUSY)) { - reset_flags(bt); /* next state is now IDLE */ - bt_init_data(bt, bt->io); - } - return SI_SM_CALL_WITH_DELAY; /* No repeat printing */ + if (status & BT_H_BUSY) + break; +#endif + bt->seq++; + bt->state = BT_STATE_IDLE; + return SI_SM_TRANSACTION_COMPLETE; case BT_STATE_RESET1: - reset_flags(bt); - drain_BMC2HOST(bt); - BT_STATE_CHANGE(BT_STATE_RESET2, - SI_SM_CALL_WITH_DELAY); + reset_flags(bt); + bt->timeout = BT_RESET_DELAY; + bt->state = BT_STATE_RESET2; + break; case BT_STATE_RESET2: /* Send a soft reset */ BT_CONTROL(BT_CLR_WR_PTR); @@ -579,59 +464,29 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) HOST2BMC(42); /* Sequence number */ HOST2BMC(3); /* Cmd == Soft reset */ BT_CONTROL(BT_H2B_ATN); - bt->timeout = BT_RESET_DELAY * 1000000; - BT_STATE_CHANGE(BT_STATE_RESET3, - SI_SM_CALL_WITH_DELAY); + bt->state = BT_STATE_RESET3; + break; - case BT_STATE_RESET3: /* Hold off everything for a bit */ + case BT_STATE_RESET3: if (bt->timeout > 0) - return SI_SM_CALL_WITH_DELAY; - drain_BMC2HOST(bt); - BT_STATE_CHANGE(BT_STATE_RESTART, - SI_SM_CALL_WITH_DELAY); + return SI_SM_CALL_WITH_DELAY; + bt->state = BT_STATE_RESTART; /* printk in debug modes */ + break; - case BT_STATE_RESTART: /* don't reset retries or seq! */ + case BT_STATE_RESTART: /* don't reset retries! */ + reset_flags(bt); + bt->write_data[2] = ++bt->seq; bt->read_count = 0; bt->nonzero_status = 0; - bt->timeout = bt->BT_CAP_req2rsp; - BT_STATE_CHANGE(BT_STATE_XACTION_START, - SI_SM_CALL_WITH_DELAY); - - /* Get BT Capabilities, using timing of upper level state machine. - Set outreqs to prevent infinite loop on timeout. */ - case BT_STATE_CAPABILITIES_BEGIN: - bt->BT_CAP_outreqs = 1; - { - unsigned char GetBT_CAP[] = { 0x18, 0x36 }; - bt->state = BT_STATE_IDLE; - bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP)); - } - bt->complete = BT_STATE_CAPABILITIES_END; - BT_STATE_CHANGE(BT_STATE_XACTION_START, - SI_SM_CALL_WITH_DELAY); - - case BT_STATE_CAPABILITIES_END: - i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP)); - bt_init_data(bt, bt->io); - if ((i == 8) && !BT_CAP[2]) { - bt->BT_CAP_outreqs = BT_CAP[3]; - bt->BT_CAP_req2rsp = BT_CAP[6] * 1000000; - bt->BT_CAP_retries = BT_CAP[7]; - } else - printk(KERN_WARNING "IPMI BT: using default values\n"); - if (!bt->BT_CAP_outreqs) - bt->BT_CAP_outreqs = 1; - printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n", - bt->BT_CAP_req2rsp / 1000000L, bt->BT_CAP_retries); - bt->timeout = bt->BT_CAP_req2rsp; - return SI_SM_CALL_WITHOUT_DELAY; - - default: /* should never occur */ - return error_recovery(bt, - status, - IPMI_ERR_UNSPECIFIED); - } - return SI_SM_CALL_WITH_DELAY; + bt->timeout = BT_NORMAL_TIMEOUT; + bt->state = BT_STATE_XACTION_START; + break; + + default: /* HOSED is supposed to be caught much earlier */ + error_recovery(bt, "internal logic error"); + break; + } + return SI_SM_CALL_WITH_DELAY; } static int bt_detect(struct si_sm_data *bt) @@ -642,7 +497,7 @@ static int bt_detect(struct si_sm_data *bt) test that first. The calling routine uses negative logic. 
*/ if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) - return 1; + return 1; reset_flags(bt); return 0; } @@ -658,11 +513,11 @@ static int bt_size(void) struct si_sm_handlers bt_smi_handlers = { - .init_data = bt_init_data, - .start_transaction = bt_start_transaction, - .get_result = bt_get_result, - .event = bt_event, - .detect = bt_detect, - .cleanup = bt_cleanup, - .size = bt_size, + .init_data = bt_init_data, + .start_transaction = bt_start_transaction, + .get_result = bt_get_result, + .event = bt_event, + .detect = bt_detect, + .cleanup = bt_cleanup, + .size = bt_size, }; diff --git a/trunk/drivers/char/ipmi/ipmi_devintf.c b/trunk/drivers/char/ipmi/ipmi_devintf.c index 375d3378eecd..81fcf0ce21d1 100644 --- a/trunk/drivers/char/ipmi/ipmi_devintf.c +++ b/trunk/drivers/char/ipmi/ipmi_devintf.c @@ -596,31 +596,6 @@ static int ipmi_ioctl(struct inode *inode, rv = 0; break; } - - case IPMICTL_GET_MAINTENANCE_MODE_CMD: - { - int mode; - - mode = ipmi_get_maintenance_mode(priv->user); - if (copy_to_user(arg, &mode, sizeof(mode))) { - rv = -EFAULT; - break; - } - rv = 0; - break; - } - - case IPMICTL_SET_MAINTENANCE_MODE_CMD: - { - int mode; - - if (copy_from_user(&mode, arg, sizeof(mode))) { - rv = -EFAULT; - break; - } - rv = ipmi_set_maintenance_mode(priv->user, mode); - break; - } } return rv; diff --git a/trunk/drivers/char/ipmi/ipmi_kcs_sm.c b/trunk/drivers/char/ipmi/ipmi_kcs_sm.c index c1b8228cb7b6..2062675f9e99 100644 --- a/trunk/drivers/char/ipmi/ipmi_kcs_sm.c +++ b/trunk/drivers/char/ipmi/ipmi_kcs_sm.c @@ -93,8 +93,8 @@ enum kcs_states { state machine. */ }; -#define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH -#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH +#define MAX_KCS_READ_SIZE 80 +#define MAX_KCS_WRITE_SIZE 80 /* Timeouts in microseconds. */ #define IBF_RETRY_TIMEOUT 1000000 @@ -261,14 +261,12 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data, { unsigned int i; - if (size < 2) - return IPMI_REQ_LEN_INVALID_ERR; - if (size > MAX_KCS_WRITE_SIZE) - return IPMI_REQ_LEN_EXCEEDED_ERR; - - if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) - return IPMI_NOT_IN_MY_STATE_ERR; - + if ((size < 2) || (size > MAX_KCS_WRITE_SIZE)) { + return -1; + } + if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) { + return -2; + } if (kcs_debug & KCS_DEBUG_MSG) { printk(KERN_DEBUG "start_kcs_transaction -"); for (i = 0; i < size; i ++) { diff --git a/trunk/drivers/char/ipmi/ipmi_msghandler.c b/trunk/drivers/char/ipmi/ipmi_msghandler.c index 5703ee28e1cc..c47add8e47df 100644 --- a/trunk/drivers/char/ipmi/ipmi_msghandler.c +++ b/trunk/drivers/char/ipmi/ipmi_msghandler.c @@ -48,7 +48,7 @@ #define PFX "IPMI message handler: " -#define IPMI_DRIVER_VERSION "39.1" +#define IPMI_DRIVER_VERSION "39.0" static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); static int ipmi_init_msghandler(void); @@ -59,9 +59,6 @@ static int initialized = 0; static struct proc_dir_entry *proc_ipmi_root = NULL; #endif /* CONFIG_PROC_FS */ -/* Remain in auto-maintenance mode for this amount of time (in ms). */ -#define IPMI_MAINTENANCE_MODE_TIMEOUT 30000 - #define MAX_EVENTS_IN_QUEUE 25 /* Don't let a message sit in a queue forever, always time it with at lest @@ -196,28 +193,17 @@ struct ipmi_smi struct kref refcount; - /* Used for a list of interfaces. */ - struct list_head link; - /* The list of upper layers that are using me. seq_lock * protects this. */ struct list_head users; - /* Information to supply to users. 
*/ - unsigned char ipmi_version_major; - unsigned char ipmi_version_minor; - /* Used for wake ups at startup. */ wait_queue_head_t waitq; struct bmc_device *bmc; char *my_dev_name; - char *sysfs_name; - /* This is the lower-layer's sender routine. Note that you - * must either be holding the ipmi_interfaces_mutex or be in - * an umpreemptible region to use this. You must fetch the - * value into a local variable and make sure it is not NULL. */ + /* This is the lower-layer's sender routine. */ struct ipmi_smi_handlers *handlers; void *send_info; @@ -256,7 +242,6 @@ struct ipmi_smi spinlock_t events_lock; /* For dealing with event stuff. */ struct list_head waiting_events; unsigned int waiting_events_count; /* How many events in queue? */ - int delivering_events; /* The event receiver for my BMC, only really used at panic shutdown as a place to store this. */ @@ -265,12 +250,6 @@ struct ipmi_smi unsigned char local_sel_device; unsigned char local_event_generator; - /* For handling of maintenance mode. */ - int maintenance_mode; - int maintenance_mode_enable; - int auto_maintenance_timeout; - spinlock_t maintenance_mode_lock; /* Used in a timer... */ - /* A cheap hack, if this is non-null and a message to an interface comes in with a NULL user, call this routine with it. Note that the message will still be freed by the @@ -359,6 +338,13 @@ struct ipmi_smi }; #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev) +/* Used to mark an interface entry that cannot be used but is not a + * free entry, either, primarily used at creation and deletion time so + * a slot doesn't get reused too quickly. */ +#define IPMI_INVALID_INTERFACE_ENTRY ((ipmi_smi_t) ((long) 1)) +#define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \ + || (i == IPMI_INVALID_INTERFACE_ENTRY)) + /** * The driver model view of the IPMI messaging driver. */ @@ -368,13 +354,16 @@ static struct device_driver ipmidriver = { }; static DEFINE_MUTEX(ipmidriver_mutex); -static struct list_head ipmi_interfaces = LIST_HEAD_INIT(ipmi_interfaces); -static DEFINE_MUTEX(ipmi_interfaces_mutex); +#define MAX_IPMI_INTERFACES 4 +static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES]; + +/* Directly protects the ipmi_interfaces data structure. */ +static DEFINE_SPINLOCK(interfaces_lock); /* List of watchers that want to know when smi's are added and deleted. */ static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers); -static DEFINE_MUTEX(smi_watchers_mutex); +static DECLARE_RWSEM(smi_watchers_sem); static void free_recv_msg_list(struct list_head *q) @@ -434,84 +423,48 @@ static void intf_free(struct kref *ref) kfree(intf); } -struct watcher_entry { - int intf_num; - ipmi_smi_t intf; - struct list_head link; -}; - int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) { - ipmi_smi_t intf; - struct list_head to_deliver = LIST_HEAD_INIT(to_deliver); - struct watcher_entry *e, *e2; - - mutex_lock(&smi_watchers_mutex); - - mutex_lock(&ipmi_interfaces_mutex); + int i; + unsigned long flags; - /* Build a list of things to deliver. 
*/ - list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { - if (intf->intf_num == -1) + down_write(&smi_watchers_sem); + list_add(&(watcher->link), &smi_watchers); + up_write(&smi_watchers_sem); + spin_lock_irqsave(&interfaces_lock, flags); + for (i = 0; i < MAX_IPMI_INTERFACES; i++) { + ipmi_smi_t intf = ipmi_interfaces[i]; + if (IPMI_INVALID_INTERFACE(intf)) continue; - e = kmalloc(sizeof(*e), GFP_KERNEL); - if (!e) - goto out_err; - kref_get(&intf->refcount); - e->intf = intf; - e->intf_num = intf->intf_num; - list_add_tail(&e->link, &to_deliver); + spin_unlock_irqrestore(&interfaces_lock, flags); + watcher->new_smi(i, intf->si_dev); + spin_lock_irqsave(&interfaces_lock, flags); } - - /* We will succeed, so add it to the list. */ - list_add(&watcher->link, &smi_watchers); - - mutex_unlock(&ipmi_interfaces_mutex); - - list_for_each_entry_safe(e, e2, &to_deliver, link) { - list_del(&e->link); - watcher->new_smi(e->intf_num, e->intf->si_dev); - kref_put(&e->intf->refcount, intf_free); - kfree(e); - } - - mutex_unlock(&smi_watchers_mutex); - + spin_unlock_irqrestore(&interfaces_lock, flags); return 0; - - out_err: - mutex_unlock(&ipmi_interfaces_mutex); - mutex_unlock(&smi_watchers_mutex); - list_for_each_entry_safe(e, e2, &to_deliver, link) { - list_del(&e->link); - kref_put(&e->intf->refcount, intf_free); - kfree(e); - } - return -ENOMEM; } int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) { - mutex_lock(&smi_watchers_mutex); + down_write(&smi_watchers_sem); list_del(&(watcher->link)); - mutex_unlock(&smi_watchers_mutex); + up_write(&smi_watchers_sem); return 0; } -/* - * Must be called with smi_watchers_mutex held. - */ static void call_smi_watchers(int i, struct device *dev) { struct ipmi_smi_watcher *w; + down_read(&smi_watchers_sem); list_for_each_entry(w, &smi_watchers, link) { if (try_module_get(w->owner)) { w->new_smi(i, dev); module_put(w->owner); } } + up_read(&smi_watchers_sem); } static int @@ -637,17 +590,6 @@ static void deliver_response(struct ipmi_recv_msg *msg) } } -static void -deliver_err_response(struct ipmi_recv_msg *msg, int err) -{ - msg->recv_type = IPMI_RESPONSE_RECV_TYPE; - msg->msg_data[0] = err; - msg->msg.netfn |= 1; /* Convert to a response. */ - msg->msg.data_len = 1; - msg->msg.data = msg->msg_data; - deliver_response(msg); -} - /* Find the next sequence number not being used and add the given message with the given timeout to the sequence table. This must be called with the interface's seq_lock held. */ @@ -785,8 +727,14 @@ static int intf_err_seq(ipmi_smi_t intf, } spin_unlock_irqrestore(&(intf->seq_lock), flags); - if (msg) - deliver_err_response(msg, err); + if (msg) { + msg->recv_type = IPMI_RESPONSE_RECV_TYPE; + msg->msg_data[0] = err; + msg->msg.netfn |= 1; /* Convert to a response. */ + msg->msg.data_len = 1; + msg->msg.data = msg->msg_data; + deliver_response(msg); + } return rv; } @@ -828,18 +776,17 @@ int ipmi_create_user(unsigned int if_num, if (!new_user) return -ENOMEM; - mutex_lock(&ipmi_interfaces_mutex); - list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { - if (intf->intf_num == if_num) - goto found; + spin_lock_irqsave(&interfaces_lock, flags); + intf = ipmi_interfaces[if_num]; + if ((if_num >= MAX_IPMI_INTERFACES) || IPMI_INVALID_INTERFACE(intf)) { + spin_unlock_irqrestore(&interfaces_lock, flags); + rv = -EINVAL; + goto out_kfree; } - /* Not found, return an error */ - rv = -EINVAL; - goto out_kfree; - found: /* Note that each existing user holds a refcount to the interface. 
*/ kref_get(&intf->refcount); + spin_unlock_irqrestore(&interfaces_lock, flags); kref_init(&new_user->refcount); new_user->handler = handler; @@ -860,10 +807,6 @@ int ipmi_create_user(unsigned int if_num, } } - /* Hold the lock so intf->handlers is guaranteed to be good - * until now */ - mutex_unlock(&ipmi_interfaces_mutex); - new_user->valid = 1; spin_lock_irqsave(&intf->seq_lock, flags); list_add_rcu(&new_user->link, &intf->users); @@ -874,7 +817,6 @@ int ipmi_create_user(unsigned int if_num, out_kref: kref_put(&intf->refcount, intf_free); out_kfree: - mutex_unlock(&ipmi_interfaces_mutex); kfree(new_user); return rv; } @@ -904,7 +846,6 @@ int ipmi_destroy_user(ipmi_user_t user) && (intf->seq_table[i].recv_msg->user == user)) { intf->seq_table[i].inuse = 0; - ipmi_free_recv_msg(intf->seq_table[i].recv_msg); } } spin_unlock_irqrestore(&intf->seq_lock, flags); @@ -931,13 +872,9 @@ int ipmi_destroy_user(ipmi_user_t user) kfree(rcvr); } - mutex_lock(&ipmi_interfaces_mutex); - if (intf->handlers) { - module_put(intf->handlers->owner); - if (intf->handlers->dec_usecount) - intf->handlers->dec_usecount(intf->send_info); - } - mutex_unlock(&ipmi_interfaces_mutex); + module_put(intf->handlers->owner); + if (intf->handlers->dec_usecount) + intf->handlers->dec_usecount(intf->send_info); kref_put(&intf->refcount, intf_free); @@ -950,8 +887,8 @@ void ipmi_get_version(ipmi_user_t user, unsigned char *major, unsigned char *minor) { - *major = user->intf->ipmi_version_major; - *minor = user->intf->ipmi_version_minor; + *major = ipmi_version_major(&user->intf->bmc->id); + *minor = ipmi_version_minor(&user->intf->bmc->id); } int ipmi_set_my_address(ipmi_user_t user, @@ -994,65 +931,6 @@ int ipmi_get_my_LUN(ipmi_user_t user, return 0; } -int ipmi_get_maintenance_mode(ipmi_user_t user) -{ - int mode; - unsigned long flags; - - spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags); - mode = user->intf->maintenance_mode; - spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags); - - return mode; -} -EXPORT_SYMBOL(ipmi_get_maintenance_mode); - -static void maintenance_mode_update(ipmi_smi_t intf) -{ - if (intf->handlers->set_maintenance_mode) - intf->handlers->set_maintenance_mode( - intf->send_info, intf->maintenance_mode_enable); -} - -int ipmi_set_maintenance_mode(ipmi_user_t user, int mode) -{ - int rv = 0; - unsigned long flags; - ipmi_smi_t intf = user->intf; - - spin_lock_irqsave(&intf->maintenance_mode_lock, flags); - if (intf->maintenance_mode != mode) { - switch (mode) { - case IPMI_MAINTENANCE_MODE_AUTO: - intf->maintenance_mode = mode; - intf->maintenance_mode_enable - = (intf->auto_maintenance_timeout > 0); - break; - - case IPMI_MAINTENANCE_MODE_OFF: - intf->maintenance_mode = mode; - intf->maintenance_mode_enable = 0; - break; - - case IPMI_MAINTENANCE_MODE_ON: - intf->maintenance_mode = mode; - intf->maintenance_mode_enable = 1; - break; - - default: - rv = -EINVAL; - goto out_unlock; - } - - maintenance_mode_update(intf); - } - out_unlock: - spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags); - - return rv; -} -EXPORT_SYMBOL(ipmi_set_maintenance_mode); - int ipmi_set_gets_events(ipmi_user_t user, int val) { unsigned long flags; @@ -1065,33 +943,20 @@ int ipmi_set_gets_events(ipmi_user_t user, int val) spin_lock_irqsave(&intf->events_lock, flags); user->gets_events = val; - if (intf->delivering_events) - /* - * Another thread is delivering events for this, so - * let it handle any new events. - */ - goto out; - - /* Deliver any queued events. 
*/ - while (user->gets_events && !list_empty(&intf->waiting_events)) { + if (val) { + /* Deliver any queued events. */ list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) list_move_tail(&msg->link, &msgs); intf->waiting_events_count = 0; + } - intf->delivering_events = 1; - spin_unlock_irqrestore(&intf->events_lock, flags); - - list_for_each_entry_safe(msg, msg2, &msgs, link) { - msg->user = user; - kref_get(&user->refcount); - deliver_response(msg); - } - - spin_lock_irqsave(&intf->events_lock, flags); - intf->delivering_events = 0; + /* Hold the events lock while doing this to preserve order. */ + list_for_each_entry_safe(msg, msg2, &msgs, link) { + msg->user = user; + kref_get(&user->refcount); + deliver_response(msg); } - out: spin_unlock_irqrestore(&intf->events_lock, flags); return 0; @@ -1202,8 +1067,7 @@ int ipmi_unregister_for_cmd(ipmi_user_t user, void ipmi_user_set_run_to_completion(ipmi_user_t user, int val) { ipmi_smi_t intf = user->intf; - if (intf->handlers) - intf->handlers->set_run_to_completion(intf->send_info, val); + intf->handlers->set_run_to_completion(intf->send_info, val); } static unsigned char @@ -1314,11 +1178,10 @@ static int i_ipmi_request(ipmi_user_t user, int retries, unsigned int retry_time_ms) { - int rv = 0; - struct ipmi_smi_msg *smi_msg; - struct ipmi_recv_msg *recv_msg; - unsigned long flags; - struct ipmi_smi_handlers *handlers; + int rv = 0; + struct ipmi_smi_msg *smi_msg; + struct ipmi_recv_msg *recv_msg; + unsigned long flags; if (supplied_recv) { @@ -1341,13 +1204,6 @@ static int i_ipmi_request(ipmi_user_t user, } } - rcu_read_lock(); - handlers = intf->handlers; - if (!handlers) { - rv = -ENODEV; - goto out_err; - } - recv_msg->user = user; if (user) kref_get(&user->refcount); @@ -1390,24 +1246,6 @@ static int i_ipmi_request(ipmi_user_t user, goto out_err; } - if (((msg->netfn == IPMI_NETFN_APP_REQUEST) - && ((msg->cmd == IPMI_COLD_RESET_CMD) - || (msg->cmd == IPMI_WARM_RESET_CMD))) - || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) - { - spin_lock_irqsave(&intf->maintenance_mode_lock, flags); - intf->auto_maintenance_timeout - = IPMI_MAINTENANCE_MODE_TIMEOUT; - if (!intf->maintenance_mode - && !intf->maintenance_mode_enable) - { - intf->maintenance_mode_enable = 1; - maintenance_mode_update(intf); - } - spin_unlock_irqrestore(&intf->maintenance_mode_lock, - flags); - } - if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) { spin_lock_irqsave(&intf->counter_lock, flags); intf->sent_invalid_commands++; @@ -1682,14 +1520,11 @@ static int i_ipmi_request(ipmi_user_t user, printk("\n"); } #endif - - handlers->sender(intf->send_info, smi_msg, priority); - rcu_read_unlock(); + intf->handlers->sender(intf->send_info, smi_msg, priority); return 0; out_err: - rcu_read_unlock(); ipmi_free_smi_msg(smi_msg); ipmi_free_recv_msg(recv_msg); return rv; @@ -1769,7 +1604,6 @@ int ipmi_request_supply_msgs(ipmi_user_t user, -1, 0); } -#ifdef CONFIG_PROC_FS static int ipmb_file_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { @@ -1858,7 +1692,6 @@ static int stat_file_read_proc(char *page, char **start, off_t off, return (out - ((char *) page)); } -#endif /* CONFIG_PROC_FS */ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, read_proc_t *read_proc, write_proc_t *write_proc, @@ -1984,12 +1817,13 @@ static int __find_bmc_prod_dev_id(struct device *dev, void *data) struct bmc_device *bmc = dev_get_drvdata(dev); return (bmc->id.product_id == id->product_id + && bmc->id.product_id == id->product_id && bmc->id.device_id == 
id->device_id); } static struct bmc_device *ipmi_find_bmc_prod_dev_id( struct device_driver *drv, - unsigned int product_id, unsigned char device_id) + unsigned char product_id, unsigned char device_id) { struct prod_dev_id id = { .product_id = product_id, @@ -2106,9 +1940,6 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr, static void remove_files(struct bmc_device *bmc) { - if (!bmc->dev) - return; - device_remove_file(&bmc->dev->dev, &bmc->device_id_attr); device_remove_file(&bmc->dev->dev, @@ -2142,8 +1973,7 @@ cleanup_bmc_device(struct kref *ref) bmc = container_of(ref, struct bmc_device, refcount); remove_files(bmc); - if (bmc->dev) - platform_device_unregister(bmc->dev); + platform_device_unregister(bmc->dev); kfree(bmc); } @@ -2151,11 +1981,7 @@ static void ipmi_bmc_unregister(ipmi_smi_t intf) { struct bmc_device *bmc = intf->bmc; - if (intf->sysfs_name) { - sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name); - kfree(intf->sysfs_name); - intf->sysfs_name = NULL; - } + sysfs_remove_link(&intf->si_dev->kobj, "bmc"); if (intf->my_dev_name) { sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name); kfree(intf->my_dev_name); @@ -2164,7 +1990,6 @@ static void ipmi_bmc_unregister(ipmi_smi_t intf) mutex_lock(&ipmidriver_mutex); kref_put(&bmc->refcount, cleanup_bmc_device); - intf->bmc = NULL; mutex_unlock(&ipmidriver_mutex); } @@ -2172,56 +1997,6 @@ static int create_files(struct bmc_device *bmc) { int err; - bmc->device_id_attr.attr.name = "device_id"; - bmc->device_id_attr.attr.owner = THIS_MODULE; - bmc->device_id_attr.attr.mode = S_IRUGO; - bmc->device_id_attr.show = device_id_show; - - bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs"; - bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE; - bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO; - bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show; - - bmc->revision_attr.attr.name = "revision"; - bmc->revision_attr.attr.owner = THIS_MODULE; - bmc->revision_attr.attr.mode = S_IRUGO; - bmc->revision_attr.show = revision_show; - - bmc->firmware_rev_attr.attr.name = "firmware_revision"; - bmc->firmware_rev_attr.attr.owner = THIS_MODULE; - bmc->firmware_rev_attr.attr.mode = S_IRUGO; - bmc->firmware_rev_attr.show = firmware_rev_show; - - bmc->version_attr.attr.name = "ipmi_version"; - bmc->version_attr.attr.owner = THIS_MODULE; - bmc->version_attr.attr.mode = S_IRUGO; - bmc->version_attr.show = ipmi_version_show; - - bmc->add_dev_support_attr.attr.name = "additional_device_support"; - bmc->add_dev_support_attr.attr.owner = THIS_MODULE; - bmc->add_dev_support_attr.attr.mode = S_IRUGO; - bmc->add_dev_support_attr.show = add_dev_support_show; - - bmc->manufacturer_id_attr.attr.name = "manufacturer_id"; - bmc->manufacturer_id_attr.attr.owner = THIS_MODULE; - bmc->manufacturer_id_attr.attr.mode = S_IRUGO; - bmc->manufacturer_id_attr.show = manufacturer_id_show; - - bmc->product_id_attr.attr.name = "product_id"; - bmc->product_id_attr.attr.owner = THIS_MODULE; - bmc->product_id_attr.attr.mode = S_IRUGO; - bmc->product_id_attr.show = product_id_show; - - bmc->guid_attr.attr.name = "guid"; - bmc->guid_attr.attr.owner = THIS_MODULE; - bmc->guid_attr.attr.mode = S_IRUGO; - bmc->guid_attr.show = guid_show; - - bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision"; - bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE; - bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO; - bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show; - err = device_create_file(&bmc->dev->dev, &bmc->device_id_attr); 
if (err) goto out; @@ -2291,8 +2066,7 @@ static int create_files(struct bmc_device *bmc) return err; } -static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum, - const char *sysfs_name) +static int ipmi_bmc_register(ipmi_smi_t intf) { int rv; struct bmc_device *bmc = intf->bmc; @@ -2332,39 +2106,9 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum, bmc->id.product_id, bmc->id.device_id); } else { - char name[14]; - unsigned char orig_dev_id = bmc->id.device_id; - int warn_printed = 0; - - snprintf(name, sizeof(name), - "ipmi_bmc.%4.4x", bmc->id.product_id); - - while (ipmi_find_bmc_prod_dev_id(&ipmidriver, - bmc->id.product_id, - bmc->id.device_id)) - { - if (!warn_printed) { - printk(KERN_WARNING PFX - "This machine has two different BMCs" - " with the same product id and device" - " id. This is an error in the" - " firmware, but incrementing the" - " device id to work around the problem." - " Prod ID = 0x%x, Dev ID = 0x%x\n", - bmc->id.product_id, bmc->id.device_id); - warn_printed = 1; - } - bmc->id.device_id++; /* Wraps at 255 */ - if (bmc->id.device_id == orig_dev_id) { - printk(KERN_ERR PFX - "Out of device ids!\n"); - break; - } - } - - bmc->dev = platform_device_alloc(name, bmc->id.device_id); + bmc->dev = platform_device_alloc("ipmi_bmc", + bmc->id.device_id); if (!bmc->dev) { - mutex_unlock(&ipmidriver_mutex); printk(KERN_ERR "ipmi_msghandler:" " Unable to allocate platform device\n"); @@ -2377,8 +2121,6 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum, rv = platform_device_add(bmc->dev); mutex_unlock(&ipmidriver_mutex); if (rv) { - platform_device_put(bmc->dev); - bmc->dev = NULL; printk(KERN_ERR "ipmi_msghandler:" " Unable to register bmc device: %d\n", @@ -2388,6 +2130,57 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum, return rv; } + bmc->device_id_attr.attr.name = "device_id"; + bmc->device_id_attr.attr.owner = THIS_MODULE; + bmc->device_id_attr.attr.mode = S_IRUGO; + bmc->device_id_attr.show = device_id_show; + + bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs"; + bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE; + bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO; + bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show; + + bmc->revision_attr.attr.name = "revision"; + bmc->revision_attr.attr.owner = THIS_MODULE; + bmc->revision_attr.attr.mode = S_IRUGO; + bmc->revision_attr.show = revision_show; + + bmc->firmware_rev_attr.attr.name = "firmware_revision"; + bmc->firmware_rev_attr.attr.owner = THIS_MODULE; + bmc->firmware_rev_attr.attr.mode = S_IRUGO; + bmc->firmware_rev_attr.show = firmware_rev_show; + + bmc->version_attr.attr.name = "ipmi_version"; + bmc->version_attr.attr.owner = THIS_MODULE; + bmc->version_attr.attr.mode = S_IRUGO; + bmc->version_attr.show = ipmi_version_show; + + bmc->add_dev_support_attr.attr.name + = "additional_device_support"; + bmc->add_dev_support_attr.attr.owner = THIS_MODULE; + bmc->add_dev_support_attr.attr.mode = S_IRUGO; + bmc->add_dev_support_attr.show = add_dev_support_show; + + bmc->manufacturer_id_attr.attr.name = "manufacturer_id"; + bmc->manufacturer_id_attr.attr.owner = THIS_MODULE; + bmc->manufacturer_id_attr.attr.mode = S_IRUGO; + bmc->manufacturer_id_attr.show = manufacturer_id_show; + + bmc->product_id_attr.attr.name = "product_id"; + bmc->product_id_attr.attr.owner = THIS_MODULE; + bmc->product_id_attr.attr.mode = S_IRUGO; + bmc->product_id_attr.show = product_id_show; + + bmc->guid_attr.attr.name = "guid"; + bmc->guid_attr.attr.owner = THIS_MODULE; + bmc->guid_attr.attr.mode = 
S_IRUGO; + bmc->guid_attr.show = guid_show; + + bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision"; + bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE; + bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO; + bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show; + rv = create_files(bmc); if (rv) { mutex_lock(&ipmidriver_mutex); @@ -2409,44 +2202,29 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum, * create symlink from system interface device to bmc device * and back. */ - intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL); - if (!intf->sysfs_name) { - rv = -ENOMEM; - printk(KERN_ERR - "ipmi_msghandler: allocate link to BMC: %d\n", - rv); - goto out_err; - } - rv = sysfs_create_link(&intf->si_dev->kobj, - &bmc->dev->dev.kobj, intf->sysfs_name); + &bmc->dev->dev.kobj, "bmc"); if (rv) { - kfree(intf->sysfs_name); - intf->sysfs_name = NULL; printk(KERN_ERR "ipmi_msghandler: Unable to create bmc symlink: %d\n", rv); goto out_err; } - size = snprintf(dummy, 0, "ipmi%d", ifnum); + size = snprintf(dummy, 0, "ipmi%d", intf->intf_num); intf->my_dev_name = kmalloc(size+1, GFP_KERNEL); if (!intf->my_dev_name) { - kfree(intf->sysfs_name); - intf->sysfs_name = NULL; rv = -ENOMEM; printk(KERN_ERR "ipmi_msghandler: allocate link from BMC: %d\n", rv); goto out_err; } - snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum); + snprintf(intf->my_dev_name, size+1, "ipmi%d", intf->intf_num); rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj, intf->my_dev_name); if (rv) { - kfree(intf->sysfs_name); - intf->sysfs_name = NULL; kfree(intf->my_dev_name); intf->my_dev_name = NULL; printk(KERN_ERR @@ -2631,14 +2409,17 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, void *send_info, struct ipmi_device_id *device_id, struct device *si_dev, - const char *sysfs_name, unsigned char slave_addr) { int i, j; int rv; ipmi_smi_t intf; - ipmi_smi_t tintf; - struct list_head *link; + unsigned long flags; + int version_major; + int version_minor; + + version_major = ipmi_version_major(device_id); + version_minor = ipmi_version_minor(device_id); /* Make sure the driver is actually initialized, this handles problems with initialization order. */ @@ -2656,16 +2437,12 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, if (!intf) return -ENOMEM; memset(intf, 0, sizeof(*intf)); - - intf->ipmi_version_major = ipmi_version_major(device_id); - intf->ipmi_version_minor = ipmi_version_minor(device_id); - intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL); if (!intf->bmc) { kfree(intf); return -ENOMEM; } - intf->intf_num = -1; /* Mark it invalid for now. */ + intf->intf_num = -1; kref_init(&intf->refcount); intf->bmc->id = *device_id; intf->si_dev = si_dev; @@ -2693,30 +2470,26 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, INIT_LIST_HEAD(&intf->waiting_events); intf->waiting_events_count = 0; mutex_init(&intf->cmd_rcvrs_mutex); - spin_lock_init(&intf->maintenance_mode_lock); INIT_LIST_HEAD(&intf->cmd_rcvrs); init_waitqueue_head(&intf->waitq); spin_lock_init(&intf->counter_lock); intf->proc_dir = NULL; - mutex_lock(&smi_watchers_mutex); - mutex_lock(&ipmi_interfaces_mutex); - /* Look for a hole in the numbers. */ - i = 0; - link = &ipmi_interfaces; - list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) { - if (tintf->intf_num != i) { - link = &tintf->link; + rv = -ENOMEM; + spin_lock_irqsave(&interfaces_lock, flags); + for (i = 0; i < MAX_IPMI_INTERFACES; i++) { + if (ipmi_interfaces[i] == NULL) { + intf->intf_num = i; + /* Reserve the entry till we are done. 
*/ + ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY; + rv = 0; break; } - i++; } - /* Add the new interface in numeric order. */ - if (i == 0) - list_add_rcu(&intf->link, &ipmi_interfaces); - else - list_add_tail_rcu(&intf->link, link); + spin_unlock_irqrestore(&interfaces_lock, flags); + if (rv) + goto out; rv = handlers->start_processing(send_info, intf); if (rv) @@ -2724,9 +2497,8 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, get_guid(intf); - if ((intf->ipmi_version_major > 1) - || ((intf->ipmi_version_major == 1) - && (intf->ipmi_version_minor >= 5))) + if ((version_major > 1) + || ((version_major == 1) && (version_minor >= 5))) { /* Start scanning the channels to see what is available. */ @@ -2749,67 +2521,64 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, if (rv == 0) rv = add_proc_entries(intf, i); - rv = ipmi_bmc_register(intf, i, sysfs_name); + rv = ipmi_bmc_register(intf); out: if (rv) { if (intf->proc_dir) remove_proc_entries(intf); - intf->handlers = NULL; - list_del_rcu(&intf->link); - mutex_unlock(&ipmi_interfaces_mutex); - mutex_unlock(&smi_watchers_mutex); - synchronize_rcu(); kref_put(&intf->refcount, intf_free); + if (i < MAX_IPMI_INTERFACES) { + spin_lock_irqsave(&interfaces_lock, flags); + ipmi_interfaces[i] = NULL; + spin_unlock_irqrestore(&interfaces_lock, flags); + } } else { - /* After this point the interface is legal to use. */ - intf->intf_num = i; - mutex_unlock(&ipmi_interfaces_mutex); + spin_lock_irqsave(&interfaces_lock, flags); + ipmi_interfaces[i] = intf; + spin_unlock_irqrestore(&interfaces_lock, flags); call_smi_watchers(i, intf->si_dev); - mutex_unlock(&smi_watchers_mutex); } return rv; } -static void cleanup_smi_msgs(ipmi_smi_t intf) -{ - int i; - struct seq_table *ent; - - /* No need for locks, the interface is down. */ - for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { - ent = &(intf->seq_table[i]); - if (!ent->inuse) - continue; - deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED); - } -} - int ipmi_unregister_smi(ipmi_smi_t intf) { + int i; struct ipmi_smi_watcher *w; - int intf_num = intf->intf_num; + unsigned long flags; ipmi_bmc_unregister(intf); - mutex_lock(&smi_watchers_mutex); - mutex_lock(&ipmi_interfaces_mutex); - intf->intf_num = -1; - intf->handlers = NULL; - list_del_rcu(&intf->link); - mutex_unlock(&ipmi_interfaces_mutex); - synchronize_rcu(); + spin_lock_irqsave(&interfaces_lock, flags); + for (i = 0; i < MAX_IPMI_INTERFACES; i++) { + if (ipmi_interfaces[i] == intf) { + /* Set the interface number reserved until we + * are done. */ + ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY; + intf->intf_num = -1; + break; + } + } + spin_unlock_irqrestore(&interfaces_lock,flags); - cleanup_smi_msgs(intf); + if (i == MAX_IPMI_INTERFACES) + return -ENODEV; remove_proc_entries(intf); /* Call all the watcher interfaces to tell them that an interface is gone. */ + down_read(&smi_watchers_sem); list_for_each_entry(w, &smi_watchers, link) - w->smi_gone(intf_num); - mutex_unlock(&smi_watchers_mutex); + w->smi_gone(i); + up_read(&smi_watchers_sem); + + /* Allow the entry to be reused now. 
*/ + spin_lock_irqsave(&interfaces_lock, flags); + ipmi_interfaces[i] = NULL; + spin_unlock_irqrestore(&interfaces_lock,flags); kref_put(&intf->refcount, intf_free); return 0; @@ -2891,7 +2660,6 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, struct ipmi_ipmb_addr *ipmb_addr; struct ipmi_recv_msg *recv_msg; unsigned long flags; - struct ipmi_smi_handlers *handlers; if (msg->rsp_size < 10) { /* Message not big enough, just ignore it. */ @@ -2948,16 +2716,10 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, printk("\n"); } #endif - rcu_read_lock(); - handlers = intf->handlers; - if (handlers) { - handlers->sender(intf->send_info, msg, 0); - /* We used the message, so return the value - that causes it to not be freed or - queued. */ - rv = -1; - } - rcu_read_unlock(); + intf->handlers->sender(intf->send_info, msg, 0); + + rv = -1; /* We used the message, so return the value that + causes it to not be freed or queued. */ } else { /* Deliver the message to the user. */ spin_lock_irqsave(&intf->counter_lock, flags); @@ -3547,6 +3309,16 @@ void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) rcu_read_unlock(); } +static void +handle_msg_timeout(struct ipmi_recv_msg *msg) +{ + msg->recv_type = IPMI_RESPONSE_RECV_TYPE; + msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE; + msg->msg.netfn |= 1; /* Convert to a response. */ + msg->msg.data_len = 1; + msg->msg.data = msg->msg_data; + deliver_response(msg); +} static struct ipmi_smi_msg * smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, @@ -3578,11 +3350,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, struct list_head *timeouts, long timeout_period, int slot, unsigned long *flags) { - struct ipmi_recv_msg *msg; - struct ipmi_smi_handlers *handlers; - - if (intf->intf_num == -1) - return; + struct ipmi_recv_msg *msg; if (!ent->inuse) return; @@ -3625,19 +3393,13 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, return; spin_unlock_irqrestore(&intf->seq_lock, *flags); - /* Send the new message. We send with a zero * priority. It timed out, I doubt time is * that critical now, and high priority * messages are really only for messages to the * local MC, which don't get resent. */ - handlers = intf->handlers; - if (handlers) - intf->handlers->sender(intf->send_info, - smi_msg, 0); - else - ipmi_free_smi_msg(smi_msg); - + intf->handlers->sender(intf->send_info, + smi_msg, 0); spin_lock_irqsave(&intf->seq_lock, *flags); } } @@ -3649,12 +3411,18 @@ static void ipmi_timeout_handler(long timeout_period) struct ipmi_recv_msg *msg, *msg2; struct ipmi_smi_msg *smi_msg, *smi_msg2; unsigned long flags; - int i; + int i, j; INIT_LIST_HEAD(&timeouts); - rcu_read_lock(); - list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { + spin_lock(&interfaces_lock); + for (i = 0; i < MAX_IPMI_INTERFACES; i++) { + intf = ipmi_interfaces[i]; + if (IPMI_INVALID_INTERFACE(intf)) + continue; + kref_get(&intf->refcount); + spin_unlock(&interfaces_lock); + /* See if any waiting messages need to be processed. */ spin_lock_irqsave(&intf->waiting_msgs_lock, flags); list_for_each_entry_safe(smi_msg, smi_msg2, @@ -3674,60 +3442,35 @@ static void ipmi_timeout_handler(long timeout_period) have timed out, putting them in the timeouts list. 
*/ spin_lock_irqsave(&intf->seq_lock, flags); - for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) - check_msg_timeout(intf, &(intf->seq_table[i]), - &timeouts, timeout_period, i, + for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) + check_msg_timeout(intf, &(intf->seq_table[j]), + &timeouts, timeout_period, j, &flags); spin_unlock_irqrestore(&intf->seq_lock, flags); list_for_each_entry_safe(msg, msg2, &timeouts, link) - deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE); - - /* - * Maintenance mode handling. Check the timeout - * optimistically before we claim the lock. It may - * mean a timeout gets missed occasionally, but that - * only means the timeout gets extended by one period - * in that case. No big deal, and it avoids the lock - * most of the time. - */ - if (intf->auto_maintenance_timeout > 0) { - spin_lock_irqsave(&intf->maintenance_mode_lock, flags); - if (intf->auto_maintenance_timeout > 0) { - intf->auto_maintenance_timeout - -= timeout_period; - if (!intf->maintenance_mode - && (intf->auto_maintenance_timeout <= 0)) - { - intf->maintenance_mode_enable = 0; - maintenance_mode_update(intf); - } - } - spin_unlock_irqrestore(&intf->maintenance_mode_lock, - flags); - } + handle_msg_timeout(msg); + + kref_put(&intf->refcount, intf_free); + spin_lock(&interfaces_lock); } - rcu_read_unlock(); + spin_unlock(&interfaces_lock); } static void ipmi_request_event(void) { - ipmi_smi_t intf; - struct ipmi_smi_handlers *handlers; + ipmi_smi_t intf; + int i; - rcu_read_lock(); - /* Called from the timer, no need to check if handlers is - * valid. */ - list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { - /* No event requests when in maintenance mode. */ - if (intf->maintenance_mode_enable) + spin_lock(&interfaces_lock); + for (i = 0; i < MAX_IPMI_INTERFACES; i++) { + intf = ipmi_interfaces[i]; + if (IPMI_INVALID_INTERFACE(intf)) continue; - handlers = intf->handlers; - if (handlers) - handlers->request_events(intf->send_info); + intf->handlers->request_events(intf->send_info); } - rcu_read_unlock(); + spin_unlock(&interfaces_lock); } static struct timer_list ipmi_timer; @@ -3856,6 +3599,7 @@ static void send_panic_events(char *str) struct kernel_ipmi_msg msg; ipmi_smi_t intf; unsigned char data[16]; + int i; struct ipmi_system_interface_addr *si; struct ipmi_addr addr; struct ipmi_smi_msg smi_msg; @@ -3889,9 +3633,9 @@ static void send_panic_events(char *str) recv_msg.done = dummy_recv_done_handler; /* For every registered interface, send the event. */ - list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { - if (!intf->handlers) - /* Interface is not ready. */ + for (i = 0; i < MAX_IPMI_INTERFACES; i++) { + intf = ipmi_interfaces[i]; + if (IPMI_INVALID_INTERFACE(intf)) continue; /* Send the event announcing the panic. */ @@ -3916,14 +3660,13 @@ static void send_panic_events(char *str) if (!str) return; - /* For every registered interface, send the event. */ - list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { + for (i = 0; i < MAX_IPMI_INTERFACES; i++) { char *p = str; struct ipmi_ipmb_addr *ipmb; int j; - if (intf->intf_num == -1) - /* Interface was not ready yet. 
*/ + intf = ipmi_interfaces[i]; + if (IPMI_INVALID_INTERFACE(intf)) continue; /* First job here is to figure out where to send the @@ -4049,6 +3792,7 @@ static int panic_event(struct notifier_block *this, unsigned long event, void *ptr) { + int i; ipmi_smi_t intf; if (has_panicked) @@ -4056,9 +3800,9 @@ static int panic_event(struct notifier_block *this, has_panicked = 1; /* For every registered interface, set it to run to completion. */ - list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { - if (!intf->handlers) - /* Interface is not ready. */ + for (i = 0; i < MAX_IPMI_INTERFACES; i++) { + intf = ipmi_interfaces[i]; + if (IPMI_INVALID_INTERFACE(intf)) continue; intf->handlers->set_run_to_completion(intf->send_info, 1); @@ -4079,6 +3823,7 @@ static struct notifier_block panic_block = { static int ipmi_init_msghandler(void) { + int i; int rv; if (initialized) @@ -4093,6 +3838,9 @@ static int ipmi_init_msghandler(void) printk(KERN_INFO "ipmi message handler version " IPMI_DRIVER_VERSION "\n"); + for (i = 0; i < MAX_IPMI_INTERFACES; i++) + ipmi_interfaces[i] = NULL; + #ifdef CONFIG_PROC_FS proc_ipmi_root = proc_mkdir("ipmi", NULL); if (!proc_ipmi_root) { diff --git a/trunk/drivers/char/ipmi/ipmi_poweroff.c b/trunk/drivers/char/ipmi/ipmi_poweroff.c index 597eb4f88b84..8d941db83457 100644 --- a/trunk/drivers/char/ipmi/ipmi_poweroff.c +++ b/trunk/drivers/char/ipmi/ipmi_poweroff.c @@ -43,9 +43,6 @@ #define PFX "IPMI poweroff: " -static void ipmi_po_smi_gone(int if_num); -static void ipmi_po_new_smi(int if_num, struct device *device); - /* Definitions for controlling power off (if the system supports it). It * conveniently matches the IPMI chassis control values. */ #define IPMI_CHASSIS_POWER_DOWN 0 /* power down, the default. */ @@ -54,37 +51,6 @@ static void ipmi_po_new_smi(int if_num, struct device *device); /* the IPMI data command */ static int poweroff_powercycle; -/* Which interface to use, -1 means the first we see. */ -static int ifnum_to_use = -1; - -/* Our local state. */ -static int ready = 0; -static ipmi_user_t ipmi_user; -static int ipmi_ifnum; -static void (*specific_poweroff_func)(ipmi_user_t user) = NULL; - -/* Holds the old poweroff function so we can restore it on removal. */ -static void (*old_poweroff_func)(void); - -static int set_param_ifnum(const char *val, struct kernel_param *kp) -{ - int rv = param_set_int(val, kp); - if (rv) - return rv; - if ((ifnum_to_use < 0) || (ifnum_to_use == ipmi_ifnum)) - return 0; - - ipmi_po_smi_gone(ipmi_ifnum); - ipmi_po_new_smi(ifnum_to_use, NULL); - return 0; -} - -module_param_call(ifnum_to_use, set_param_ifnum, param_get_int, - &ifnum_to_use, 0644); -MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog " - "timer. Setting to -1 defaults to the first registered " - "interface"); - /* parameter definition to allow user to flag power cycle */ module_param(poweroff_powercycle, int, 0644); MODULE_PARM_DESC(poweroff_powercycle, " Set to non-zero to enable power cycle instead of power down. 
Power cycle is contingent on hardware support, otherwise it defaults back to power down."); @@ -176,42 +142,6 @@ static int ipmi_request_in_rc_mode(ipmi_user_t user, #define IPMI_ATCA_GET_ADDR_INFO_CMD 0x01 #define IPMI_PICMG_ID 0 -#define IPMI_NETFN_OEM 0x2e -#define IPMI_ATCA_PPS_GRACEFUL_RESTART 0x11 -#define IPMI_ATCA_PPS_IANA "\x00\x40\x0A" -#define IPMI_MOTOROLA_MANUFACTURER_ID 0x0000A1 -#define IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID 0x0051 - -static void (*atca_oem_poweroff_hook)(ipmi_user_t user) = NULL; - -static void pps_poweroff_atca (ipmi_user_t user) -{ - struct ipmi_system_interface_addr smi_addr; - struct kernel_ipmi_msg send_msg; - int rv; - /* - * Configure IPMI address for local access - */ - smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; - smi_addr.channel = IPMI_BMC_CHANNEL; - smi_addr.lun = 0; - - printk(KERN_INFO PFX "PPS powerdown hook used"); - - send_msg.netfn = IPMI_NETFN_OEM; - send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART; - send_msg.data = IPMI_ATCA_PPS_IANA; - send_msg.data_len = 3; - rv = ipmi_request_in_rc_mode(user, - (struct ipmi_addr *) &smi_addr, - &send_msg); - if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) { - printk(KERN_ERR PFX "Unable to send ATCA ," - " IPMI error 0x%x\n", rv); - } - return; -} - static int ipmi_atca_detect (ipmi_user_t user) { struct ipmi_system_interface_addr smi_addr; @@ -237,13 +167,6 @@ static int ipmi_atca_detect (ipmi_user_t user) rv = ipmi_request_wait_for_response(user, (struct ipmi_addr *) &smi_addr, &send_msg); - - printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n", mfg_id, prod_id); - if((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID) - && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) { - printk(KERN_INFO PFX "Installing Pigeon Point Systems Poweroff Hook\n"); - atca_oem_poweroff_hook = pps_poweroff_atca; - } return !rv; } @@ -277,19 +200,12 @@ static void ipmi_poweroff_atca (ipmi_user_t user) rv = ipmi_request_in_rc_mode(user, (struct ipmi_addr *) &smi_addr, &send_msg); - /** At this point, the system may be shutting down, and most - ** serial drivers (if used) will have interrupts turned off - ** it may be better to ignore IPMI_UNKNOWN_ERR_COMPLETION_CODE - ** return code - **/ - if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) { + if (rv) { printk(KERN_ERR PFX "Unable to send ATCA powerdown message," " IPMI error 0x%x\n", rv); goto out; } - if(atca_oem_poweroff_hook) - return atca_oem_poweroff_hook(user); out: return; } @@ -524,6 +440,15 @@ static struct poweroff_function poweroff_functions[] = { / sizeof(struct poweroff_function)) +/* Our local state. */ +static int ready = 0; +static ipmi_user_t ipmi_user; +static void (*specific_poweroff_func)(ipmi_user_t user) = NULL; + +/* Holds the old poweroff function so we can restore it on removal. */ +static void (*old_poweroff_func)(void); + + /* Called on a powerdown request. */ static void ipmi_poweroff_function (void) { @@ -548,9 +473,6 @@ static void ipmi_po_new_smi(int if_num, struct device *device) if (ready) return; - if ((ifnum_to_use >= 0) && (ifnum_to_use != if_num)) - return; - rv = ipmi_create_user(if_num, &ipmi_poweroff_handler, NULL, &ipmi_user); if (rv) { @@ -559,8 +481,6 @@ static void ipmi_po_new_smi(int if_num, struct device *device) return; } - ipmi_ifnum = if_num; - /* * Do a get device ide and store some results, since this is * used by several functions. 
@@ -621,15 +541,9 @@ static void ipmi_po_new_smi(int if_num, struct device *device) static void ipmi_po_smi_gone(int if_num) { - if (!ready) - return; - - if (ipmi_ifnum != if_num) - return; - - ready = 0; - ipmi_destroy_user(ipmi_user); - pm_power_off = old_poweroff_func; + /* This can never be called, because once poweroff driver is + registered, the interface can't go away until the power + driver is unregistered. */ } static struct ipmi_smi_watcher smi_watcher = @@ -702,9 +616,9 @@ static int ipmi_poweroff_init (void) printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv); goto out_err; } +#endif out_err: -#endif return rv; } diff --git a/trunk/drivers/char/ipmi/ipmi_si_intf.c b/trunk/drivers/char/ipmi/ipmi_si_intf.c index 81a0c89598e7..bb1fac104fda 100644 --- a/trunk/drivers/char/ipmi/ipmi_si_intf.c +++ b/trunk/drivers/char/ipmi/ipmi_si_intf.c @@ -61,10 +61,6 @@ #include "ipmi_si_sm.h" #include #include -#include -#include - -#define PFX "ipmi_si: " /* Measure times between events in the driver. */ #undef DEBUG_TIMING @@ -96,7 +92,7 @@ enum si_intf_state { enum si_type { SI_KCS, SI_SMIC, SI_BT }; -static char *si_to_str[] = { "kcs", "smic", "bt" }; +static char *si_to_str[] = { "KCS", "SMIC", "BT" }; #define DEVICE_NAME "ipmi_si" @@ -226,10 +222,7 @@ struct smi_info static int force_kipmid[SI_MAX_PARMS]; static int num_force_kipmid; -static int unload_when_empty = 1; - static int try_smi_init(struct smi_info *smi); -static void cleanup_one_si(struct smi_info *to_clean); static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); static int register_xaction_notifier(struct notifier_block * nb) @@ -247,18 +240,14 @@ static void deliver_recv_msg(struct smi_info *smi_info, spin_lock(&(smi_info->si_lock)); } -static void return_hosed_msg(struct smi_info *smi_info, int cCode) +static void return_hosed_msg(struct smi_info *smi_info) { struct ipmi_smi_msg *msg = smi_info->curr_msg; - if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED) - cCode = IPMI_ERR_UNSPECIFIED; - /* else use it as is */ - /* Make it a reponse */ msg->rsp[0] = msg->data[0] | 4; msg->rsp[1] = msg->data[1]; - msg->rsp[2] = cCode; + msg->rsp[2] = 0xFF; /* Unknown error. */ msg->rsp_size = 3; smi_info->curr_msg = NULL; @@ -309,7 +298,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) smi_info->curr_msg->data, smi_info->curr_msg->data_size); if (err) { - return_hosed_msg(smi_info, err); + return_hosed_msg(smi_info); } rv = SI_SM_CALL_WITHOUT_DELAY; @@ -651,7 +640,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, /* If we were handling a user message, format a response to send to the upper layer to tell it about the error. */ - return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED); + return_hosed_msg(smi_info); } si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); } @@ -695,24 +684,22 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, { /* We are idle and the upper layer requested that I fetch events, so do so. 
*/ - atomic_set(&smi_info->req_events, 0); + unsigned char msg[2]; - smi_info->curr_msg = ipmi_alloc_smi_msg(); - if (!smi_info->curr_msg) - goto out; + spin_lock(&smi_info->count_lock); + smi_info->flag_fetches++; + spin_unlock(&smi_info->count_lock); - smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); - smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; - smi_info->curr_msg->data_size = 2; + atomic_set(&smi_info->req_events, 0); + msg[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg[1] = IPMI_GET_MSG_FLAGS_CMD; smi_info->handlers->start_transaction( - smi_info->si_sm, - smi_info->curr_msg->data, - smi_info->curr_msg->data_size); - smi_info->si_state = SI_GETTING_EVENTS; + smi_info->si_sm, msg, 2); + smi_info->si_state = SI_GETTING_FLAGS; goto restart; } - out: + return si_sm_result; } @@ -727,15 +714,6 @@ static void sender(void *send_info, struct timeval t; #endif - if (atomic_read(&smi_info->stop_operation)) { - msg->rsp[0] = msg->data[0] | 4; - msg->rsp[1] = msg->data[1]; - msg->rsp[2] = IPMI_ERR_UNSPECIFIED; - msg->rsp_size = 3; - deliver_recv_msg(smi_info, msg); - return; - } - spin_lock_irqsave(&(smi_info->msg_lock), flags); #ifdef DEBUG_TIMING do_gettimeofday(&t); @@ -827,21 +805,13 @@ static void poll(void *send_info) { struct smi_info *smi_info = send_info; - /* - * Make sure there is some delay in the poll loop so we can - * drive time forward and timeout things. - */ - udelay(10); - smi_event_handler(smi_info, 10); + smi_event_handler(smi_info, 0); } static void request_events(void *send_info) { struct smi_info *smi_info = send_info; - if (atomic_read(&smi_info->stop_operation)) - return; - atomic_set(&smi_info->req_events, 1); } @@ -979,21 +949,12 @@ static int smi_start_processing(void *send_info, return 0; } -static void set_maintenance_mode(void *send_info, int enable) -{ - struct smi_info *smi_info = send_info; - - if (!enable) - atomic_set(&smi_info->req_events, 0); -} - static struct ipmi_smi_handlers handlers = { .owner = THIS_MODULE, .start_processing = smi_start_processing, .sender = sender, .request_events = request_events, - .set_maintenance_mode = set_maintenance_mode, .set_run_to_completion = set_run_to_completion, .poll = poll, }; @@ -1026,16 +987,6 @@ static int num_regshifts = 0; static int slave_addrs[SI_MAX_PARMS]; static int num_slave_addrs = 0; -#define IPMI_IO_ADDR_SPACE 0 -#define IPMI_MEM_ADDR_SPACE 1 -static char *addr_space_to_str[] = { "I/O", "mem" }; - -static int hotmod_handler(const char *val, struct kernel_param *kp); - -module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200); -MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See" - " Documentation/IPMI.txt in the kernel sources for the" - " gory details."); module_param_named(trydefaults, si_trydefaults, bool, 0); MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the" @@ -1087,12 +1038,12 @@ module_param_array(force_kipmid, int, &num_force_kipmid, 0); MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or" " disabled(0). Normally the IPMI driver auto-detects" " this, but the value may be overridden by this parm."); -module_param(unload_when_empty, int, 0); -MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are" - " specified or found, default is 1. 
Setting to 0" - " is useful for hot add of devices using hotmod."); +#define IPMI_IO_ADDR_SPACE 0 +#define IPMI_MEM_ADDR_SPACE 1 +static char *addr_space_to_str[] = { "I/O", "memory" }; + static void std_irq_cleanup(struct smi_info *info) { if (info->si_type == SI_BT) @@ -1366,234 +1317,6 @@ static int mem_setup(struct smi_info *info) return 0; } -/* - * Parms come in as [:op2[:op3...]]. ops are: - * add|remove,kcs|bt|smic,mem|i/o,
[,[,[,...]]] - * Options are: - * rsp= - * rsi= - * rsh= - * irq= - * ipmb= - */ -enum hotmod_op { HM_ADD, HM_REMOVE }; -struct hotmod_vals { - char *name; - int val; -}; -static struct hotmod_vals hotmod_ops[] = { - { "add", HM_ADD }, - { "remove", HM_REMOVE }, - { NULL } -}; -static struct hotmod_vals hotmod_si[] = { - { "kcs", SI_KCS }, - { "smic", SI_SMIC }, - { "bt", SI_BT }, - { NULL } -}; -static struct hotmod_vals hotmod_as[] = { - { "mem", IPMI_MEM_ADDR_SPACE }, - { "i/o", IPMI_IO_ADDR_SPACE }, - { NULL } -}; -static int ipmi_strcasecmp(const char *s1, const char *s2) -{ - while (*s1 || *s2) { - if (!*s1) - return -1; - if (!*s2) - return 1; - if (*s1 != *s2) - return *s1 - *s2; - s1++; - s2++; - } - return 0; -} -static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr) -{ - char *s; - int i; - - s = strchr(*curr, ','); - if (!s) { - printk(KERN_WARNING PFX "No hotmod %s given.\n", name); - return -EINVAL; - } - *s = '\0'; - s++; - for (i = 0; hotmod_ops[i].name; i++) { - if (ipmi_strcasecmp(*curr, v[i].name) == 0) { - *val = v[i].val; - *curr = s; - return 0; - } - } - - printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr); - return -EINVAL; -} - -static int hotmod_handler(const char *val, struct kernel_param *kp) -{ - char *str = kstrdup(val, GFP_KERNEL); - int rv = -EINVAL; - char *next, *curr, *s, *n, *o; - enum hotmod_op op; - enum si_type si_type; - int addr_space; - unsigned long addr; - int regspacing; - int regsize; - int regshift; - int irq; - int ipmb; - int ival; - struct smi_info *info; - - if (!str) - return -ENOMEM; - - /* Kill any trailing spaces, as we can get a "\n" from echo. */ - ival = strlen(str) - 1; - while ((ival >= 0) && isspace(str[ival])) { - str[ival] = '\0'; - ival--; - } - - for (curr = str; curr; curr = next) { - regspacing = 1; - regsize = 1; - regshift = 0; - irq = 0; - ipmb = 0x20; - - next = strchr(curr, ':'); - if (next) { - *next = '\0'; - next++; - } - - rv = parse_str(hotmod_ops, &ival, "operation", &curr); - if (rv) - break; - op = ival; - - rv = parse_str(hotmod_si, &ival, "interface type", &curr); - if (rv) - break; - si_type = ival; - - rv = parse_str(hotmod_as, &addr_space, "address space", &curr); - if (rv) - break; - - s = strchr(curr, ','); - if (s) { - *s = '\0'; - s++; - } - addr = simple_strtoul(curr, &n, 0); - if ((*n != '\0') || (*curr == '\0')) { - printk(KERN_WARNING PFX "Invalid hotmod address" - " '%s'\n", curr); - break; - } - - while (s) { - curr = s; - s = strchr(curr, ','); - if (s) { - *s = '\0'; - s++; - } - o = strchr(curr, '='); - if (o) { - *o = '\0'; - o++; - } -#define HOTMOD_INT_OPT(name, val) \ - if (ipmi_strcasecmp(curr, name) == 0) { \ - if (!o) { \ - printk(KERN_WARNING PFX \ - "No option given for '%s'\n", \ - curr); \ - goto out; \ - } \ - val = simple_strtoul(o, &n, 0); \ - if ((*n != '\0') || (*o == '\0')) { \ - printk(KERN_WARNING PFX \ - "Bad option given for '%s'\n", \ - curr); \ - goto out; \ - } \ - } - - HOTMOD_INT_OPT("rsp", regspacing) - else HOTMOD_INT_OPT("rsi", regsize) - else HOTMOD_INT_OPT("rsh", regshift) - else HOTMOD_INT_OPT("irq", irq) - else HOTMOD_INT_OPT("ipmb", ipmb) - else { - printk(KERN_WARNING PFX - "Invalid hotmod option '%s'\n", - curr); - goto out; - } -#undef HOTMOD_INT_OPT - } - - if (op == HM_ADD) { - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) { - rv = -ENOMEM; - goto out; - } - - info->addr_source = "hotmod"; - info->si_type = si_type; - info->io.addr_data = addr; - info->io.addr_type = addr_space; - if (addr_space == 
IPMI_MEM_ADDR_SPACE) - info->io_setup = mem_setup; - else - info->io_setup = port_setup; - - info->io.addr = NULL; - info->io.regspacing = regspacing; - if (!info->io.regspacing) - info->io.regspacing = DEFAULT_REGSPACING; - info->io.regsize = regsize; - if (!info->io.regsize) - info->io.regsize = DEFAULT_REGSPACING; - info->io.regshift = regshift; - info->irq = irq; - if (info->irq) - info->irq_setup = std_irq_setup; - info->slave_addr = ipmb; - - try_smi_init(info); - } else { - /* remove */ - struct smi_info *e, *tmp_e; - - mutex_lock(&smi_infos_lock); - list_for_each_entry_safe(e, tmp_e, &smi_infos, link) { - if (e->io.addr_type != addr_space) - continue; - if (e->si_type != si_type) - continue; - if (e->io.addr_data == addr) - cleanup_one_si(e); - } - mutex_unlock(&smi_infos_lock); - } - } - out: - kfree(str); - return rv; -} static __devinit void hardcode_find_bmc(void) { @@ -1610,11 +1333,11 @@ static __devinit void hardcode_find_bmc(void) info->addr_source = "hardcoded"; - if (!si_type[i] || ipmi_strcasecmp(si_type[i], "kcs") == 0) { + if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) { info->si_type = SI_KCS; - } else if (ipmi_strcasecmp(si_type[i], "smic") == 0) { + } else if (strcmp(si_type[i], "smic") == 0) { info->si_type = SI_SMIC; - } else if (ipmi_strcasecmp(si_type[i], "bt") == 0) { + } else if (strcmp(si_type[i], "bt") == 0) { info->si_type = SI_BT; } else { printk(KERN_WARNING @@ -2229,9 +1952,19 @@ static int try_get_dev_id(struct smi_info *smi_info) static int type_file_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { + char *out = (char *) page; struct smi_info *smi = data; - return sprintf(page, "%s\n", si_to_str[smi->si_type]); + switch (smi->si_type) { + case SI_KCS: + return sprintf(out, "kcs\n"); + case SI_SMIC: + return sprintf(out, "smic\n"); + case SI_BT: + return sprintf(out, "bt\n"); + default: + return 0; + } } static int stat_file_read_proc(char *page, char **start, off_t off, @@ -2267,24 +2000,7 @@ static int stat_file_read_proc(char *page, char **start, off_t off, out += sprintf(out, "incoming_messages: %ld\n", smi->incoming_messages); - return out - page; -} - -static int param_read_proc(char *page, char **start, off_t off, - int count, int *eof, void *data) -{ - struct smi_info *smi = data; - - return sprintf(page, - "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", - si_to_str[smi->si_type], - addr_space_to_str[smi->io.addr_type], - smi->io.addr_data, - smi->io.regspacing, - smi->io.regsize, - smi->io.regshift, - smi->irq, - smi->slave_addr); + return (out - ((char *) page)); } /* @@ -2646,7 +2362,6 @@ static int try_smi_init(struct smi_info *new_smi) new_smi, &new_smi->device_id, new_smi->dev, - "bmc", new_smi->slave_addr); if (rv) { printk(KERN_ERR @@ -2675,16 +2390,6 @@ static int try_smi_init(struct smi_info *new_smi) goto out_err_stop_timer; } - rv = ipmi_smi_add_proc_entry(new_smi->intf, "params", - param_read_proc, NULL, - new_smi, THIS_MODULE); - if (rv) { - printk(KERN_ERR - "ipmi_si: Unable to create proc entry: %d\n", - rv); - goto out_err_stop_timer; - } - list_add_tail(&new_smi->link, &smi_infos); mutex_unlock(&smi_infos_lock); @@ -2778,12 +2483,7 @@ static __devinit int init_ipmi_si(void) #endif #ifdef CONFIG_PCI - rv = pci_register_driver(&ipmi_pci_driver); - if (rv){ - printk(KERN_ERR - "init_ipmi_si: Unable to register PCI driver: %d\n", - rv); - } + pci_module_init(&ipmi_pci_driver); #endif if (si_trydefaults) { @@ -2798,7 +2498,7 @@ static __devinit int init_ipmi_si(void) } 
mutex_lock(&smi_infos_lock); - if (unload_when_empty && list_empty(&smi_infos)) { + if (list_empty(&smi_infos)) { mutex_unlock(&smi_infos_lock); #ifdef CONFIG_PCI pci_unregister_driver(&ipmi_pci_driver); @@ -2813,7 +2513,7 @@ static __devinit int init_ipmi_si(void) } module_init(init_ipmi_si); -static void cleanup_one_si(struct smi_info *to_clean) +static void __devexit cleanup_one_si(struct smi_info *to_clean) { int rv; unsigned long flags; diff --git a/trunk/drivers/char/ipmi/ipmi_smic_sm.c b/trunk/drivers/char/ipmi/ipmi_smic_sm.c index e64ea7d25d24..39d7e5ef1a2b 100644 --- a/trunk/drivers/char/ipmi/ipmi_smic_sm.c +++ b/trunk/drivers/char/ipmi/ipmi_smic_sm.c @@ -141,14 +141,12 @@ static int start_smic_transaction(struct si_sm_data *smic, { unsigned int i; - if (size < 2) - return IPMI_REQ_LEN_INVALID_ERR; - if (size > MAX_SMIC_WRITE_SIZE) - return IPMI_REQ_LEN_EXCEEDED_ERR; - - if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED)) - return IPMI_NOT_IN_MY_STATE_ERR; - + if ((size < 2) || (size > MAX_SMIC_WRITE_SIZE)) { + return -1; + } + if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED)) { + return -2; + } if (smic_debug & SMIC_DEBUG_MSG) { printk(KERN_INFO "start_smic_transaction -"); for (i = 0; i < size; i ++) { diff --git a/trunk/drivers/char/ipmi/ipmi_watchdog.c b/trunk/drivers/char/ipmi/ipmi_watchdog.c index 90fb2a541916..73f759eaa5a6 100644 --- a/trunk/drivers/char/ipmi/ipmi_watchdog.c +++ b/trunk/drivers/char/ipmi/ipmi_watchdog.c @@ -135,7 +135,6 @@ static int nowayout = WATCHDOG_NOWAYOUT; static ipmi_user_t watchdog_user = NULL; -static int watchdog_ifnum; /* Default the timeout to 10 seconds. */ static int timeout = 10; @@ -162,8 +161,6 @@ static struct fasync_struct *fasync_q = NULL; static char pretimeout_since_last_heartbeat = 0; static char expect_close; -static int ifnum_to_use = -1; - static DECLARE_RWSEM(register_sem); /* Parameters to ipmi_set_timeout */ @@ -172,8 +169,6 @@ static DECLARE_RWSEM(register_sem); #define IPMI_SET_TIMEOUT_FORCE_HB 2 static int ipmi_set_timeout(int do_heartbeat); -static void ipmi_register_watchdog(int ipmi_intf); -static void ipmi_unregister_watchdog(int ipmi_intf); /* If true, the driver will start running as soon as it is configured and ready. */ @@ -250,26 +245,6 @@ static int get_param_str(char *buffer, struct kernel_param *kp) return strlen(buffer); } - -static int set_param_wdog_ifnum(const char *val, struct kernel_param *kp) -{ - int rv = param_set_int(val, kp); - if (rv) - return rv; - if ((ifnum_to_use < 0) || (ifnum_to_use == watchdog_ifnum)) - return 0; - - ipmi_unregister_watchdog(watchdog_ifnum); - ipmi_register_watchdog(ifnum_to_use); - return 0; -} - -module_param_call(ifnum_to_use, set_param_wdog_ifnum, get_param_int, - &ifnum_to_use, 0644); -MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog " - "timer. Setting to -1 defaults to the first registered " - "interface"); - module_param_call(timeout, set_param_int, get_param_int, &timeout, 0644); MODULE_PARM_DESC(timeout, "Timeout value in seconds."); @@ -288,13 +263,12 @@ module_param_call(preop, set_param_str, get_param_str, preop_op, 0644); MODULE_PARM_DESC(preop, "Pretimeout driver operation. 
One of: " "preop_none, preop_panic, preop_give_data."); -module_param(start_now, int, 0444); +module_param(start_now, int, 0); MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as" "soon as the driver is loaded."); module_param(nowayout, int, 0644); -MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " - "(default=CONFIG_WATCHDOG_NOWAYOUT)"); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)"); /* Default state of the timer. */ static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE; @@ -898,11 +872,6 @@ static void ipmi_register_watchdog(int ipmi_intf) if (watchdog_user) goto out; - if ((ifnum_to_use >= 0) && (ifnum_to_use != ipmi_intf)) - goto out; - - watchdog_ifnum = ipmi_intf; - rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user); if (rv < 0) { printk(KERN_CRIT PFX "Unable to register with ipmi\n"); @@ -932,39 +901,6 @@ static void ipmi_register_watchdog(int ipmi_intf) } } -static void ipmi_unregister_watchdog(int ipmi_intf) -{ - int rv; - - down_write(®ister_sem); - - if (!watchdog_user) - goto out; - - if (watchdog_ifnum != ipmi_intf) - goto out; - - /* Make sure no one can call us any more. */ - misc_deregister(&ipmi_wdog_miscdev); - - /* Wait to make sure the message makes it out. The lower layer has - pointers to our buffers, we want to make sure they are done before - we release our memory. */ - while (atomic_read(&set_timeout_tofree)) - schedule_timeout_uninterruptible(1); - - /* Disconnect from IPMI. */ - rv = ipmi_destroy_user(watchdog_user); - if (rv) { - printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n", - rv); - } - watchdog_user = NULL; - - out: - up_write(®ister_sem); -} - #ifdef HAVE_NMI_HANDLER static int ipmi_nmi(void *dev_id, int cpu, int handled) @@ -1068,7 +1004,9 @@ static void ipmi_new_smi(int if_num, struct device *device) static void ipmi_smi_gone(int if_num) { - ipmi_unregister_watchdog(if_num); + /* This can never be called, because once the watchdog is + registered, the interface can't go away until the watchdog + is unregistered. */ } static struct ipmi_smi_watcher smi_watcher = @@ -1210,32 +1148,30 @@ static int __init ipmi_wdog_init(void) check_parms(); - register_reboot_notifier(&wdog_reboot_notifier); - atomic_notifier_chain_register(&panic_notifier_list, - &wdog_panic_notifier); - rv = ipmi_smi_watcher_register(&smi_watcher); if (rv) { #ifdef HAVE_NMI_HANDLER if (preaction_val == WDOG_PRETIMEOUT_NMI) release_nmi(&ipmi_nmi_handler); #endif - atomic_notifier_chain_unregister(&panic_notifier_list, - &wdog_panic_notifier); - unregister_reboot_notifier(&wdog_reboot_notifier); printk(KERN_WARNING PFX "can't register smi watcher\n"); return rv; } + register_reboot_notifier(&wdog_reboot_notifier); + atomic_notifier_chain_register(&panic_notifier_list, + &wdog_panic_notifier); + printk(KERN_INFO PFX "driver initialized\n"); return 0; } -static void __exit ipmi_wdog_exit(void) +static __exit void ipmi_unregister_watchdog(void) { - ipmi_smi_watcher_unregister(&smi_watcher); - ipmi_unregister_watchdog(watchdog_ifnum); + int rv; + + down_write(®ister_sem); #ifdef HAVE_NMI_HANDLER if (nmi_handler_registered) @@ -1243,8 +1179,37 @@ static void __exit ipmi_wdog_exit(void) #endif atomic_notifier_chain_unregister(&panic_notifier_list, - &wdog_panic_notifier); + &wdog_panic_notifier); unregister_reboot_notifier(&wdog_reboot_notifier); + + if (! watchdog_user) + goto out; + + /* Make sure no one can call us any more. 
*/ + misc_deregister(&ipmi_wdog_miscdev); + + /* Wait to make sure the message makes it out. The lower layer has + pointers to our buffers, we want to make sure they are done before + we release our memory. */ + while (atomic_read(&set_timeout_tofree)) + schedule_timeout_uninterruptible(1); + + /* Disconnect from IPMI. */ + rv = ipmi_destroy_user(watchdog_user); + if (rv) { + printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n", + rv); + } + watchdog_user = NULL; + + out: + up_write(®ister_sem); +} + +static void __exit ipmi_wdog_exit(void) +{ + ipmi_smi_watcher_unregister(&smi_watcher); + ipmi_unregister_watchdog(); } module_exit(ipmi_wdog_exit); module_init(ipmi_wdog_init); diff --git a/trunk/drivers/char/isicom.c b/trunk/drivers/char/isicom.c index 1637c1d9a4ba..58c955e390b3 100644 --- a/trunk/drivers/char/isicom.c +++ b/trunk/drivers/char/isicom.c @@ -530,9 +530,9 @@ static void isicom_tx(unsigned long _data) /* Interrupt handlers */ -static void isicom_bottomhalf(struct work_struct *work) +static void isicom_bottomhalf(void *data) { - struct isi_port *port = container_of(work, struct isi_port, bh_tqueue); + struct isi_port *port = (struct isi_port *) data; struct tty_struct *tty = port->tty; if (!tty) @@ -1474,9 +1474,9 @@ static void isicom_start(struct tty_struct *tty) } /* hangup et all */ -static void do_isicom_hangup(struct work_struct *work) +static void do_isicom_hangup(void *data) { - struct isi_port *port = container_of(work, struct isi_port, hangup_tq); + struct isi_port *port = data; struct tty_struct *tty; tty = port->tty; @@ -1966,8 +1966,8 @@ static int __devinit isicom_setup(void) port->channel = channel; port->close_delay = 50 * HZ/100; port->closing_wait = 3000 * HZ/100; - INIT_WORK(&port->hangup_tq, do_isicom_hangup); - INIT_WORK(&port->bh_tqueue, isicom_bottomhalf); + INIT_WORK(&port->hangup_tq, do_isicom_hangup, port); + INIT_WORK(&port->bh_tqueue, isicom_bottomhalf, port); port->status = 0; init_waitqueue_head(&port->open_wait); init_waitqueue_head(&port->close_wait); diff --git a/trunk/drivers/char/istallion.c b/trunk/drivers/char/istallion.c index 8f591945ebd9..ffdf9df1a67a 100644 --- a/trunk/drivers/char/istallion.c +++ b/trunk/drivers/char/istallion.c @@ -663,7 +663,7 @@ static int stli_initopen(stlibrd_t *brdp, stliport_t *portp); static int stli_rawopen(stlibrd_t *brdp, stliport_t *portp, unsigned long arg, int wait); static int stli_rawclose(stlibrd_t *brdp, stliport_t *portp, unsigned long arg, int wait); static int stli_waitcarrier(stlibrd_t *brdp, stliport_t *portp, struct file *filp); -static void stli_dohangup(struct work_struct *); +static void stli_dohangup(void *arg); static int stli_setport(stliport_t *portp); static int stli_cmdwait(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, void *arg, int size, int copyback); static void stli_sendcmd(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, void *arg, int size, int copyback); @@ -1990,9 +1990,9 @@ static void stli_start(struct tty_struct *tty) * aren't that time critical). 
*/ -static void stli_dohangup(struct work_struct *ugly_api) +static void stli_dohangup(void *arg) { - stliport_t *portp = container_of(ugly_api, stliport_t, tqhangup); + stliport_t *portp = (stliport_t *) arg; if (portp->tty != NULL) { tty_hangup(portp->tty); } @@ -2898,7 +2898,7 @@ static int stli_initports(stlibrd_t *brdp) portp->baud_base = STL_BAUDBASE; portp->close_delay = STL_CLOSEDELAY; portp->closing_wait = 30 * HZ; - INIT_WORK(&portp->tqhangup, stli_dohangup); + INIT_WORK(&portp->tqhangup, stli_dohangup, portp); init_waitqueue_head(&portp->open_wait); init_waitqueue_head(&portp->close_wait); init_waitqueue_head(&portp->raw_wait); @@ -3476,8 +3476,6 @@ static int stli_initecp(stlibrd_t *brdp) if (sig.magic != cpu_to_le32(ECP_MAGIC)) { release_region(brdp->iobase, brdp->iosize); - iounmap(brdp->membase); - brdp->membase = NULL; return -ENODEV; } @@ -3634,8 +3632,6 @@ static int stli_initonb(stlibrd_t *brdp) sig.magic3 != cpu_to_le16(ONB_MAGIC3)) { release_region(brdp->iobase, brdp->iosize); - iounmap(brdp->membase); - brdp->membase = NULL; return -ENODEV; } diff --git a/trunk/drivers/char/misc.c b/trunk/drivers/char/misc.c index 7e975f606924..7a484fc7cb9e 100644 --- a/trunk/drivers/char/misc.c +++ b/trunk/drivers/char/misc.c @@ -199,8 +199,6 @@ int misc_register(struct miscdevice * misc) dev_t dev; int err = 0; - INIT_LIST_HEAD(&misc->list); - down(&misc_sem); list_for_each_entry(c, &misc_list, list) { if (c->minor == misc->minor) { diff --git a/trunk/drivers/char/mmtimer.c b/trunk/drivers/char/mmtimer.c index c09160383a53..22b9905c1e52 100644 --- a/trunk/drivers/char/mmtimer.c +++ b/trunk/drivers/char/mmtimer.c @@ -680,7 +680,7 @@ static int __init mmtimer_init(void) if (sn_rtc_cycles_per_second < 100000) { printk(KERN_ERR "%s: unable to determine clock frequency\n", MMTIMER_NAME); - goto out1; + return -1; } mmtimer_femtoperiod = ((unsigned long)1E15 + sn_rtc_cycles_per_second / @@ -689,13 +689,13 @@ static int __init mmtimer_init(void) if (request_irq(SGI_MMTIMER_VECTOR, mmtimer_interrupt, IRQF_PERCPU, MMTIMER_NAME, NULL)) { printk(KERN_WARNING "%s: unable to allocate interrupt.", MMTIMER_NAME); - goto out1; + return -1; } if (misc_register(&mmtimer_miscdev)) { printk(KERN_ERR "%s: failed to register device\n", MMTIMER_NAME); - goto out2; + return -1; } /* Get max numbered node, calculate slots needed */ @@ -709,18 +709,16 @@ static int __init mmtimer_init(void) if (timers == NULL) { printk(KERN_ERR "%s: failed to allocate memory for device\n", MMTIMER_NAME); - goto out3; + return -1; } - memset(timers,0,(sizeof(mmtimer_t *)*maxn)); - /* Allocate mmtimer_t's for each online node */ for_each_online_node(node) { timers[node] = kmalloc_node(sizeof(mmtimer_t)*NUM_COMPARATORS, GFP_KERNEL, node); if (timers[node] == NULL) { printk(KERN_ERR "%s: failed to allocate memory for device\n", MMTIMER_NAME); - goto out4; + return -1; } for (i=0; i< NUM_COMPARATORS; i++) { mmtimer_t * base = timers[node] + i; @@ -741,17 +739,6 @@ static int __init mmtimer_init(void) sn_rtc_cycles_per_second/(unsigned long)1E6); return 0; - -out4: - for_each_online_node(node) { - kfree(timers[node]); - } -out3: - misc_deregister(&mmtimer_miscdev); -out2: - free_irq(SGI_MMTIMER_VECTOR, NULL); -out1: - return -1; } module_init(mmtimer_init); diff --git a/trunk/drivers/char/moxa.c b/trunk/drivers/char/moxa.c index 8b316953173d..96cb1f07332b 100644 --- a/trunk/drivers/char/moxa.c +++ b/trunk/drivers/char/moxa.c @@ -222,7 +222,7 @@ static struct semaphore moxaBuffSem; /* * static functions: */ -static void 
do_moxa_softint(struct work_struct *); +static void do_moxa_softint(void *); static int moxa_open(struct tty_struct *, struct file *); static void moxa_close(struct tty_struct *, struct file *); static int moxa_write(struct tty_struct *, const unsigned char *, int); @@ -363,7 +363,7 @@ static int __init moxa_init(void) for (i = 0, ch = moxaChannels; i < MAX_PORTS; i++, ch++) { ch->type = PORT_16550A; ch->port = i; - INIT_WORK(&ch->tqueue, do_moxa_softint); + INIT_WORK(&ch->tqueue, do_moxa_softint, ch); ch->tty = NULL; ch->close_delay = 5 * HZ / 10; ch->closing_wait = 30 * HZ; @@ -498,12 +498,9 @@ static void __exit moxa_exit(void) printk("Couldn't unregister MOXA Intellio family serial driver\n"); put_tty_driver(moxaDriver); - for (i = 0; i < MAX_BOARDS; i++) { - if (moxaBaseAddr[i]) - iounmap(moxaBaseAddr[i]); + for (i = 0; i < MAX_BOARDS; i++) if (moxa_boards[i].busType == MOXA_BUS_TYPE_PCI) pci_dev_put(moxa_boards[i].pciInfo.pdev); - } if (verbose) printk("Done\n"); @@ -512,9 +509,9 @@ static void __exit moxa_exit(void) module_init(moxa_init); module_exit(moxa_exit); -static void do_moxa_softint(struct work_struct *work) +static void do_moxa_softint(void *private_) { - struct moxa_str *ch = container_of(work, struct moxa_str, tqueue); + struct moxa_str *ch = (struct moxa_str *) private_; struct tty_struct *tty; if (ch && (tty = ch->tty)) { diff --git a/trunk/drivers/char/mxser.c b/trunk/drivers/char/mxser.c index 5ed2486b7581..048d91142c17 100644 --- a/trunk/drivers/char/mxser.c +++ b/trunk/drivers/char/mxser.c @@ -389,7 +389,7 @@ static int mxser_init(void); /* static void mxser_poll(unsigned long); */ static int mxser_get_ISA_conf(int, struct mxser_hwconf *); static int mxser_get_PCI_conf(int, int, int, struct mxser_hwconf *); -static void mxser_do_softint(struct work_struct *); +static void mxser_do_softint(void *); static int mxser_open(struct tty_struct *, struct file *); static void mxser_close(struct tty_struct *, struct file *); static int mxser_write(struct tty_struct *, const unsigned char *, int); @@ -590,7 +590,7 @@ static int mxser_initbrd(int board, struct mxser_hwconf *hwconf) info->custom_divisor = hwconf->baud_base[i] * 16; info->close_delay = 5 * HZ / 10; info->closing_wait = 30 * HZ; - INIT_WORK(&info->tqueue, mxser_do_softint); + INIT_WORK(&info->tqueue, mxser_do_softint, info); info->normal_termios = mxvar_sdriver->init_termios; init_waitqueue_head(&info->open_wait); init_waitqueue_head(&info->close_wait); @@ -917,10 +917,9 @@ static int mxser_init(void) return 0; } -static void mxser_do_softint(struct work_struct *work) +static void mxser_do_softint(void *private_) { - struct mxser_struct *info = - container_of(work, struct mxser_struct, tqueue); + struct mxser_struct *info = private_; struct tty_struct *tty; tty = info->tty; diff --git a/trunk/drivers/char/pcmcia/synclink_cs.c b/trunk/drivers/char/pcmcia/synclink_cs.c index 74d21c1c104f..f9f72500ea5d 100644 --- a/trunk/drivers/char/pcmcia/synclink_cs.c +++ b/trunk/drivers/char/pcmcia/synclink_cs.c @@ -75,10 +75,8 @@ #include #include -#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_CS_MODULE)) -#define SYNCLINK_GENERIC_HDLC 1 -#else -#define SYNCLINK_GENERIC_HDLC 0 +#ifdef CONFIG_HDLC_MODULE +#define CONFIG_HDLC 1 #endif #define GET_USER(error,value,addr) error = get_user(value,addr) @@ -237,7 +235,7 @@ typedef struct _mgslpc_info { int dosyncppp; spinlock_t netlock; -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC struct net_device *netdev; #endif @@ -394,7 +392,7 @@ static void 
tx_timeout(unsigned long context); static int ioctl_common(MGSLPC_INFO *info, unsigned int cmd, unsigned long arg); -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC #define dev_to_port(D) (dev_to_hdlc(D)->priv) static void hdlcdev_tx_done(MGSLPC_INFO *info); static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size); @@ -423,7 +421,7 @@ static irqreturn_t mgslpc_isr(int irq, void *dev_id); /* * Bottom half interrupt handlers */ -static void bh_handler(struct work_struct *work); +static void bh_handler(void* Context); static void bh_transmit(MGSLPC_INFO *info); static void bh_status(MGSLPC_INFO *info); @@ -549,7 +547,7 @@ static int mgslpc_probe(struct pcmcia_device *link) memset(info, 0, sizeof(MGSLPC_INFO)); info->magic = MGSLPC_MAGIC; - INIT_WORK(&info->task, bh_handler); + INIT_WORK(&info->task, bh_handler, info); info->max_frame_size = 4096; info->close_delay = 5*HZ/10; info->closing_wait = 30*HZ; @@ -837,9 +835,9 @@ static int bh_action(MGSLPC_INFO *info) return rc; } -static void bh_handler(struct work_struct *work) +static void bh_handler(void* Context) { - MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task); + MGSLPC_INFO *info = (MGSLPC_INFO*)Context; int action; if (!info) @@ -1055,7 +1053,7 @@ static void tx_done(MGSLPC_INFO *info) info->drop_rts_on_tx_done = 0; } -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC if (info->netcount) hdlcdev_tx_done(info); else @@ -1166,7 +1164,7 @@ static void dcd_change(MGSLPC_INFO *info) } else info->input_signal_events.dcd_down++; -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC if (info->netcount) { if (info->serial_signals & SerialSignal_DCD) netif_carrier_on(info->netdev); @@ -2955,7 +2953,7 @@ static void mgslpc_add_device(MGSLPC_INFO *info) printk( "SyncLink PC Card %s:IO=%04X IRQ=%d\n", info->device_name, info->io_base, info->irq_level); -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC hdlcdev_init(info); #endif } @@ -2971,7 +2969,7 @@ static void mgslpc_remove_device(MGSLPC_INFO *remove_info) last->next_device = info->next_device; else mgslpc_device_list = info->next_device; -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC hdlcdev_exit(info); #endif release_resources(info); @@ -3903,7 +3901,7 @@ static int rx_get_frame(MGSLPC_INFO *info) return_frame = 1; } framesize = 0; -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC { struct net_device_stats *stats = hdlc_stats(info->netdev); stats->rx_errors++; @@ -3937,7 +3935,7 @@ static int rx_get_frame(MGSLPC_INFO *info) ++framesize; } -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC if (info->netcount) hdlcdev_rx(info, buf->data, framesize); else @@ -4093,7 +4091,7 @@ static void tx_timeout(unsigned long context) spin_unlock_irqrestore(&info->lock,flags); -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC if (info->netcount) hdlcdev_tx_done(info); else @@ -4101,7 +4099,7 @@ static void tx_timeout(unsigned long context) bh_transmit(info); } -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC /** * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.) 
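Most of the tty and serial hunks above (isicom, istallion, moxa, mxser, synclink_cs) move work-queue callbacks between the two historical APIs: the three-argument INIT_WORK(work, fn, data), whose handler receives an opaque void *, and the two-argument form, whose handler receives the struct work_struct itself and recovers the enclosing object with container_of(). The sketch below shows the container_of() idiom standalone; the work_struct and container_of definitions are simplified stand-ins for the kernel ones, and the isi_port layout is trimmed to the fields the hunks show.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel definitions, illustration only. */
struct work_struct { int pending; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct isi_port {                       /* trimmed to the fields used here */
        int channel;
        struct work_struct bh_tqueue;
};

/* New-style handler: gets the work item, derives the owning port from it. */
static void isicom_bottomhalf(struct work_struct *work)
{
        struct isi_port *port = container_of(work, struct isi_port, bh_tqueue);

        printf("bottom half for channel %d\n", port->channel);
}

int main(void)
{
        struct isi_port port = { .channel = 3 };

        /* The kernel would reach the handler via schedule_work(&port.bh_tqueue). */
        isicom_bottomhalf(&port.bh_tqueue);
        return 0;
}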
diff --git a/trunk/drivers/char/random.c b/trunk/drivers/char/random.c index 4c6782a1ecdb..d40df30c2b10 100644 --- a/trunk/drivers/char/random.c +++ b/trunk/drivers/char/random.c @@ -1422,9 +1422,9 @@ static struct keydata { static unsigned int ip_cnt; -static void rekey_seq_generator(struct work_struct *work); +static void rekey_seq_generator(void *private_); -static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator); +static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL); /* * Lock avoidance: @@ -1438,7 +1438,7 @@ static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator); * happen, and even if that happens only a not perfectly compliant * ISN is generated, nothing fatal. */ -static void rekey_seq_generator(struct work_struct *work) +static void rekey_seq_generator(void *private_) { struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)]; diff --git a/trunk/drivers/char/rio/rio_linux.c b/trunk/drivers/char/rio/rio_linux.c index e79b2ede8510..7ac68cb3bedd 100644 --- a/trunk/drivers/char/rio/rio_linux.c +++ b/trunk/drivers/char/rio/rio_linux.c @@ -1026,7 +1026,6 @@ static int __init rio_init(void) found++; } else { iounmap(p->RIOHosts[p->RIONumHosts].Caddr); - p->RIOHosts[p->RIONumHosts].Caddr = NULL; } } @@ -1079,7 +1078,6 @@ static int __init rio_init(void) found++; } else { iounmap(p->RIOHosts[p->RIONumHosts].Caddr); - p->RIOHosts[p->RIONumHosts].Caddr = NULL; } #else printk(KERN_ERR "Found an older RIO PCI card, but the driver is not " "compiled to support it.\n"); @@ -1119,10 +1117,8 @@ static int __init rio_init(void) } } - if (!okboard) { + if (!okboard) iounmap(hp->Caddr); - hp->Caddr = NULL; - } } } @@ -1192,8 +1188,6 @@ static void __exit rio_exit(void) } /* It is safe/allowed to del_timer a non-active timer */ del_timer(&hp->timer); - if (hp->Caddr) - iounmap(hp->Caddr); if (hp->Type == RIO_PCI) pci_dev_put(hp->pdev); } diff --git a/trunk/drivers/char/riscom8.c b/trunk/drivers/char/riscom8.c index 0a77bfcd5b5e..5ab32b38f45a 100644 --- a/trunk/drivers/char/riscom8.c +++ b/trunk/drivers/char/riscom8.c @@ -82,6 +82,11 @@ static struct riscom_board * IRQ_to_board[16]; static struct tty_driver *riscom_driver; +static unsigned long baud_table[] = { + 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800, + 9600, 19200, 38400, 57600, 76800, 0, +}; + static struct riscom_board rc_board[RC_NBOARD] = { { .base = RC_IOBASE1, @@ -1511,9 +1516,9 @@ static void rc_start(struct tty_struct * tty) * do_rc_hangup() -> tty->hangup() -> rc_hangup() * */ -static void do_rc_hangup(struct work_struct *ugly_api) +static void do_rc_hangup(void *private_) { - struct riscom_port *port = container_of(ugly_api, struct riscom_port, tqueue_hangup); + struct riscom_port *port = (struct riscom_port *) private_; struct tty_struct *tty; tty = port->tty; @@ -1562,9 +1567,9 @@ static void rc_set_termios(struct tty_struct * tty, struct termios * old_termios } } -static void do_softint(struct work_struct *ugly_api) +static void do_softint(void *private_) { - struct riscom_port *port = container_of(ugly_api, struct riscom_port, tqueue); + struct riscom_port *port = (struct riscom_port *) private_; struct tty_struct *tty; if(!(tty = port->tty)) @@ -1627,8 +1632,8 @@ static inline int rc_init_drivers(void) memset(rc_port, 0, sizeof(rc_port)); for (i = 0; i < RC_NPORT * RC_NBOARD; i++) { rc_port[i].magic = RISCOM8_MAGIC; - INIT_WORK(&rc_port[i].tqueue, do_softint); - INIT_WORK(&rc_port[i].tqueue_hangup, do_rc_hangup); + INIT_WORK(&rc_port[i].tqueue, do_softint, &rc_port[i]); + 
INIT_WORK(&rc_port[i].tqueue_hangup, do_rc_hangup, &rc_port[i]); rc_port[i].close_delay = 50 * HZ/100; rc_port[i].closing_wait = 3000 * HZ/100; init_waitqueue_head(&rc_port[i].open_wait); diff --git a/trunk/drivers/char/serial167.c b/trunk/drivers/char/serial167.c index 9ba13af234be..3af7f0958c5d 100644 --- a/trunk/drivers/char/serial167.c +++ b/trunk/drivers/char/serial167.c @@ -706,9 +706,9 @@ cd2401_rx_interrupt(int irq, void *dev_id) * had to poll every port to see if that port needed servicing. */ static void -do_softint(struct work_struct *ugly_api) +do_softint(void *private_) { - struct cyclades_port *info = container_of(ugly_api, struct cyclades_port, tqueue); + struct cyclades_port *info = (struct cyclades_port *) private_; struct tty_struct *tty; tty = info->tty; @@ -2273,7 +2273,7 @@ scrn[1] = '\0'; info->blocked_open = 0; info->default_threshold = 0; info->default_timeout = 0; - INIT_WORK(&info->tqueue, do_softint); + INIT_WORK(&info->tqueue, do_softint, info); init_waitqueue_head(&info->open_wait); init_waitqueue_head(&info->close_wait); /* info->session */ diff --git a/trunk/drivers/char/sonypi.c b/trunk/drivers/char/sonypi.c index fc87070f1866..c084149153de 100644 --- a/trunk/drivers/char/sonypi.c +++ b/trunk/drivers/char/sonypi.c @@ -765,7 +765,7 @@ static void sonypi_setbluetoothpower(u8 state) sonypi_device.bluetooth_power = state; } -static void input_keyrelease(struct work_struct *work) +static void input_keyrelease(void *data) { struct sonypi_keypress kp; @@ -1412,7 +1412,7 @@ static int __devinit sonypi_probe(struct platform_device *dev) goto err_inpdev_unregister; } - INIT_WORK(&sonypi_device.input_work, input_keyrelease); + INIT_WORK(&sonypi_device.input_work, input_keyrelease, NULL); } sonypi_enable(0); diff --git a/trunk/drivers/char/specialix.c b/trunk/drivers/char/specialix.c index 99137ab66b62..7e1bd9562c2a 100644 --- a/trunk/drivers/char/specialix.c +++ b/trunk/drivers/char/specialix.c @@ -2261,10 +2261,9 @@ static void sx_start(struct tty_struct * tty) * do_sx_hangup() -> tty->hangup() -> sx_hangup() * */ -static void do_sx_hangup(struct work_struct *work) +static void do_sx_hangup(void *private_) { - struct specialix_port *port = - container_of(work, struct specialix_port, tqueue_hangup); + struct specialix_port *port = (struct specialix_port *) private_; struct tty_struct *tty; func_enter(); @@ -2337,10 +2336,9 @@ static void sx_set_termios(struct tty_struct * tty, struct termios * old_termios } -static void do_softint(struct work_struct *work) +static void do_softint(void *private_) { - struct specialix_port *port = - container_of(work, struct specialix_port, tqueue); + struct specialix_port *port = (struct specialix_port *) private_; struct tty_struct *tty; func_enter(); @@ -2413,8 +2411,8 @@ static int sx_init_drivers(void) memset(sx_port, 0, sizeof(sx_port)); for (i = 0; i < SX_NPORT * SX_NBOARD; i++) { sx_port[i].magic = SPECIALIX_MAGIC; - INIT_WORK(&sx_port[i].tqueue, do_softint); - INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup); + INIT_WORK(&sx_port[i].tqueue, do_softint, &sx_port[i]); + INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup, &sx_port[i]); sx_port[i].close_delay = 50 * HZ/100; sx_port[i].closing_wait = 3000 * HZ/100; init_waitqueue_head(&sx_port[i].open_wait); diff --git a/trunk/drivers/char/stallion.c b/trunk/drivers/char/stallion.c index 5e2de62bce70..522e88e395cc 100644 --- a/trunk/drivers/char/stallion.c +++ b/trunk/drivers/char/stallion.c @@ -500,7 +500,7 @@ static int stl_echatintr(stlbrd_t *brdp); static int 
stl_echmcaintr(stlbrd_t *brdp); static int stl_echpciintr(stlbrd_t *brdp); static int stl_echpci64intr(stlbrd_t *brdp); -static void stl_offintr(struct work_struct *); +static void stl_offintr(void *private); static stlbrd_t *stl_allocbrd(void); static stlport_t *stl_getport(int brdnr, int panelnr, int portnr); @@ -2081,12 +2081,14 @@ static int stl_echpci64intr(stlbrd_t *brdp) /* * Service an off-level request for some channel. */ -static void stl_offintr(struct work_struct *work) +static void stl_offintr(void *private) { - stlport_t *portp = container_of(work, stlport_t, tqueue); + stlport_t *portp; struct tty_struct *tty; unsigned int oldsigs; + portp = private; + #ifdef DEBUG printk("stl_offintr(portp=%x)\n", (int) portp); #endif @@ -2154,7 +2156,7 @@ static int __init stl_initports(stlbrd_t *brdp, stlpanel_t *panelp) portp->baud_base = STL_BAUDBASE; portp->close_delay = STL_CLOSEDELAY; portp->closing_wait = 30 * HZ; - INIT_WORK(&portp->tqueue, stl_offintr); + INIT_WORK(&portp->tqueue, stl_offintr, portp); init_waitqueue_head(&portp->open_wait); init_waitqueue_head(&portp->close_wait); portp->stats.brd = portp->brdnr; diff --git a/trunk/drivers/char/synclink.c b/trunk/drivers/char/synclink.c index 645187b9141e..06784adcc35c 100644 --- a/trunk/drivers/char/synclink.c +++ b/trunk/drivers/char/synclink.c @@ -101,10 +101,8 @@ #include #include -#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE)) -#define SYNCLINK_GENERIC_HDLC 1 -#else -#define SYNCLINK_GENERIC_HDLC 0 +#ifdef CONFIG_HDLC_MODULE +#define CONFIG_HDLC 1 #endif #define GET_USER(error,value,addr) error = get_user(value,addr) @@ -322,7 +320,7 @@ struct mgsl_struct { int dosyncppp; spinlock_t netlock; -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC struct net_device *netdev; #endif }; @@ -730,7 +728,7 @@ static void usc_loopmode_send_done( struct mgsl_struct * info ); static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg); -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC #define dev_to_port(D) (dev_to_hdlc(D)->priv) static void hdlcdev_tx_done(struct mgsl_struct *info); static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size); @@ -804,7 +802,7 @@ static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, u /* * Bottom half interrupt handlers */ -static void mgsl_bh_handler(struct work_struct *work); +static void mgsl_bh_handler(void* Context); static void mgsl_bh_receive(struct mgsl_struct *info); static void mgsl_bh_transmit(struct mgsl_struct *info); static void mgsl_bh_status(struct mgsl_struct *info); @@ -1073,10 +1071,9 @@ static int mgsl_bh_action(struct mgsl_struct *info) /* * Perform bottom half processing of work items queued by ISR. 
*/ -static void mgsl_bh_handler(struct work_struct *work) +static void mgsl_bh_handler(void* Context) { - struct mgsl_struct *info = - container_of(work, struct mgsl_struct, task); + struct mgsl_struct *info = (struct mgsl_struct*)Context; int action; if (!info) @@ -1279,7 +1276,7 @@ static void mgsl_isr_transmit_status( struct mgsl_struct *info ) info->drop_rts_on_tx_done = 0; } -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC if (info->netcount) hdlcdev_tx_done(info); else @@ -1344,7 +1341,7 @@ static void mgsl_isr_io_pin( struct mgsl_struct *info ) info->input_signal_events.dcd_up++; } else info->input_signal_events.dcd_down++; -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC if (info->netcount) { if (status & MISCSTATUS_DCD) netif_carrier_on(info->netdev); @@ -4315,7 +4312,7 @@ static void mgsl_add_device( struct mgsl_struct *info ) info->max_frame_size ); } -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC hdlcdev_init(info); #endif @@ -4340,7 +4337,7 @@ static struct mgsl_struct* mgsl_allocate_device(void) } else { memset(info, 0, sizeof(struct mgsl_struct)); info->magic = MGSL_MAGIC; - INIT_WORK(&info->task, mgsl_bh_handler); + INIT_WORK(&info->task, mgsl_bh_handler, info); info->max_frame_size = 4096; info->close_delay = 5*HZ/10; info->closing_wait = 30*HZ; @@ -4473,7 +4470,7 @@ static void synclink_cleanup(void) info = mgsl_device_list; while(info) { -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC hdlcdev_exit(info); #endif mgsl_release_resources(info); @@ -6647,7 +6644,7 @@ static int mgsl_get_rx_frame(struct mgsl_struct *info) return_frame = 1; } framesize = 0; -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC { struct net_device_stats *stats = hdlc_stats(info->netdev); stats->rx_errors++; @@ -6723,7 +6720,7 @@ static int mgsl_get_rx_frame(struct mgsl_struct *info) *ptmp); } -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC if (info->netcount) hdlcdev_rx(info,info->intermediate_rxbuffer,framesize); else @@ -7627,7 +7624,7 @@ static void mgsl_tx_timeout(unsigned long context) spin_unlock_irqrestore(&info->irq_spinlock,flags); -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC if (info->netcount) hdlcdev_tx_done(info); else @@ -7703,7 +7700,7 @@ static int usc_loopmode_active( struct mgsl_struct * info) return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ; } -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC /** * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.) 
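The synclink drivers in this patch drop the SYNCLINK_GENERIC_HDLC guard in favour of the older trick of defining CONFIG_HDLC whenever CONFIG_HDLC_MODULE is set. Both are compile-time switches around the generic-HDLC hooks; the newer test additionally requires that, when the HDLC core is modular, the serial driver itself is modular too, so a built-in driver never references symbols that only exist in a module. The sketch below places the two guards side by side, condensed from the hunks above; the two blocks are alternatives and are not meant to coexist in one file.

/* Older form, restored by this patch: treat a modular HDLC core exactly
 * like a built-in one for the purpose of the #ifdefs that follow. */
#ifdef CONFIG_HDLC_MODULE
#define CONFIG_HDLC 1
#endif

#ifdef CONFIG_HDLC
/* hdlcdev_init(), hdlcdev_rx(), hdlcdev_tx_done() ... are compiled in */
#endif

/* Newer form, removed by this patch: only enable the hooks when the generic
 * HDLC layer is actually reachable from this driver's own build mode. */
#if defined(CONFIG_HDLC) || \
    (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE))
#define SYNCLINK_GENERIC_HDLC 1
#else
#define SYNCLINK_GENERIC_HDLC 0
#endif

#if SYNCLINK_GENERIC_HDLC
/* hdlcdev_init(), hdlcdev_rx(), hdlcdev_tx_done() ... are compiled in */
#endif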
diff --git a/trunk/drivers/char/synclink_gt.c b/trunk/drivers/char/synclink_gt.c index e4730a7312b5..d4334c79f8d4 100644 --- a/trunk/drivers/char/synclink_gt.c +++ b/trunk/drivers/char/synclink_gt.c @@ -83,10 +83,8 @@ #include "linux/synclink.h" -#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_GT_MODULE)) -#define SYNCLINK_GENERIC_HDLC 1 -#else -#define SYNCLINK_GENERIC_HDLC 0 +#ifdef CONFIG_HDLC_MODULE +#define CONFIG_HDLC 1 #endif /* @@ -173,7 +171,7 @@ static void set_break(struct tty_struct *tty, int break_state); /* * generic HDLC support and callbacks */ -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC #define dev_to_port(D) (dev_to_hdlc(D)->priv) static void hdlcdev_tx_done(struct slgt_info *info); static void hdlcdev_rx(struct slgt_info *info, char *buf, int size); @@ -361,7 +359,7 @@ struct slgt_info { int netcount; int dosyncppp; spinlock_t netlock; -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC struct net_device *netdev; #endif @@ -487,7 +485,7 @@ static void enable_loopback(struct slgt_info *info); static void set_rate(struct slgt_info *info, u32 data_rate); static int bh_action(struct slgt_info *info); -static void bh_handler(struct work_struct *work); +static void bh_handler(void* context); static void bh_transmit(struct slgt_info *info); static void isr_serial(struct slgt_info *info); static void isr_rdma(struct slgt_info *info); @@ -1356,7 +1354,7 @@ static void set_break(struct tty_struct *tty, int break_state) spin_unlock_irqrestore(&info->lock,flags); } -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC /** * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.) @@ -1880,9 +1878,9 @@ static int bh_action(struct slgt_info *info) /* * perform bottom half processing */ -static void bh_handler(struct work_struct *work) +static void bh_handler(void* context) { - struct slgt_info *info = container_of(work, struct slgt_info, task); + struct slgt_info *info = context; int action; if (!info) @@ -2004,7 +2002,7 @@ static void dcd_change(struct slgt_info *info) } else { info->input_signal_events.dcd_down++; } -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC if (info->netcount) { if (info->signals & SerialSignal_DCD) netif_carrier_on(info->netdev); @@ -2182,7 +2180,7 @@ static void isr_txeom(struct slgt_info *info, unsigned short status) set_signals(info); } -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC if (info->netcount) hdlcdev_tx_done(info); else @@ -3308,7 +3306,7 @@ static void add_device(struct slgt_info *info) devstr, info->device_name, info->phys_reg_addr, info->irq_level, info->max_frame_size); -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC hdlcdev_init(info); #endif } @@ -3328,7 +3326,7 @@ static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev } else { memset(info, 0, sizeof(struct slgt_info)); info->magic = MGSL_MAGIC; - INIT_WORK(&info->task, bh_handler); + INIT_WORK(&info->task, bh_handler, info); info->max_frame_size = 4096; info->raw_rx_size = DMABUFSIZE; info->close_delay = 5*HZ/10; @@ -3490,7 +3488,7 @@ static void slgt_cleanup(void) /* release devices */ info = slgt_device_list; while(info) { -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC hdlcdev_exit(info); #endif free_dma_bufs(info); @@ -3524,7 +3522,6 @@ static int __init slgt_init(void) if (!slgt_device_list) { printk("%s no devices found\n",driver_name); - pci_unregister_driver(&pci_driver); return -ENODEV; } @@ -4436,7 +4433,7 @@ static int rx_get_frame(struct slgt_info *info) framesize = 0; } -#if SYNCLINK_GENERIC_HDLC +#ifdef 
CONFIG_HDLC if (framesize == 0) { struct net_device_stats *stats = hdlc_stats(info->netdev); stats->rx_errors++; @@ -4479,7 +4476,7 @@ static int rx_get_frame(struct slgt_info *info) framesize++; } -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC if (info->netcount) hdlcdev_rx(info,info->tmp_rbuf, framesize); else @@ -4782,7 +4779,7 @@ static void tx_timeout(unsigned long context) info->tx_count = 0; spin_unlock_irqrestore(&info->lock,flags); -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC if (info->netcount) hdlcdev_tx_done(info); else @@ -4802,6 +4799,6 @@ static void rx_timeout(unsigned long context) spin_lock_irqsave(&info->lock, flags); info->pending_bh |= BH_RECEIVE; spin_unlock_irqrestore(&info->lock, flags); - bh_handler(&info->task); + bh_handler(info); } diff --git a/trunk/drivers/char/synclinkmp.c b/trunk/drivers/char/synclinkmp.c index 20a96ef250be..3e932b681371 100644 --- a/trunk/drivers/char/synclinkmp.c +++ b/trunk/drivers/char/synclinkmp.c @@ -67,10 +67,8 @@ #include #include -#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINKMP_MODULE)) -#define SYNCLINK_GENERIC_HDLC 1 -#else -#define SYNCLINK_GENERIC_HDLC 0 +#ifdef CONFIG_HDLC_MODULE +#define CONFIG_HDLC 1 #endif #define GET_USER(error,value,addr) error = get_user(value,addr) @@ -282,7 +280,7 @@ typedef struct _synclinkmp_info { int dosyncppp; spinlock_t netlock; -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC struct net_device *netdev; #endif @@ -538,7 +536,7 @@ static void throttle(struct tty_struct * tty); static void unthrottle(struct tty_struct * tty); static void set_break(struct tty_struct *tty, int break_state); -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC #define dev_to_port(D) (dev_to_hdlc(D)->priv) static void hdlcdev_tx_done(SLMP_INFO *info); static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size); @@ -604,7 +602,7 @@ static void enable_loopback(SLMP_INFO *info, int enable); static void set_rate(SLMP_INFO *info, u32 data_rate); static int bh_action(SLMP_INFO *info); -static void bh_handler(struct work_struct *work); +static void bh_handler(void* Context); static void bh_receive(SLMP_INFO *info); static void bh_transmit(SLMP_INFO *info); static void bh_status(SLMP_INFO *info); @@ -1609,7 +1607,7 @@ static void set_break(struct tty_struct *tty, int break_state) spin_unlock_irqrestore(&info->lock,flags); } -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC /** * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.) @@ -2065,9 +2063,9 @@ int bh_action(SLMP_INFO *info) /* Perform bottom half processing of work items queued by ISR. 
*/ -void bh_handler(struct work_struct *work) +void bh_handler(void* Context) { - SLMP_INFO *info = container_of(work, SLMP_INFO, task); + SLMP_INFO *info = (SLMP_INFO*)Context; int action; if (!info) @@ -2341,7 +2339,7 @@ static void isr_txeom(SLMP_INFO * info, unsigned char status) set_signals(info); } -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC if (info->netcount) hdlcdev_tx_done(info); else @@ -2525,7 +2523,7 @@ void isr_io_pin( SLMP_INFO *info, u16 status ) info->input_signal_events.dcd_up++; } else info->input_signal_events.dcd_down++; -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC if (info->netcount) { if (status & SerialSignal_DCD) netif_carrier_on(info->netdev); @@ -3785,7 +3783,7 @@ void add_device(SLMP_INFO *info) info->irq_level, info->max_frame_size ); -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC hdlcdev_init(info); #endif } @@ -3807,7 +3805,7 @@ static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev) } else { memset(info, 0, sizeof(SLMP_INFO)); info->magic = MGSL_MAGIC; - INIT_WORK(&info->task, bh_handler); + INIT_WORK(&info->task, bh_handler, info); info->max_frame_size = 4096; info->close_delay = 5*HZ/10; info->closing_wait = 30*HZ; @@ -3979,7 +3977,7 @@ static void synclinkmp_cleanup(void) /* release devices */ info = synclinkmp_device_list; while(info) { -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC hdlcdev_exit(info); #endif free_dma_bufs(info); @@ -4981,7 +4979,7 @@ int rx_get_frame(SLMP_INFO *info) info->icount.rxcrc++; framesize = 0; -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC { struct net_device_stats *stats = hdlc_stats(info->netdev); stats->rx_errors++; @@ -5022,7 +5020,7 @@ int rx_get_frame(SLMP_INFO *info) index = 0; } -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC if (info->netcount) hdlcdev_rx(info,info->tmp_rx_buf,framesize); else @@ -5533,7 +5531,7 @@ void tx_timeout(unsigned long context) spin_unlock_irqrestore(&info->lock,flags); -#if SYNCLINK_GENERIC_HDLC +#ifdef CONFIG_HDLC if (info->netcount) hdlcdev_tx_done(info); else diff --git a/trunk/drivers/char/sysrq.c b/trunk/drivers/char/sysrq.c index 05810c8d20bc..5f49280779fb 100644 --- a/trunk/drivers/char/sysrq.c +++ b/trunk/drivers/char/sysrq.c @@ -182,18 +182,6 @@ static struct sysrq_key_op sysrq_showstate_op = { .enable_mask = SYSRQ_ENABLE_DUMP, }; -static void sysrq_handle_showstate_blocked(int key, struct tty_struct *tty) -{ - show_state_filter(TASK_UNINTERRUPTIBLE); -} -static struct sysrq_key_op sysrq_showstate_blocked_op = { - .handler = sysrq_handle_showstate_blocked, - .help_msg = "showBlockedTasks", - .action_msg = "Show Blocked State", - .enable_mask = SYSRQ_ENABLE_DUMP, -}; - - static void sysrq_handle_showmem(int key, struct tty_struct *tty) { show_mem(); @@ -231,13 +219,13 @@ static struct sysrq_key_op sysrq_term_op = { .enable_mask = SYSRQ_ENABLE_SIGNAL, }; -static void moom_callback(struct work_struct *ignored) +static void moom_callback(void *ignored) { out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL], GFP_KERNEL, 0); } -static DECLARE_WORK(moom_work, moom_callback); +static DECLARE_WORK(moom_work, moom_callback, NULL); static void sysrq_handle_moom(int key, struct tty_struct *tty) { @@ -316,7 +304,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = { /* May be assigned at init time by SMP VOYAGER */ NULL, /* v */ NULL, /* w */ - &sysrq_showstate_blocked_op, /* x */ + NULL, /* x */ NULL, /* y */ NULL /* z */ }; diff --git a/trunk/drivers/char/toshiba.c b/trunk/drivers/char/toshiba.c index 07067c31c4ec..dd36fd04a842 100644 --- 
a/trunk/drivers/char/toshiba.c +++ b/trunk/drivers/char/toshiba.c @@ -249,7 +249,6 @@ int tosh_smm(SMMRegisters *regs) return eax; } -EXPORT_SYMBOL(tosh_smm); static int tosh_ioctl(struct inode *ip, struct file *fp, unsigned int cmd, diff --git a/trunk/drivers/char/tpm/tpm.c b/trunk/drivers/char/tpm/tpm.c index 33e1f66e39cb..6e1329d404d2 100644 --- a/trunk/drivers/char/tpm/tpm.c +++ b/trunk/drivers/char/tpm/tpm.c @@ -325,9 +325,9 @@ static void user_reader_timeout(unsigned long ptr) schedule_work(&chip->work); } -static void timeout_work(struct work_struct *work) +static void timeout_work(void *ptr) { - struct tpm_chip *chip = container_of(work, struct tpm_chip, work); + struct tpm_chip *chip = ptr; down(&chip->buffer_mutex); atomic_set(&chip->data_pending, 0); @@ -1105,7 +1105,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vend init_MUTEX(&chip->tpm_mutex); INIT_LIST_HEAD(&chip->list); - INIT_WORK(&chip->work, timeout_work); + INIT_WORK(&chip->work, timeout_work, chip); init_timer(&chip->user_read_timer); chip->user_read_timer.function = user_reader_timeout; @@ -1155,7 +1155,6 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vend if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) { list_del(&chip->list); - misc_deregister(&chip->vendor.miscdev); put_device(dev); clear_bit(chip->dev_num, dev_mask); kfree(chip); diff --git a/trunk/drivers/char/tty_io.c b/trunk/drivers/char/tty_io.c index b3cfc8bc613c..50dc49205a23 100644 --- a/trunk/drivers/char/tty_io.c +++ b/trunk/drivers/char/tty_io.c @@ -1254,7 +1254,7 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush); /** * do_tty_hangup - actual handler for hangup events - * @work: tty device + * @data: tty device * * This can be called by the "eventd" kernel thread. That is process * synchronous but doesn't hold any locks, so we need to make sure we @@ -1274,10 +1274,9 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush); * tasklist_lock to walk task list for hangup event * */ -static void do_tty_hangup(struct work_struct *work) +static void do_tty_hangup(void *data) { - struct tty_struct *tty = - container_of(work, struct tty_struct, hangup_work); + struct tty_struct *tty = (struct tty_struct *) data; struct file * cons_filp = NULL; struct file *filp, *f = NULL; struct task_struct *p; @@ -1434,7 +1433,7 @@ void tty_vhangup(struct tty_struct * tty) printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf)); #endif - do_tty_hangup(&tty->hangup_work); + do_tty_hangup((void *) tty); } EXPORT_SYMBOL(tty_vhangup); @@ -3305,13 +3304,12 @@ int tty_ioctl(struct inode * inode, struct file * file, * Nasty bug: do_SAK is being called in interrupt context. This can * deadlock. We punt it up to process context. AKPM - 16Mar2001 */ -static void __do_SAK(struct work_struct *work) +static void __do_SAK(void *arg) { - struct tty_struct *tty = - container_of(work, struct tty_struct, SAK_work); #ifdef TTY_SOFT_SAK tty_hangup(tty); #else + struct tty_struct *tty = arg; struct task_struct *g, *p; int session; int i; @@ -3390,7 +3388,7 @@ void do_SAK(struct tty_struct *tty) { if (!tty) return; - PREPARE_WORK(&tty->SAK_work, __do_SAK); + PREPARE_WORK(&tty->SAK_work, __do_SAK, tty); schedule_work(&tty->SAK_work); } @@ -3398,7 +3396,7 @@ EXPORT_SYMBOL(do_SAK); /** * flush_to_ldisc - * @work: tty structure passed from work queue. + * @private_: tty structure passed from work queue. * * This routine is called out of the software interrupt to flush data * from the buffer chain to the line discipline. 
@@ -3408,10 +3406,9 @@ EXPORT_SYMBOL(do_SAK); * receive_buf method is single threaded for each tty instance. */ -static void flush_to_ldisc(struct work_struct *work) +static void flush_to_ldisc(void *private_) { - struct tty_struct *tty = - container_of(work, struct tty_struct, buf.work.work); + struct tty_struct *tty = (struct tty_struct *) private_; unsigned long flags; struct tty_ldisc *disc; struct tty_buffer *tbuf, *head; @@ -3556,7 +3553,7 @@ void tty_flip_buffer_push(struct tty_struct *tty) spin_unlock_irqrestore(&tty->buf.lock, flags); if (tty->low_latency) - flush_to_ldisc(&tty->buf.work.work); + flush_to_ldisc((void *) tty); else schedule_delayed_work(&tty->buf.work, 1); } @@ -3583,17 +3580,17 @@ static void initialize_tty_struct(struct tty_struct *tty) tty->overrun_time = jiffies; tty->buf.head = tty->buf.tail = NULL; tty_buffer_init(tty); - INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc); + INIT_WORK(&tty->buf.work, flush_to_ldisc, tty); init_MUTEX(&tty->buf.pty_sem); mutex_init(&tty->termios_mutex); init_waitqueue_head(&tty->write_wait); init_waitqueue_head(&tty->read_wait); - INIT_WORK(&tty->hangup_work, do_tty_hangup); + INIT_WORK(&tty->hangup_work, do_tty_hangup, tty); mutex_init(&tty->atomic_read_lock); mutex_init(&tty->atomic_write_lock); spin_lock_init(&tty->read_lock); INIT_LIST_HEAD(&tty->tty_files); - INIT_WORK(&tty->SAK_work, NULL); + INIT_WORK(&tty->SAK_work, NULL, NULL); } /* diff --git a/trunk/drivers/char/vt.c b/trunk/drivers/char/vt.c index a8239dac994f..87587b4385ab 100644 --- a/trunk/drivers/char/vt.c +++ b/trunk/drivers/char/vt.c @@ -152,10 +152,10 @@ static void gotoxy(struct vc_data *vc, int new_x, int new_y); static void save_cur(struct vc_data *vc); static void reset_terminal(struct vc_data *vc, int do_clear); static void con_flush_chars(struct tty_struct *tty); -static int set_vesa_blanking(char __user *p); +static void set_vesa_blanking(char __user *p); static void set_cursor(struct vc_data *vc); static void hide_cursor(struct vc_data *vc); -static void console_callback(struct work_struct *ignored); +static void console_callback(void *ignored); static void blank_screen_t(unsigned long dummy); static void set_palette(struct vc_data *vc); @@ -174,7 +174,7 @@ static int vesa_blank_mode; /* 0:none 1:suspendV 2:suspendH 3:powerdown */ static int blankinterval = 10*60*HZ; static int vesa_off_interval; -static DECLARE_WORK(console_work, console_callback); +static DECLARE_WORK(console_work, console_callback, NULL); /* * fg_console is the current virtual console, @@ -2154,7 +2154,7 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co * with other console code and prevention of re-entrancy is * ensured with console_sem. */ -static void console_callback(struct work_struct *ignored) +static void console_callback(void *ignored) { acquire_console_sem(); @@ -2369,7 +2369,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) ret = __put_user(data, p); break; case TIOCL_SETVESABLANK: - ret = set_vesa_blanking(p); + set_vesa_blanking(p); break; case TIOCL_GETKMSGREDIRECT: data = kmsg_redirect; @@ -3313,15 +3313,11 @@ postcore_initcall(vtconsole_class_init); * Screen blanking */ -static int set_vesa_blanking(char __user *p) +static void set_vesa_blanking(char __user *p) { - unsigned int mode; - - if (get_user(mode, p + 1)) - return -EFAULT; - - vesa_blank_mode = (mode < 4) ? mode : 0; - return 0; + unsigned int mode; + get_user(mode, p + 1); + vesa_blank_mode = (mode < 4) ? 
mode : 0; } void do_blank_screen(int entering_gfx) diff --git a/trunk/drivers/char/watchdog/pcwd_usb.c b/trunk/drivers/char/watchdog/pcwd_usb.c index 61138726b501..e275dd4a705d 100644 --- a/trunk/drivers/char/watchdog/pcwd_usb.c +++ b/trunk/drivers/char/watchdog/pcwd_usb.c @@ -634,7 +634,7 @@ static int usb_pcwd_probe(struct usb_interface *interface, const struct usb_devi usb_pcwd->intr_size = (le16_to_cpu(endpoint->wMaxPacketSize) > 8 ? le16_to_cpu(endpoint->wMaxPacketSize) : 8); /* set up the memory buffer's */ - if (!(usb_pcwd->intr_buffer = usb_buffer_alloc(udev, usb_pcwd->intr_size, GFP_ATOMIC, &usb_pcwd->intr_dma))) { + if (!(usb_pcwd->intr_buffer = usb_buffer_alloc(udev, usb_pcwd->intr_size, SLAB_ATOMIC, &usb_pcwd->intr_dma))) { printk(KERN_ERR PFX "Out of memory\n"); goto error; } diff --git a/trunk/drivers/connector/cn_queue.c b/trunk/drivers/connector/cn_queue.c index b418b16e910e..05f8ce2cfb4a 100644 --- a/trunk/drivers/connector/cn_queue.c +++ b/trunk/drivers/connector/cn_queue.c @@ -31,11 +31,9 @@ #include #include -void cn_queue_wrapper(struct work_struct *work) +void cn_queue_wrapper(void *data) { - struct cn_callback_entry *cbq = - container_of(work, struct cn_callback_entry, work.work); - struct cn_callback_data *d = &cbq->data; + struct cn_callback_data *d = data; d->callback(d->callback_priv); @@ -59,7 +57,7 @@ static struct cn_callback_entry *cn_queue_alloc_callback_entry(char *name, struc memcpy(&cbq->id.id, id, sizeof(struct cb_id)); cbq->data.callback = callback; - INIT_DELAYED_WORK(&cbq->work, &cn_queue_wrapper); + INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data); return cbq; } diff --git a/trunk/drivers/connector/connector.c b/trunk/drivers/connector/connector.c index 5e7cd45d10ee..b49bacfd8de8 100644 --- a/trunk/drivers/connector/connector.c +++ b/trunk/drivers/connector/connector.c @@ -135,39 +135,40 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v spin_lock_bh(&dev->cbdev->queue_lock); list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) { if (cn_cb_equal(&__cbq->id.id, &msg->id)) { - if (likely(!test_bit(WORK_STRUCT_PENDING, - &__cbq->work.work.management) && + if (likely(!test_bit(0, &__cbq->work.pending) && __cbq->data.ddata == NULL)) { __cbq->data.callback_priv = msg; __cbq->data.ddata = data; __cbq->data.destruct_data = destruct_data; - if (queue_delayed_work( - dev->cbdev->cn_queue, - &__cbq->work, 0)) + if (queue_work(dev->cbdev->cn_queue, + &__cbq->work)) err = 0; } else { + struct work_struct *w; struct cn_callback_data *d; - __cbq = kzalloc(sizeof(*__cbq), GFP_ATOMIC); - if (__cbq) { - d = &__cbq->data; + w = kzalloc(sizeof(*w) + sizeof(*d), GFP_ATOMIC); + if (w) { + d = (struct cn_callback_data *)(w+1); + d->callback_priv = msg; d->callback = __cbq->data.callback; d->ddata = data; d->destruct_data = destruct_data; - d->free = __cbq; + d->free = w; - INIT_DELAYED_WORK(&__cbq->work, - &cn_queue_wrapper); + INIT_LIST_HEAD(&w->entry); + w->pending = 0; + w->func = &cn_queue_wrapper; + w->data = d; + init_timer(&w->timer); - if (queue_delayed_work( - dev->cbdev->cn_queue, - &__cbq->work, 0)) + if (queue_work(dev->cbdev->cn_queue, w)) err = 0; else { - kfree(__cbq); + kfree(w); err = -EINVAL; } } else diff --git a/trunk/drivers/cpufreq/cpufreq.c b/trunk/drivers/cpufreq/cpufreq.c index 47ab42db122a..dd0c2623e27b 100644 --- a/trunk/drivers/cpufreq/cpufreq.c +++ b/trunk/drivers/cpufreq/cpufreq.c @@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock); /* internal prototypes */ static int 
__cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); -static void handle_update(struct work_struct *work); +static void handle_update(void *data); /** * Two notifier lists: the "policy" list is involved in the @@ -665,7 +665,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) mutex_init(&policy->lock); mutex_lock(&policy->lock); init_completion(&policy->kobj_unregister); - INIT_WORK(&policy->update, handle_update); + INIT_WORK(&policy->update, handle_update, (void *)(long)cpu); /* call driver. From then on the cpufreq must be able * to accept all calls to ->verify and ->setpolicy for this CPU @@ -895,11 +895,9 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev) } -static void handle_update(struct work_struct *work) +static void handle_update(void *data) { - struct cpufreq_policy *policy = - container_of(work, struct cpufreq_policy, update); - unsigned int cpu = policy->cpu; + unsigned int cpu = (unsigned int)(long)data; dprintk("handle_update for cpu %u called\n", cpu); cpufreq_update_policy(cpu); } @@ -1537,6 +1535,7 @@ int cpufreq_update_policy(unsigned int cpu) } EXPORT_SYMBOL(cpufreq_update_policy); +#ifdef CONFIG_HOTPLUG_CPU static int cpufreq_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { @@ -1576,6 +1575,7 @@ static struct notifier_block __cpuinitdata cpufreq_cpu_notifier = { .notifier_call = cpufreq_cpu_callback, }; +#endif /* CONFIG_HOTPLUG_CPU */ /********************************************************************* * REGISTER / UNREGISTER CPUFREQ DRIVER * diff --git a/trunk/drivers/cpufreq/cpufreq_conservative.c b/trunk/drivers/cpufreq/cpufreq_conservative.c index 5ef5ede5b884..c4c578defabf 100644 --- a/trunk/drivers/cpufreq/cpufreq_conservative.c +++ b/trunk/drivers/cpufreq/cpufreq_conservative.c @@ -59,7 +59,7 @@ static unsigned int def_sampling_rate; #define MAX_SAMPLING_DOWN_FACTOR (10) #define TRANSITION_LATENCY_LIMIT (10 * 1000) -static void do_dbs_timer(struct work_struct *work); +static void do_dbs_timer(void *data); struct cpu_dbs_info_s { struct cpufreq_policy *cur_policy; @@ -82,7 +82,7 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */ * is recursive for the same process. 
-Venki */ static DEFINE_MUTEX (dbs_mutex); -static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer); +static DECLARE_WORK (dbs_work, do_dbs_timer, NULL); struct dbs_tuners { unsigned int sampling_rate; @@ -420,7 +420,7 @@ static void dbs_check_cpu(int cpu) } } -static void do_dbs_timer(struct work_struct *work) +static void do_dbs_timer(void *data) { int i; lock_cpu_hotplug(); @@ -435,6 +435,7 @@ static void do_dbs_timer(struct work_struct *work) static inline void dbs_timer_init(void) { + INIT_WORK(&dbs_work, do_dbs_timer, NULL); schedule_delayed_work(&dbs_work, usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); return; diff --git a/trunk/drivers/cpufreq/cpufreq_ondemand.c b/trunk/drivers/cpufreq/cpufreq_ondemand.c index e1cc5113c2ae..bf8aa45d4f01 100644 --- a/trunk/drivers/cpufreq/cpufreq_ondemand.c +++ b/trunk/drivers/cpufreq/cpufreq_ondemand.c @@ -47,17 +47,13 @@ static unsigned int def_sampling_rate; #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) #define TRANSITION_LATENCY_LIMIT (10 * 1000) -static void do_dbs_timer(struct work_struct *work); - -/* Sampling types */ -enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; +static void do_dbs_timer(void *data); struct cpu_dbs_info_s { cputime64_t prev_cpu_idle; cputime64_t prev_cpu_wall; struct cpufreq_policy *cur_policy; - struct delayed_work work; - enum dbs_sample sample_type; + struct work_struct work; unsigned int enable; struct cpufreq_frequency_table *freq_table; unsigned int freq_lo; @@ -411,31 +407,30 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) } } -static void do_dbs_timer(struct work_struct *work) +/* Sampling types */ +enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; + +static void do_dbs_timer(void *data) { unsigned int cpu = smp_processor_id(); struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); - enum dbs_sample sample_type = dbs_info->sample_type; /* We want all CPUs to do sampling nearly on same jiffy */ int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - /* Permit rescheduling of this work item */ - work_release(work); - delay -= jiffies % delay; if (!dbs_info->enable) return; /* Common NORMAL_SAMPLE setup */ - dbs_info->sample_type = DBS_NORMAL_SAMPLE; + INIT_WORK(&dbs_info->work, do_dbs_timer, (void *)DBS_NORMAL_SAMPLE); if (!dbs_tuners_ins.powersave_bias || - sample_type == DBS_NORMAL_SAMPLE) { + (unsigned long) data == DBS_NORMAL_SAMPLE) { lock_cpu_hotplug(); dbs_check_cpu(dbs_info); unlock_cpu_hotplug(); if (dbs_info->freq_lo) { /* Setup timer for SUB_SAMPLE */ - dbs_info->sample_type = DBS_SUB_SAMPLE; + INIT_WORK(&dbs_info->work, do_dbs_timer, + (void *)DBS_SUB_SAMPLE); delay = dbs_info->freq_hi_jiffies; } } else { @@ -454,8 +449,7 @@ static inline void dbs_timer_init(unsigned int cpu) delay -= jiffies % delay; ondemand_powersave_bias_init(); - INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer); - dbs_info->sample_type = DBS_NORMAL_SAMPLE; + INIT_WORK(&dbs_info->work, do_dbs_timer, NULL); queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); } diff --git a/trunk/drivers/dma/ioatdma.c b/trunk/drivers/dma/ioatdma.c index 8e8726104619..0358419a0e48 100644 --- a/trunk/drivers/dma/ioatdma.c +++ b/trunk/drivers/dma/ioatdma.c @@ -636,10 +636,10 @@ static int ioat_self_test(struct ioat_device *device) dma_cookie_t cookie; int err = 0; - src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); + src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, SLAB_KERNEL); if (!src) return -ENOMEM; - dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); + dest = kzalloc(sizeof(u8) * 
IOAT_TEST_SIZE, SLAB_KERNEL); if (!dest) { kfree(src); return -ENOMEM; diff --git a/trunk/drivers/edac/edac_mc.c b/trunk/drivers/edac/edac_mc.c index 1b4fc9221803..75e9e38330ff 100644 --- a/trunk/drivers/edac/edac_mc.c +++ b/trunk/drivers/edac/edac_mc.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/drivers/i2c/chips/ds1374.c b/trunk/drivers/i2c/chips/ds1374.c index 15edf40828b4..4630f1969a09 100644 --- a/trunk/drivers/i2c/chips/ds1374.c +++ b/trunk/drivers/i2c/chips/ds1374.c @@ -140,14 +140,12 @@ ulong ds1374_get_rtc_time(void) return t1; } -static ulong new_time; - -static void ds1374_set_work(struct work_struct *work) +static void ds1374_set_work(void *arg) { ulong t1, t2; int limit = 10; /* arbitrary retry limit */ - t1 = new_time; + t1 = *(ulong *) arg; mutex_lock(&ds1374_mutex); @@ -169,9 +167,11 @@ static void ds1374_set_work(struct work_struct *work) "can't confirm time set from rtc chip\n"); } +static ulong new_time; + static struct workqueue_struct *ds1374_workqueue; -static DECLARE_WORK(ds1374_work, ds1374_set_work); +static DECLARE_WORK(ds1374_work, ds1374_set_work, &new_time); int ds1374_set_rtc_time(ulong nowtime) { @@ -180,7 +180,7 @@ int ds1374_set_rtc_time(ulong nowtime) if (in_interrupt()) queue_work(ds1374_workqueue, &ds1374_work); else - ds1374_set_work(NULL); + ds1374_set_work(&new_time); return 0; } diff --git a/trunk/drivers/i2c/chips/m41t00.c b/trunk/drivers/i2c/chips/m41t00.c index 420377c86422..2dd0a34d9472 100644 --- a/trunk/drivers/i2c/chips/m41t00.c +++ b/trunk/drivers/i2c/chips/m41t00.c @@ -215,15 +215,8 @@ m41t00_set(void *arg) } static ulong new_time; -/* well, isn't this API just _lovely_? */ -static void -m41t00_barf(struct work_struct *unusable) -{ - m41t00_set(&new_time); -} - static struct workqueue_struct *m41t00_wq; -static DECLARE_WORK(m41t00_work, m41t00_barf); +static DECLARE_WORK(m41t00_work, m41t00_set, &new_time); int m41t00_set_rtc_time(ulong nowtime) diff --git a/trunk/drivers/ide/Kconfig b/trunk/drivers/ide/Kconfig index e23bc0d62159..0c68d0f0d8e5 100644 --- a/trunk/drivers/ide/Kconfig +++ b/trunk/drivers/ide/Kconfig @@ -389,6 +389,14 @@ config BLK_DEV_RZ1000 Linux. This may slow disk throughput by a few percent, but at least things will operate 100% reliably. +config BLK_DEV_SL82C105 + tristate "Winbond SL82c105 support" + depends on PCI && (PPC || ARM) && BLK_DEV_IDEPCI + help + If you have a Winbond SL82c105 IDE controller, say Y here to enable + special configuration for this chip. This is common on various CHRP + motherboards, but could be used elsewhere. If in doubt, say Y. + config BLK_DEV_IDEDMA_PCI bool "Generic PCI bus-master DMA support" depends on PCI && BLK_DEV_IDEPCI @@ -704,14 +712,6 @@ config BLK_DEV_SIS5513 Please read the comments at the top of . -config BLK_DEV_SL82C105 - tristate "Winbond SL82c105 support" - depends on (PPC || ARM) - help - If you have a Winbond SL82c105 IDE controller, say Y here to enable - special configuration for this chip. This is common on various CHRP - motherboards, but could be used elsewhere. If in doubt, say Y. - config BLK_DEV_SLC90E66 tristate "SLC90E66 chipset support" help diff --git a/trunk/drivers/ide/ide.c b/trunk/drivers/ide/ide.c index 16890769dca6..287a66201150 100644 --- a/trunk/drivers/ide/ide.c +++ b/trunk/drivers/ide/ide.c @@ -973,8 +973,8 @@ ide_settings_t *ide_find_setting_by_name (ide_drive_t *drive, char *name) * @drive: drive * * Automatically remove all the driver specific settings for this - * drive. 
This function may not be called from IRQ context. The - * caller must hold ide_setting_sem. + * drive. This function may sleep and must not be called from IRQ + * context. The caller must hold ide_setting_sem. */ static void auto_remove_settings (ide_drive_t *drive) @@ -1874,22 +1874,11 @@ void ide_unregister_subdriver(ide_drive_t *drive, ide_driver_t *driver) { unsigned long flags; + down(&ide_setting_sem); + spin_lock_irqsave(&ide_lock, flags); #ifdef CONFIG_PROC_FS ide_remove_proc_entries(drive->proc, driver->proc); #endif - down(&ide_setting_sem); - spin_lock_irqsave(&ide_lock, flags); - /* - * ide_setting_sem protects the settings list - * ide_lock protects the use of settings - * - * so we need to hold both, ide_settings_sem because we want to - * modify the settings list, and ide_lock because we cannot take - * a setting out that is being used. - * - * OTOH both ide_{read,write}_setting are only ever used under - * ide_setting_sem. - */ auto_remove_settings(drive); spin_unlock_irqrestore(&ide_lock, flags); up(&ide_setting_sem); diff --git a/trunk/drivers/ide/pci/via82cxxx.c b/trunk/drivers/ide/pci/via82cxxx.c index 61f1a9665a7f..eb7ab112c050 100644 --- a/trunk/drivers/ide/pci/via82cxxx.c +++ b/trunk/drivers/ide/pci/via82cxxx.c @@ -282,11 +282,11 @@ static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const * Find the ISA bridge to see how good the IDE is. */ via_config = via_config_find(&isa); - - /* We checked this earlier so if it fails here deeep badness - is involved */ - - BUG_ON(!via_config->id); + if (!via_config->id) { + printk(KERN_WARNING "VP_IDE: Unknown VIA SouthBridge, disabling DMA.\n"); + pci_dev_put(isa); + return -ENODEV; + } /* * Setup or disable Clk66 if appropriate @@ -494,17 +494,6 @@ static ide_pci_device_t via82cxxx_chipsets[] __devinitdata = { static int __devinit via_init_one(struct pci_dev *dev, const struct pci_device_id *id) { - struct pci_dev *isa = NULL; - struct via_isa_bridge *via_config; - /* - * Find the ISA bridge and check we know what it is. 
- */ - via_config = via_config_find(&isa); - pci_dev_put(isa); - if (!via_config->id) { - printk(KERN_WARNING "VP_IDE: Unknown VIA SouthBridge, disabling DMA.\n"); - return -ENODEV; - } return ide_setup_pci_device(dev, &via82cxxx_chipsets[id->driver_data]); } diff --git a/trunk/drivers/ieee1394/eth1394.c b/trunk/drivers/ieee1394/eth1394.c index 27d6c642415d..31e5cc49d61a 100644 --- a/trunk/drivers/ieee1394/eth1394.c +++ b/trunk/drivers/ieee1394/eth1394.c @@ -133,7 +133,7 @@ struct eth1394_node_info { #define ETH1394_DRIVER_NAME "eth1394" static const char driver_name[] = ETH1394_DRIVER_NAME; -static struct kmem_cache *packet_task_cache; +static kmem_cache_t *packet_task_cache; static struct hpsb_highlevel eth1394_highlevel; diff --git a/trunk/drivers/ieee1394/hosts.c b/trunk/drivers/ieee1394/hosts.c index b935e08695a9..d90a3a1898c0 100644 --- a/trunk/drivers/ieee1394/hosts.c +++ b/trunk/drivers/ieee1394/hosts.c @@ -31,10 +31,9 @@ #include "config_roms.h" -static void delayed_reset_bus(struct work_struct *work) +static void delayed_reset_bus(void * __reset_info) { - struct hpsb_host *host = - container_of(work, struct hpsb_host, delayed_reset.work); + struct hpsb_host *host = (struct hpsb_host*)__reset_info; int generation = host->csr.generation + 1; /* The generation field rolls over to 2 rather than 0 per IEEE @@ -123,7 +122,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra, int i; int hostnum = 0; - h = kzalloc(sizeof(*h) + extra, GFP_KERNEL); + h = kzalloc(sizeof(*h) + extra, SLAB_KERNEL); if (!h) return NULL; @@ -146,7 +145,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra, atomic_set(&h->generation, 0); - INIT_DELAYED_WORK(&h->delayed_reset, delayed_reset_bus); + INIT_WORK(&h->delayed_reset, delayed_reset_bus, h); init_timer(&h->timeout); h->timeout.data = (unsigned long) h; @@ -235,7 +234,7 @@ int hpsb_update_config_rom_image(struct hpsb_host *host) * Config ROM in the near future. 
*/ reset_delay = HZ; - PREPARE_DELAYED_WORK(&host->delayed_reset, delayed_reset_bus); + PREPARE_WORK(&host->delayed_reset, delayed_reset_bus, host); schedule_delayed_work(&host->delayed_reset, reset_delay); return 0; diff --git a/trunk/drivers/ieee1394/hosts.h b/trunk/drivers/ieee1394/hosts.h index d553e38c9543..bc6dbfadb891 100644 --- a/trunk/drivers/ieee1394/hosts.h +++ b/trunk/drivers/ieee1394/hosts.h @@ -62,7 +62,7 @@ struct hpsb_host { struct class_device class_dev; int update_config_rom; - struct delayed_work delayed_reset; + struct work_struct delayed_reset; unsigned int config_roms; struct list_head addr_space; diff --git a/trunk/drivers/ieee1394/nodemgr.c b/trunk/drivers/ieee1394/nodemgr.c index e829c9336b3c..8e7b83f84485 100644 --- a/trunk/drivers/ieee1394/nodemgr.c +++ b/trunk/drivers/ieee1394/nodemgr.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include "csr.h" diff --git a/trunk/drivers/ieee1394/ohci1394.c b/trunk/drivers/ieee1394/ohci1394.c index eae97d8dcf03..6e8ea9110c46 100644 --- a/trunk/drivers/ieee1394/ohci1394.c +++ b/trunk/drivers/ieee1394/ohci1394.c @@ -1225,7 +1225,7 @@ static int ohci_iso_recv_init(struct hpsb_iso *iso) int ctx; int ret = -ENOMEM; - recv = kmalloc(sizeof(*recv), GFP_KERNEL); + recv = kmalloc(sizeof(*recv), SLAB_KERNEL); if (!recv) return -ENOMEM; @@ -1918,7 +1918,7 @@ static int ohci_iso_xmit_init(struct hpsb_iso *iso) int ctx; int ret = -ENOMEM; - xmit = kmalloc(sizeof(*xmit), GFP_KERNEL); + xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL); if (!xmit) return -ENOMEM; @@ -3021,7 +3021,7 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d, return -ENOMEM; } - d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i); + d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i); OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i); if (d->prg_cpu[i] != NULL) { @@ -3117,7 +3117,7 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d, OHCI_DMA_ALLOC("dma_rcv prg pool"); for (i = 0; i < d->num_desc; i++) { - d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i); + d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i); OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i); if (d->prg_cpu[i] != NULL) { diff --git a/trunk/drivers/ieee1394/pcilynx.c b/trunk/drivers/ieee1394/pcilynx.c index 9cab1d661472..0a7412e27eb4 100644 --- a/trunk/drivers/ieee1394/pcilynx.c +++ b/trunk/drivers/ieee1394/pcilynx.c @@ -1428,7 +1428,7 @@ static int __devinit add_card(struct pci_dev *dev, struct i2c_algo_bit_data i2c_adapter_data; error = -ENOMEM; - i2c_ad = kmalloc(sizeof(*i2c_ad), GFP_KERNEL); + i2c_ad = kmalloc(sizeof(*i2c_ad), SLAB_KERNEL); if (!i2c_ad) FAIL("failed to allocate I2C adapter memory"); memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter)); diff --git a/trunk/drivers/ieee1394/raw1394.c b/trunk/drivers/ieee1394/raw1394.c index bf71e069eaf5..5ec4f5eb6b19 100644 --- a/trunk/drivers/ieee1394/raw1394.c +++ b/trunk/drivers/ieee1394/raw1394.c @@ -112,7 +112,7 @@ static struct pending_request *__alloc_pending_request(gfp_t flags) static inline struct pending_request *alloc_pending_request(void) { - return __alloc_pending_request(GFP_KERNEL); + return __alloc_pending_request(SLAB_KERNEL); } static void free_pending_request(struct pending_request *req) @@ -259,7 +259,7 @@ static void host_reset(struct hpsb_host *host) if (hi != NULL) { list_for_each_entry(fi, &hi->file_info_list, list) { if (fi->notification == RAW1394_NOTIFY_ON) { - req = __alloc_pending_request(GFP_ATOMIC); + req = 
__alloc_pending_request(SLAB_ATOMIC); if (req != NULL) { req->file_info = fi; @@ -306,13 +306,13 @@ static void iso_receive(struct hpsb_host *host, int channel, quadlet_t * data, if (!(fi->listen_channels & (1ULL << channel))) continue; - req = __alloc_pending_request(GFP_ATOMIC); + req = __alloc_pending_request(SLAB_ATOMIC); if (!req) break; if (!ibs) { ibs = kmalloc(sizeof(*ibs) + length, - GFP_ATOMIC); + SLAB_ATOMIC); if (!ibs) { kfree(req); break; @@ -367,13 +367,13 @@ static void fcp_request(struct hpsb_host *host, int nodeid, int direction, if (!fi->fcp_buffer) continue; - req = __alloc_pending_request(GFP_ATOMIC); + req = __alloc_pending_request(SLAB_ATOMIC); if (!req) break; if (!ibs) { ibs = kmalloc(sizeof(*ibs) + length, - GFP_ATOMIC); + SLAB_ATOMIC); if (!ibs) { kfree(req); break; @@ -593,7 +593,7 @@ static int state_initialized(struct file_info *fi, struct pending_request *req) switch (req->req.type) { case RAW1394_REQ_LIST_CARDS: spin_lock_irqsave(&host_info_lock, flags); - khl = kmalloc(sizeof(*khl) * host_count, GFP_ATOMIC); + khl = kmalloc(sizeof(*khl) * host_count, SLAB_ATOMIC); if (khl) { req->req.misc = host_count; @@ -1045,7 +1045,7 @@ static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer, } if (arm_addr->notification_options & ARM_READ) { DBGMSG("arm_read -> entering notification-section"); - req = __alloc_pending_request(GFP_ATOMIC); + req = __alloc_pending_request(SLAB_ATOMIC); if (!req) { DBGMSG("arm_read -> rcode_conflict_error"); spin_unlock_irqrestore(&host_info_lock, irqflags); @@ -1064,7 +1064,7 @@ static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer, sizeof(struct arm_response) + sizeof(struct arm_request_response); } - req->data = kmalloc(size, GFP_ATOMIC); + req->data = kmalloc(size, SLAB_ATOMIC); if (!(req->data)) { free_pending_request(req); DBGMSG("arm_read -> rcode_conflict_error"); @@ -1198,7 +1198,7 @@ static int arm_write(struct hpsb_host *host, int nodeid, int destid, } if (arm_addr->notification_options & ARM_WRITE) { DBGMSG("arm_write -> entering notification-section"); - req = __alloc_pending_request(GFP_ATOMIC); + req = __alloc_pending_request(SLAB_ATOMIC); if (!req) { DBGMSG("arm_write -> rcode_conflict_error"); spin_unlock_irqrestore(&host_info_lock, irqflags); @@ -1209,7 +1209,7 @@ static int arm_write(struct hpsb_host *host, int nodeid, int destid, sizeof(struct arm_request) + sizeof(struct arm_response) + (length) * sizeof(byte_t) + sizeof(struct arm_request_response); - req->data = kmalloc(size, GFP_ATOMIC); + req->data = kmalloc(size, SLAB_ATOMIC); if (!(req->data)) { free_pending_request(req); DBGMSG("arm_write -> rcode_conflict_error"); @@ -1400,7 +1400,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store, if (arm_addr->notification_options & ARM_LOCK) { byte_t *buf1, *buf2; DBGMSG("arm_lock -> entering notification-section"); - req = __alloc_pending_request(GFP_ATOMIC); + req = __alloc_pending_request(SLAB_ATOMIC); if (!req) { DBGMSG("arm_lock -> rcode_conflict_error"); spin_unlock_irqrestore(&host_info_lock, irqflags); @@ -1408,7 +1408,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store, The request may be retried */ } size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response); /* maximum */ - req->data = kmalloc(size, GFP_ATOMIC); + req->data = kmalloc(size, SLAB_ATOMIC); if (!(req->data)) { free_pending_request(req); DBGMSG("arm_lock -> rcode_conflict_error"); @@ -1628,7 
+1628,7 @@ static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store, if (arm_addr->notification_options & ARM_LOCK) { byte_t *buf1, *buf2; DBGMSG("arm_lock64 -> entering notification-section"); - req = __alloc_pending_request(GFP_ATOMIC); + req = __alloc_pending_request(SLAB_ATOMIC); if (!req) { spin_unlock_irqrestore(&host_info_lock, irqflags); DBGMSG("arm_lock64 -> rcode_conflict_error"); @@ -1636,7 +1636,7 @@ static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store, The request may be retried */ } size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response); /* maximum */ - req->data = kmalloc(size, GFP_ATOMIC); + req->data = kmalloc(size, SLAB_ATOMIC); if (!(req->data)) { free_pending_request(req); spin_unlock_irqrestore(&host_info_lock, irqflags); @@ -1737,7 +1737,7 @@ static int arm_register(struct file_info *fi, struct pending_request *req) return (-EINVAL); } /* addr-list-entry for fileinfo */ - addr = kmalloc(sizeof(*addr), GFP_KERNEL); + addr = kmalloc(sizeof(*addr), SLAB_KERNEL); if (!addr) { req->req.length = 0; return (-ENOMEM); @@ -2103,7 +2103,7 @@ static int write_phypacket(struct file_info *fi, struct pending_request *req) static int get_config_rom(struct file_info *fi, struct pending_request *req) { int ret = sizeof(struct raw1394_request); - quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL); + quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL); int status; if (!data) @@ -2133,7 +2133,7 @@ static int get_config_rom(struct file_info *fi, struct pending_request *req) static int update_config_rom(struct file_info *fi, struct pending_request *req) { int ret = sizeof(struct raw1394_request); - quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL); + quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL); if (!data) return -ENOMEM; if (copy_from_user(data, int2ptr(req->req.sendb), req->req.length)) { @@ -2443,7 +2443,7 @@ static void queue_rawiso_event(struct file_info *fi) /* only one ISO activity event may be in the queue */ if (!__rawiso_event_in_queue(fi)) { struct pending_request *req = - __alloc_pending_request(GFP_ATOMIC); + __alloc_pending_request(SLAB_ATOMIC); if (req) { req->file_info = fi; @@ -2779,7 +2779,7 @@ static int raw1394_open(struct inode *inode, struct file *file) { struct file_info *fi; - fi = kzalloc(sizeof(*fi), GFP_KERNEL); + fi = kzalloc(sizeof(*fi), SLAB_KERNEL); if (!fi) return -ENOMEM; diff --git a/trunk/drivers/ieee1394/sbp2.c b/trunk/drivers/ieee1394/sbp2.c index cd156d4e779e..6986ac188281 100644 --- a/trunk/drivers/ieee1394/sbp2.c +++ b/trunk/drivers/ieee1394/sbp2.c @@ -493,25 +493,20 @@ static void sbp2util_notify_fetch_agent(struct scsi_id_instance_data *scsi_id, scsi_unblock_requests(scsi_id->scsi_host); } -static void sbp2util_write_orb_pointer(struct work_struct *work) +static void sbp2util_write_orb_pointer(void *p) { - struct scsi_id_instance_data *scsi_id = - container_of(work, struct scsi_id_instance_data, - protocol_work.work); quadlet_t data[2]; - data[0] = ORB_SET_NODE_ID(scsi_id->hi->host->node_id); - data[1] = scsi_id->last_orb_dma; + data[0] = ORB_SET_NODE_ID( + ((struct scsi_id_instance_data *)p)->hi->host->node_id); + data[1] = ((struct scsi_id_instance_data *)p)->last_orb_dma; sbp2util_cpu_to_be32_buffer(data, 8); - sbp2util_notify_fetch_agent(scsi_id, SBP2_ORB_POINTER_OFFSET, data, 8); + sbp2util_notify_fetch_agent(p, SBP2_ORB_POINTER_OFFSET, data, 8); } -static void sbp2util_write_doorbell(struct work_struct *work) 
+static void sbp2util_write_doorbell(void *p) { - struct scsi_id_instance_data *scsi_id = - container_of(work, struct scsi_id_instance_data, - protocol_work.work); - sbp2util_notify_fetch_agent(scsi_id, SBP2_DOORBELL_OFFSET, NULL, 4); + sbp2util_notify_fetch_agent(p, SBP2_DOORBELL_OFFSET, NULL, 4); } /* @@ -848,7 +843,7 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud INIT_LIST_HEAD(&scsi_id->scsi_list); spin_lock_init(&scsi_id->sbp2_command_orb_lock); atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING); - INIT_DELAYED_WORK(&scsi_id->protocol_work, NULL); + INIT_WORK(&scsi_id->protocol_work, NULL, NULL); ud->device.driver_data = scsi_id; @@ -2052,10 +2047,11 @@ static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id, * We do not accept new commands until the job is over. */ scsi_block_requests(scsi_id->scsi_host); - PREPARE_DELAYED_WORK(&scsi_id->protocol_work, + PREPARE_WORK(&scsi_id->protocol_work, last_orb ? sbp2util_write_doorbell: - sbp2util_write_orb_pointer); - schedule_delayed_work(&scsi_id->protocol_work, 0); + sbp2util_write_orb_pointer, + scsi_id); + schedule_work(&scsi_id->protocol_work); } } diff --git a/trunk/drivers/ieee1394/sbp2.h b/trunk/drivers/ieee1394/sbp2.h index 1b16d6b9cf11..abbe48e646c3 100644 --- a/trunk/drivers/ieee1394/sbp2.h +++ b/trunk/drivers/ieee1394/sbp2.h @@ -348,7 +348,7 @@ struct scsi_id_instance_data { unsigned workarounds; atomic_t state; - struct delayed_work protocol_work; + struct work_struct protocol_work; }; /* For use in scsi_id_instance_data.state */ diff --git a/trunk/drivers/infiniband/core/addr.c b/trunk/drivers/infiniband/core/addr.c index af939796750d..7767a11b6890 100644 --- a/trunk/drivers/infiniband/core/addr.c +++ b/trunk/drivers/infiniband/core/addr.c @@ -55,11 +55,11 @@ struct addr_req { int status; }; -static void process_req(struct work_struct *work); +static void process_req(void *data); static DEFINE_MUTEX(lock); static LIST_HEAD(req_list); -static DECLARE_DELAYED_WORK(work, process_req); +static DECLARE_WORK(work, process_req, NULL); static struct workqueue_struct *addr_wq; void rdma_addr_register_client(struct rdma_addr_client *client) @@ -215,7 +215,7 @@ static int addr_resolve_remote(struct sockaddr_in *src_in, return ret; } -static void process_req(struct work_struct *work) +static void process_req(void *data) { struct addr_req *req, *temp_req; struct sockaddr_in *src_in, *dst_in; diff --git a/trunk/drivers/infiniband/core/cache.c b/trunk/drivers/infiniband/core/cache.c index 98272fbbfb31..20e9f64e67a6 100644 --- a/trunk/drivers/infiniband/core/cache.c +++ b/trunk/drivers/infiniband/core/cache.c @@ -285,10 +285,9 @@ static void ib_cache_update(struct ib_device *device, kfree(tprops); } -static void ib_cache_task(struct work_struct *_work) +static void ib_cache_task(void *work_ptr) { - struct ib_update_work *work = - container_of(_work, struct ib_update_work, work); + struct ib_update_work *work = work_ptr; ib_cache_update(work->device, work->port_num); kfree(work); @@ -307,7 +306,7 @@ static void ib_cache_event(struct ib_event_handler *handler, event->event == IB_EVENT_CLIENT_REREGISTER) { work = kmalloc(sizeof *work, GFP_ATOMIC); if (work) { - INIT_WORK(&work->work, ib_cache_task); + INIT_WORK(&work->work, ib_cache_task, work); work->device = event->device; work->port_num = event->element.port_num; schedule_work(&work->work); diff --git a/trunk/drivers/infiniband/core/cm.c b/trunk/drivers/infiniband/core/cm.c index 79c937bf6962..e5dc4530808a 100644 --- 
a/trunk/drivers/infiniband/core/cm.c +++ b/trunk/drivers/infiniband/core/cm.c @@ -101,7 +101,7 @@ struct cm_av { }; struct cm_work { - struct delayed_work work; + struct work_struct work; struct list_head list; struct cm_port *port; struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */ @@ -161,7 +161,7 @@ struct cm_id_private { atomic_t work_count; }; -static void cm_work_handler(struct work_struct *work); +static void cm_work_handler(void *data); static inline void cm_deref_id(struct cm_id_private *cm_id_priv) { @@ -668,7 +668,8 @@ static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id) return ERR_PTR(-ENOMEM); timewait_info->work.local_id = local_id; - INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler); + INIT_WORK(&timewait_info->work.work, cm_work_handler, + &timewait_info->work); timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT; return timewait_info; } @@ -2994,9 +2995,9 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent, } } -static void cm_work_handler(struct work_struct *_work) +static void cm_work_handler(void *data) { - struct cm_work *work = container_of(_work, struct cm_work, work.work); + struct cm_work *work = data; int ret; switch (work->cm_event.event) { @@ -3086,12 +3087,12 @@ static int cm_establish(struct ib_cm_id *cm_id) * we need to find the cm_id once we're in the context of the * worker thread, rather than holding a reference on it. */ - INIT_DELAYED_WORK(&work->work, cm_work_handler); + INIT_WORK(&work->work, cm_work_handler, work); work->local_id = cm_id->local_id; work->remote_id = cm_id->remote_id; work->mad_recv_wc = NULL; work->cm_event.event = IB_CM_USER_ESTABLISHED; - queue_delayed_work(cm.wq, &work->work, 0); + queue_work(cm.wq, &work->work); out: return ret; } @@ -3190,11 +3191,11 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent, return; } - INIT_DELAYED_WORK(&work->work, cm_work_handler); + INIT_WORK(&work->work, cm_work_handler, work); work->cm_event.event = event; work->mad_recv_wc = mad_recv_wc; work->port = (struct cm_port *)mad_agent->context; - queue_delayed_work(cm.wq, &work->work, 0); + queue_work(cm.wq, &work->work); } static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, diff --git a/trunk/drivers/infiniband/core/cma.c b/trunk/drivers/infiniband/core/cma.c index 985a6b564d8f..cf48f2697434 100644 --- a/trunk/drivers/infiniband/core/cma.c +++ b/trunk/drivers/infiniband/core/cma.c @@ -1340,9 +1340,9 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, return (id_priv->query_id < 0) ? 
id_priv->query_id : 0; } -static void cma_work_handler(struct work_struct *_work) +static void cma_work_handler(void *data) { - struct cma_work *work = container_of(_work, struct cma_work, work); + struct cma_work *work = data; struct rdma_id_private *id_priv = work->id; int destroy = 0; @@ -1373,7 +1373,7 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) return -ENOMEM; work->id = id_priv; - INIT_WORK(&work->work, cma_work_handler); + INIT_WORK(&work->work, cma_work_handler, work); work->old_state = CMA_ROUTE_QUERY; work->new_state = CMA_ROUTE_RESOLVED; work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; @@ -1430,7 +1430,7 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms) return -ENOMEM; work->id = id_priv; - INIT_WORK(&work->work, cma_work_handler); + INIT_WORK(&work->work, cma_work_handler, work); work->old_state = CMA_ROUTE_QUERY; work->new_state = CMA_ROUTE_RESOLVED; work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; @@ -1583,7 +1583,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv) } work->id = id_priv; - INIT_WORK(&work->work, cma_work_handler); + INIT_WORK(&work->work, cma_work_handler, work); work->old_state = CMA_ADDR_QUERY; work->new_state = CMA_ADDR_RESOLVED; work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; diff --git a/trunk/drivers/infiniband/core/iwcm.c b/trunk/drivers/infiniband/core/iwcm.c index 1039ad57d53b..cf797d7aea09 100644 --- a/trunk/drivers/infiniband/core/iwcm.c +++ b/trunk/drivers/infiniband/core/iwcm.c @@ -828,9 +828,9 @@ static int process_event(struct iwcm_id_private *cm_id_priv, * thread asleep on the destroy_comp list vs. an object destroyed * here synchronously when the last reference is removed. */ -static void cm_work_handler(struct work_struct *_work) +static void cm_work_handler(void *arg) { - struct iwcm_work *work = container_of(_work, struct iwcm_work, work); + struct iwcm_work *work = arg; struct iw_cm_event levent; struct iwcm_id_private *cm_id_priv = work->cm_id; unsigned long flags; @@ -900,7 +900,7 @@ static int cm_event_handler(struct iw_cm_id *cm_id, goto out; } - INIT_WORK(&work->work, cm_work_handler); + INIT_WORK(&work->work, cm_work_handler, work); work->cm_id = cm_id_priv; work->event = *iw_event; diff --git a/trunk/drivers/infiniband/core/mad.c b/trunk/drivers/infiniband/core/mad.c index 15f38d94b3a8..3f9c16232c4d 100644 --- a/trunk/drivers/infiniband/core/mad.c +++ b/trunk/drivers/infiniband/core/mad.c @@ -65,8 +65,8 @@ static struct ib_mad_agent_private *find_mad_agent( static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, struct ib_mad_private *mad); static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); -static void timeout_sends(struct work_struct *work); -static void local_completions(struct work_struct *work); +static void timeout_sends(void *data); +static void local_completions(void *data); static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, struct ib_mad_agent_private *agent_priv, u8 mgmt_class); @@ -356,9 +356,10 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, INIT_LIST_HEAD(&mad_agent_priv->wait_list); INIT_LIST_HEAD(&mad_agent_priv->done_list); INIT_LIST_HEAD(&mad_agent_priv->rmpp_list); - INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends); + INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv); INIT_LIST_HEAD(&mad_agent_priv->local_list); - INIT_WORK(&mad_agent_priv->local_work, local_completions); + INIT_WORK(&mad_agent_priv->local_work, 
local_completions, + mad_agent_priv); atomic_set(&mad_agent_priv->refcount, 1); init_completion(&mad_agent_priv->comp); @@ -2197,12 +2198,12 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv, /* * IB MAD completion callback */ -static void ib_mad_completion_handler(struct work_struct *work) +static void ib_mad_completion_handler(void *data) { struct ib_mad_port_private *port_priv; struct ib_wc wc; - port_priv = container_of(work, struct ib_mad_port_private, work); + port_priv = (struct ib_mad_port_private *)data; ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) { @@ -2323,7 +2324,7 @@ void ib_cancel_mad(struct ib_mad_agent *mad_agent, } EXPORT_SYMBOL(ib_cancel_mad); -static void local_completions(struct work_struct *work) +static void local_completions(void *data) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_local_private *local; @@ -2333,8 +2334,7 @@ static void local_completions(struct work_struct *work) struct ib_wc wc; struct ib_mad_send_wc mad_send_wc; - mad_agent_priv = - container_of(work, struct ib_mad_agent_private, local_work); + mad_agent_priv = (struct ib_mad_agent_private *)data; spin_lock_irqsave(&mad_agent_priv->lock, flags); while (!list_empty(&mad_agent_priv->local_list)) { @@ -2434,15 +2434,14 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) return ret; } -static void timeout_sends(struct work_struct *work) +static void timeout_sends(void *data) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_send_wc mad_send_wc; unsigned long flags, delay; - mad_agent_priv = container_of(work, struct ib_mad_agent_private, - timed_work.work); + mad_agent_priv = (struct ib_mad_agent_private *)data; mad_send_wc.vendor_err = 0; spin_lock_irqsave(&mad_agent_priv->lock, flags); @@ -2800,7 +2799,7 @@ static int ib_mad_port_open(struct ib_device *device, ret = -ENOMEM; goto error8; } - INIT_WORK(&port_priv->work, ib_mad_completion_handler); + INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv); spin_lock_irqsave(&ib_mad_port_list_lock, flags); list_add_tail(&port_priv->port_list, &ib_mad_port_list); diff --git a/trunk/drivers/infiniband/core/mad_priv.h b/trunk/drivers/infiniband/core/mad_priv.h index d5548e73e068..d06b59083f6e 100644 --- a/trunk/drivers/infiniband/core/mad_priv.h +++ b/trunk/drivers/infiniband/core/mad_priv.h @@ -102,7 +102,7 @@ struct ib_mad_agent_private { struct list_head send_list; struct list_head wait_list; struct list_head done_list; - struct delayed_work timed_work; + struct work_struct timed_work; unsigned long timeout; struct list_head local_list; struct work_struct local_work; diff --git a/trunk/drivers/infiniband/core/mad_rmpp.c b/trunk/drivers/infiniband/core/mad_rmpp.c index 3663fd7022be..1ef79d015a1e 100644 --- a/trunk/drivers/infiniband/core/mad_rmpp.c +++ b/trunk/drivers/infiniband/core/mad_rmpp.c @@ -45,8 +45,8 @@ enum rmpp_state { struct mad_rmpp_recv { struct ib_mad_agent_private *agent; struct list_head list; - struct delayed_work timeout_work; - struct delayed_work cleanup_work; + struct work_struct timeout_work; + struct work_struct cleanup_work; struct completion comp; enum rmpp_state state; spinlock_t lock; @@ -233,10 +233,9 @@ static void nack_recv(struct ib_mad_agent_private *agent, } } -static void recv_timeout_handler(struct work_struct *work) +static void recv_timeout_handler(void *data) { - struct mad_rmpp_recv *rmpp_recv = - container_of(work, struct mad_rmpp_recv, 
timeout_work.work); + struct mad_rmpp_recv *rmpp_recv = data; struct ib_mad_recv_wc *rmpp_wc; unsigned long flags; @@ -255,10 +254,9 @@ static void recv_timeout_handler(struct work_struct *work) ib_free_recv_mad(rmpp_wc); } -static void recv_cleanup_handler(struct work_struct *work) +static void recv_cleanup_handler(void *data) { - struct mad_rmpp_recv *rmpp_recv = - container_of(work, struct mad_rmpp_recv, cleanup_work.work); + struct mad_rmpp_recv *rmpp_recv = data; unsigned long flags; spin_lock_irqsave(&rmpp_recv->agent->lock, flags); @@ -287,8 +285,8 @@ create_rmpp_recv(struct ib_mad_agent_private *agent, rmpp_recv->agent = agent; init_completion(&rmpp_recv->comp); - INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler); - INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler); + INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv); + INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv); spin_lock_init(&rmpp_recv->lock); rmpp_recv->state = RMPP_STATE_ACTIVE; atomic_set(&rmpp_recv->refcount, 1); diff --git a/trunk/drivers/infiniband/core/sa_query.c b/trunk/drivers/infiniband/core/sa_query.c index e45afba75341..1706d3c7e95e 100644 --- a/trunk/drivers/infiniband/core/sa_query.c +++ b/trunk/drivers/infiniband/core/sa_query.c @@ -360,10 +360,9 @@ static void free_sm_ah(struct kref *kref) kfree(sm_ah); } -static void update_sm_ah(struct work_struct *work) +static void update_sm_ah(void *port_ptr) { - struct ib_sa_port *port = - container_of(work, struct ib_sa_port, update_task); + struct ib_sa_port *port = port_ptr; struct ib_sa_sm_ah *new_ah, *old_ah; struct ib_port_attr port_attr; struct ib_ah_attr ah_attr; @@ -993,7 +992,8 @@ static void ib_sa_add_one(struct ib_device *device) if (IS_ERR(sa_dev->port[i].agent)) goto err; - INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah); + INIT_WORK(&sa_dev->port[i].update_task, + update_sm_ah, &sa_dev->port[i]); } ib_set_client_data(device, &sa_client, sa_dev); @@ -1010,7 +1010,7 @@ static void ib_sa_add_one(struct ib_device *device) goto err; for (i = 0; i <= e - s; ++i) - update_sm_ah(&sa_dev->port[i].update_task); + update_sm_ah(&sa_dev->port[i]); return; diff --git a/trunk/drivers/infiniband/core/uverbs_mem.c b/trunk/drivers/infiniband/core/uverbs_mem.c index db12cc0841df..efe147dbeb42 100644 --- a/trunk/drivers/infiniband/core/uverbs_mem.c +++ b/trunk/drivers/infiniband/core/uverbs_mem.c @@ -179,10 +179,9 @@ void ib_umem_release(struct ib_device *dev, struct ib_umem *umem) up_write(¤t->mm->mmap_sem); } -static void ib_umem_account(struct work_struct *_work) +static void ib_umem_account(void *work_ptr) { - struct ib_umem_account_work *work = - container_of(_work, struct ib_umem_account_work, work); + struct ib_umem_account_work *work = work_ptr; down_write(&work->mm->mmap_sem); work->mm->locked_vm -= work->diff; @@ -217,7 +216,7 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem) return; } - INIT_WORK(&work->work, ib_umem_account); + INIT_WORK(&work->work, ib_umem_account, work); work->mm = mm; work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT; diff --git a/trunk/drivers/infiniband/hw/amso1100/c2_vq.c b/trunk/drivers/infiniband/hw/amso1100/c2_vq.c index 36620a22413c..40caeb5f41b4 100644 --- a/trunk/drivers/infiniband/hw/amso1100/c2_vq.c +++ b/trunk/drivers/infiniband/hw/amso1100/c2_vq.c @@ -164,7 +164,7 @@ void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r) */ void *vq_repbuf_alloc(struct c2_dev *c2dev) { - return 
kmem_cache_alloc(c2dev->host_msg_cache, GFP_ATOMIC); + return kmem_cache_alloc(c2dev->host_msg_cache, SLAB_ATOMIC); } /* diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_av.c b/trunk/drivers/infiniband/hw/ehca/ehca_av.c index 0d6e2c4bb245..214e2fdddeef 100644 --- a/trunk/drivers/infiniband/hw/ehca/ehca_av.c +++ b/trunk/drivers/infiniband/hw/ehca/ehca_av.c @@ -57,7 +57,7 @@ struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, ib_device); - av = kmem_cache_alloc(av_cache, GFP_KERNEL); + av = kmem_cache_alloc(av_cache, SLAB_KERNEL); if (!av) { ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p", pd, ah_attr); diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_cq.c b/trunk/drivers/infiniband/hw/ehca/ehca_cq.c index 93995b658d94..458fe19648a1 100644 --- a/trunk/drivers/infiniband/hw/ehca/ehca_cq.c +++ b/trunk/drivers/infiniband/hw/ehca/ehca_cq.c @@ -134,7 +134,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, if (cqe >= 0xFFFFFFFF - 64 - additional_cqe) return ERR_PTR(-EINVAL); - my_cq = kmem_cache_alloc(cq_cache, GFP_KERNEL); + my_cq = kmem_cache_alloc(cq_cache, SLAB_KERNEL); if (!my_cq) { ehca_err(device, "Out of memory for ehca_cq struct device=%p", device); diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_main.c b/trunk/drivers/infiniband/hw/ehca/ehca_main.c index cc47e4c13a18..3d1c1c535038 100644 --- a/trunk/drivers/infiniband/hw/ehca/ehca_main.c +++ b/trunk/drivers/infiniband/hw/ehca/ehca_main.c @@ -108,7 +108,7 @@ static struct kmem_cache *ctblk_cache = NULL; void *ehca_alloc_fw_ctrlblock(void) { - void *ret = kmem_cache_zalloc(ctblk_cache, GFP_KERNEL); + void *ret = kmem_cache_zalloc(ctblk_cache, SLAB_KERNEL); if (!ret) ehca_gen_err("Out of memory for ctblk"); return ret; diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_mrmw.c b/trunk/drivers/infiniband/hw/ehca/ehca_mrmw.c index 0a5e2214cc5f..abce676c0ae0 100644 --- a/trunk/drivers/infiniband/hw/ehca/ehca_mrmw.c +++ b/trunk/drivers/infiniband/hw/ehca/ehca_mrmw.c @@ -53,7 +53,7 @@ static struct ehca_mr *ehca_mr_new(void) { struct ehca_mr *me; - me = kmem_cache_alloc(mr_cache, GFP_KERNEL); + me = kmem_cache_alloc(mr_cache, SLAB_KERNEL); if (me) { memset(me, 0, sizeof(struct ehca_mr)); spin_lock_init(&me->mrlock); @@ -72,7 +72,7 @@ static struct ehca_mw *ehca_mw_new(void) { struct ehca_mw *me; - me = kmem_cache_alloc(mw_cache, GFP_KERNEL); + me = kmem_cache_alloc(mw_cache, SLAB_KERNEL); if (me) { memset(me, 0, sizeof(struct ehca_mw)); spin_lock_init(&me->mwlock); diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_pd.c b/trunk/drivers/infiniband/hw/ehca/ehca_pd.c index d5345e5b3cd6..2c3cdc6f7b39 100644 --- a/trunk/drivers/infiniband/hw/ehca/ehca_pd.c +++ b/trunk/drivers/infiniband/hw/ehca/ehca_pd.c @@ -50,7 +50,7 @@ struct ib_pd *ehca_alloc_pd(struct ib_device *device, { struct ehca_pd *pd; - pd = kmem_cache_alloc(pd_cache, GFP_KERNEL); + pd = kmem_cache_alloc(pd_cache, SLAB_KERNEL); if (!pd) { ehca_err(device, "device=%p context=%p out of memory", device, context); diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_qp.c b/trunk/drivers/infiniband/hw/ehca/ehca_qp.c index c6c9cef203e3..8682aa50c707 100644 --- a/trunk/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/trunk/drivers/infiniband/hw/ehca/ehca_qp.c @@ -450,7 +450,7 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd, if (pd->uobject && udata) context = pd->uobject->context; - my_qp = kmem_cache_alloc(qp_cache, GFP_KERNEL); + my_qp = kmem_cache_alloc(qp_cache, SLAB_KERNEL); if 
(!my_qp) { ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd); return ERR_PTR(-ENOMEM); diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_user_pages.c b/trunk/drivers/infiniband/hw/ipath/ipath_user_pages.c index 8536aeb96af8..413754b1d8a2 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_user_pages.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_user_pages.c @@ -214,10 +214,9 @@ struct ipath_user_pages_work { unsigned long num_pages; }; -static void user_pages_account(struct work_struct *_work) +static void user_pages_account(void *ptr) { - struct ipath_user_pages_work *work = - container_of(_work, struct ipath_user_pages_work, work); + struct ipath_user_pages_work *work = ptr; down_write(&work->mm->mmap_sem); work->mm->locked_vm -= work->num_pages; @@ -243,7 +242,7 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages) goto bail; - INIT_WORK(&work->work, user_pages_account); + INIT_WORK(&work->work, user_pages_account, work); work->mm = mm; work->num_pages = num_pages; diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_av.c b/trunk/drivers/infiniband/hw/mthca/mthca_av.c index 27caf3b0648a..57cdc1bc5f50 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_av.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_av.c @@ -189,7 +189,7 @@ int mthca_create_ah(struct mthca_dev *dev, on_hca_fail: if (ah->type == MTHCA_AH_PCI_POOL) { ah->av = pci_pool_alloc(dev->av_table.pool, - GFP_ATOMIC, &ah->avdma); + SLAB_ATOMIC, &ah->avdma); if (!ah->av) return -ENOMEM; diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_catas.c b/trunk/drivers/infiniband/hw/mthca/mthca_catas.c index e948158a28d9..cd044ea2dfa4 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_catas.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_catas.c @@ -57,7 +57,7 @@ static int catas_reset_disable; module_param_named(catas_reset_disable, catas_reset_disable, int, 0644); MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero"); -static void catas_reset(struct work_struct *work) +static void catas_reset(void *work_ptr) { struct mthca_dev *dev, *tmpdev; LIST_HEAD(tlist); @@ -203,7 +203,7 @@ void mthca_stop_catas_poll(struct mthca_dev *dev) int __init mthca_catas_init(void) { - INIT_WORK(&catas_work, catas_reset); + INIT_WORK(&catas_work, catas_reset, NULL); catas_wq = create_singlethread_workqueue("mthca_catas"); if (!catas_wq) diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib.h b/trunk/drivers/infiniband/ulp/ipoib/ipoib.h index 99547996aba2..f2b61851a49c 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib.h @@ -136,11 +136,11 @@ struct ipoib_dev_priv { struct list_head multicast_list; struct rb_root multicast_tree; - struct delayed_work pkey_task; - struct delayed_work mcast_task; + struct work_struct pkey_task; + struct work_struct mcast_task; struct work_struct flush_task; struct work_struct restart_task; - struct delayed_work ah_reap_task; + struct work_struct ah_reap_task; struct ib_device *ca; u8 port; @@ -254,13 +254,13 @@ int ipoib_add_pkey_attr(struct net_device *dev); void ipoib_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_ah *address, u32 qpn); -void ipoib_reap_ah(struct work_struct *work); +void ipoib_reap_ah(void *dev_ptr); void ipoib_flush_paths(struct net_device *dev); struct ipoib_dev_priv *ipoib_intf_alloc(const char *format); int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port); -void ipoib_ib_dev_flush(struct work_struct *work); +void 
ipoib_ib_dev_flush(void *dev); void ipoib_ib_dev_cleanup(struct net_device *dev); int ipoib_ib_dev_open(struct net_device *dev); @@ -271,10 +271,10 @@ int ipoib_ib_dev_stop(struct net_device *dev); int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); void ipoib_dev_cleanup(struct net_device *dev); -void ipoib_mcast_join_task(struct work_struct *work); +void ipoib_mcast_join_task(void *dev_ptr); void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb); -void ipoib_mcast_restart_task(struct work_struct *work); +void ipoib_mcast_restart_task(void *dev_ptr); int ipoib_mcast_start_thread(struct net_device *dev); int ipoib_mcast_stop_thread(struct net_device *dev, int flush); @@ -312,7 +312,7 @@ void ipoib_event(struct ib_event_handler *handler, int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey); int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey); -void ipoib_pkey_poll(struct work_struct *work); +void ipoib_pkey_poll(void *dev); int ipoib_pkey_dev_delay_open(struct net_device *dev); #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_ib.c index f10fba5d3265..8bf5e9ec7c95 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -400,11 +400,10 @@ static void __ipoib_reap_ah(struct net_device *dev) spin_unlock_irq(&priv->tx_lock); } -void ipoib_reap_ah(struct work_struct *work) +void ipoib_reap_ah(void *dev_ptr) { - struct ipoib_dev_priv *priv = - container_of(work, struct ipoib_dev_priv, ah_reap_task.work); - struct net_device *dev = priv->dev; + struct net_device *dev = dev_ptr; + struct ipoib_dev_priv *priv = netdev_priv(dev); __ipoib_reap_ah(dev); @@ -614,11 +613,10 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port) return 0; } -void ipoib_ib_dev_flush(struct work_struct *work) +void ipoib_ib_dev_flush(void *_dev) { - struct ipoib_dev_priv *cpriv, *priv = - container_of(work, struct ipoib_dev_priv, flush_task); - struct net_device *dev = priv->dev; + struct net_device *dev = (struct net_device *)_dev; + struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv; if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) { ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); @@ -640,14 +638,14 @@ void ipoib_ib_dev_flush(struct work_struct *work) */ if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { ipoib_ib_dev_up(dev); - ipoib_mcast_restart_task(&priv->restart_task); + ipoib_mcast_restart_task(dev); } mutex_lock(&priv->vlan_mutex); /* Flush any child interfaces too */ list_for_each_entry(cpriv, &priv->child_intfs, list) - ipoib_ib_dev_flush(&cpriv->flush_task); + ipoib_ib_dev_flush(cpriv->dev); mutex_unlock(&priv->vlan_mutex); } @@ -674,11 +672,10 @@ void ipoib_ib_dev_cleanup(struct net_device *dev) * change async notification is available. 
*/ -void ipoib_pkey_poll(struct work_struct *work) +void ipoib_pkey_poll(void *dev_ptr) { - struct ipoib_dev_priv *priv = - container_of(work, struct ipoib_dev_priv, pkey_task.work); - struct net_device *dev = priv->dev; + struct net_device *dev = dev_ptr; + struct ipoib_dev_priv *priv = netdev_priv(dev); ipoib_pkey_dev_check_presence(dev); diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c index c09280243726..5ba3154320b4 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -940,11 +940,11 @@ static void ipoib_setup(struct net_device *dev) INIT_LIST_HEAD(&priv->dead_ahs); INIT_LIST_HEAD(&priv->multicast_list); - INIT_DELAYED_WORK(&priv->pkey_task, ipoib_pkey_poll); - INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task); - INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush); - INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task); - INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah); + INIT_WORK(&priv->pkey_task, ipoib_pkey_poll, priv->dev); + INIT_WORK(&priv->mcast_task, ipoib_mcast_join_task, priv->dev); + INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush, priv->dev); + INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev); + INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah, priv->dev); } struct ipoib_dev_priv *ipoib_intf_alloc(const char *name) diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index b04b72ca32ed..d282d65e3ee0 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -399,8 +399,7 @@ static void ipoib_mcast_join_complete(int status, mcast->backoff = 1; mutex_lock(&mcast_mutex); if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(ipoib_workqueue, - &priv->mcast_task, 0); + queue_work(ipoib_workqueue, &priv->mcast_task); mutex_unlock(&mcast_mutex); complete(&mcast->done); return; @@ -436,8 +435,7 @@ static void ipoib_mcast_join_complete(int status, if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) { if (status == -ETIMEDOUT) - queue_delayed_work(ipoib_workqueue, &priv->mcast_task, - 0); + queue_work(ipoib_workqueue, &priv->mcast_task); else queue_delayed_work(ipoib_workqueue, &priv->mcast_task, mcast->backoff * HZ); @@ -519,11 +517,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, mcast->query_id = ret; } -void ipoib_mcast_join_task(struct work_struct *work) +void ipoib_mcast_join_task(void *dev_ptr) { - struct ipoib_dev_priv *priv = - container_of(work, struct ipoib_dev_priv, mcast_task.work); - struct net_device *dev = priv->dev; + struct net_device *dev = dev_ptr; + struct ipoib_dev_priv *priv = netdev_priv(dev); if (!test_bit(IPOIB_MCAST_RUN, &priv->flags)) return; @@ -613,7 +610,7 @@ int ipoib_mcast_start_thread(struct net_device *dev) mutex_lock(&mcast_mutex); if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0); + queue_work(ipoib_workqueue, &priv->mcast_task); mutex_unlock(&mcast_mutex); spin_lock_irq(&priv->lock); @@ -821,11 +818,10 @@ void ipoib_mcast_dev_flush(struct net_device *dev) } } -void ipoib_mcast_restart_task(struct work_struct *work) +void ipoib_mcast_restart_task(void *dev_ptr) { - struct ipoib_dev_priv *priv = - container_of(work, struct ipoib_dev_priv, restart_task); - struct net_device *dev = priv->dev; + struct net_device *dev = dev_ptr; + struct ipoib_dev_priv 
*priv = netdev_priv(dev); struct dev_mc_list *mclist; struct ipoib_mcast *mcast, *tmcast; LIST_HEAD(remove_list); diff --git a/trunk/drivers/infiniband/ulp/iser/iser_verbs.c b/trunk/drivers/infiniband/ulp/iser/iser_verbs.c index 693b77002897..18a000034996 100644 --- a/trunk/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/trunk/drivers/infiniband/ulp/iser/iser_verbs.c @@ -48,7 +48,7 @@ static void iser_cq_tasklet_fn(unsigned long data); static void iser_cq_callback(struct ib_cq *cq, void *cq_context); -static void iser_comp_error_worker(struct work_struct *work); +static void iser_comp_error_worker(void *data); static void iser_cq_event_callback(struct ib_event *cause, void *context) { @@ -480,7 +480,8 @@ int iser_conn_init(struct iser_conn **ibconn) init_waitqueue_head(&ib_conn->wait); atomic_set(&ib_conn->post_recv_buf_count, 0); atomic_set(&ib_conn->post_send_buf_count, 0); - INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker); + INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker, + ib_conn); INIT_LIST_HEAD(&ib_conn->conn_list); spin_lock_init(&ib_conn->lock); @@ -753,10 +754,9 @@ int iser_post_send(struct iser_desc *tx_desc) return ret_val; } -static void iser_comp_error_worker(struct work_struct *work) +static void iser_comp_error_worker(void *data) { - struct iser_conn *ib_conn = - container_of(work, struct iser_conn, comperror_work); + struct iser_conn *ib_conn = data; /* getting here when the state is UP means that the conn is being * * terminated asynchronously from the iSCSI layer's perspective. */ diff --git a/trunk/drivers/infiniband/ulp/srp/ib_srp.c b/trunk/drivers/infiniband/ulp/srp/ib_srp.c index a6289595557b..64ab5fc7cca3 100644 --- a/trunk/drivers/infiniband/ulp/srp/ib_srp.c +++ b/trunk/drivers/infiniband/ulp/srp/ib_srp.c @@ -390,10 +390,9 @@ static void srp_disconnect_target(struct srp_target_port *target) wait_for_completion(&target->done); } -static void srp_remove_work(struct work_struct *work) +static void srp_remove_work(void *target_ptr) { - struct srp_target_port *target = - container_of(work, struct srp_target_port, work); + struct srp_target_port *target = target_ptr; spin_lock_irq(target->scsi_host->host_lock); if (target->state != SRP_TARGET_DEAD) { @@ -576,7 +575,7 @@ static int srp_reconnect_target(struct srp_target_port *target) spin_lock_irq(target->scsi_host->host_lock); if (target->state == SRP_TARGET_CONNECTING) { target->state = SRP_TARGET_DEAD; - INIT_WORK(&target->work, srp_remove_work); + INIT_WORK(&target->work, srp_remove_work, target); schedule_work(&target->work); } spin_unlock_irq(target->scsi_host->host_lock); diff --git a/trunk/drivers/input/gameport/gameport.c b/trunk/drivers/input/gameport/gameport.c index 79dfb4b25c97..a0af97efe6ac 100644 --- a/trunk/drivers/input/gameport/gameport.c +++ b/trunk/drivers/input/gameport/gameport.c @@ -23,7 +23,6 @@ #include #include /* HZ */ #include -#include /*#include */ diff --git a/trunk/drivers/input/keyboard/atkbd.c b/trunk/drivers/input/keyboard/atkbd.c index 8451b29a3db5..cbb93669d1ce 100644 --- a/trunk/drivers/input/keyboard/atkbd.c +++ b/trunk/drivers/input/keyboard/atkbd.c @@ -567,9 +567,9 @@ static int atkbd_set_leds(struct atkbd *atkbd) * interrupt context. 
*/ -static void atkbd_event_work(struct work_struct *work) +static void atkbd_event_work(void *data) { - struct atkbd *atkbd = container_of(work, struct atkbd, event_work); + struct atkbd *atkbd = data; mutex_lock(&atkbd->event_mutex); @@ -943,7 +943,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv) atkbd->dev = dev; ps2_init(&atkbd->ps2dev, serio); - INIT_WORK(&atkbd->event_work, atkbd_event_work); + INIT_WORK(&atkbd->event_work, atkbd_event_work, atkbd); mutex_init(&atkbd->event_mutex); switch (serio->id.type) { diff --git a/trunk/drivers/input/keyboard/lkkbd.c b/trunk/drivers/input/keyboard/lkkbd.c index b7f049b45b6b..979b93e33da7 100644 --- a/trunk/drivers/input/keyboard/lkkbd.c +++ b/trunk/drivers/input/keyboard/lkkbd.c @@ -572,9 +572,9 @@ lkkbd_event (struct input_dev *dev, unsigned int type, unsigned int code, * were in. */ static void -lkkbd_reinit (struct work_struct *work) +lkkbd_reinit (void *data) { - struct lkkbd *lk = container_of(work, struct lkkbd, tq); + struct lkkbd *lk = data; int division; unsigned char leds_on = 0; unsigned char leds_off = 0; @@ -651,7 +651,7 @@ lkkbd_connect (struct serio *serio, struct serio_driver *drv) lk->serio = serio; lk->dev = input_dev; - INIT_WORK (&lk->tq, lkkbd_reinit); + INIT_WORK (&lk->tq, lkkbd_reinit, lk); lk->bell_volume = bell_volume; lk->keyclick_volume = keyclick_volume; lk->ctrlclick_volume = ctrlclick_volume; diff --git a/trunk/drivers/input/keyboard/sunkbd.c b/trunk/drivers/input/keyboard/sunkbd.c index 6cd887c5eb0a..cac4781103c3 100644 --- a/trunk/drivers/input/keyboard/sunkbd.c +++ b/trunk/drivers/input/keyboard/sunkbd.c @@ -208,9 +208,9 @@ static int sunkbd_initialize(struct sunkbd *sunkbd) * were in. */ -static void sunkbd_reinit(struct work_struct *work) +static void sunkbd_reinit(void *data) { - struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq); + struct sunkbd *sunkbd = data; wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ); @@ -248,7 +248,7 @@ static int sunkbd_connect(struct serio *serio, struct serio_driver *drv) sunkbd->serio = serio; sunkbd->dev = input_dev; init_waitqueue_head(&sunkbd->wait); - INIT_WORK(&sunkbd->tq, sunkbd_reinit); + INIT_WORK(&sunkbd->tq, sunkbd_reinit, sunkbd); snprintf(sunkbd->phys, sizeof(sunkbd->phys), "%s/input0", serio->phys); serio_set_drvdata(serio, sunkbd); diff --git a/trunk/drivers/input/misc/hp_sdc_rtc.c b/trunk/drivers/input/misc/hp_sdc_rtc.c index 31d5a13bfd6b..ab4da79ee560 100644 --- a/trunk/drivers/input/misc/hp_sdc_rtc.c +++ b/trunk/drivers/input/misc/hp_sdc_rtc.c @@ -695,9 +695,7 @@ static int __init hp_sdc_rtc_init(void) if ((ret = hp_sdc_request_timer_irq(&hp_sdc_rtc_isr))) return ret; - if (misc_register(&hp_sdc_rtc_dev) != 0) - printk(KERN_INFO "Could not register misc. dev for i8042 rtc\n"); - + misc_register(&hp_sdc_rtc_dev); create_proc_read_entry ("driver/rtc", 0, NULL, hp_sdc_rtc_read_proc, NULL); diff --git a/trunk/drivers/input/mouse/psmouse-base.c b/trunk/drivers/input/mouse/psmouse-base.c index 52bb2226ce2f..6f9b2c7cc9c2 100644 --- a/trunk/drivers/input/mouse/psmouse-base.c +++ b/trunk/drivers/input/mouse/psmouse-base.c @@ -888,10 +888,9 @@ static int psmouse_poll(struct psmouse *psmouse) * psmouse_resync() attempts to re-validate current protocol. 
*/ -static void psmouse_resync(struct work_struct *work) +static void psmouse_resync(void *p) { - struct psmouse *parent = NULL, *psmouse = - container_of(work, struct psmouse, resync_work); + struct psmouse *psmouse = p, *parent = NULL; struct serio *serio = psmouse->ps2dev.serio; psmouse_ret_t rc = PSMOUSE_GOOD_DATA; int failed = 0, enabled = 0; @@ -1122,7 +1121,7 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv) goto out; ps2_init(&psmouse->ps2dev, serio); - INIT_WORK(&psmouse->resync_work, psmouse_resync); + INIT_WORK(&psmouse->resync_work, psmouse_resync, psmouse); psmouse->dev = input_dev; snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys); diff --git a/trunk/drivers/input/serio/libps2.c b/trunk/drivers/input/serio/libps2.c index b3e84d3bb7f7..e5b1b60757bb 100644 --- a/trunk/drivers/input/serio/libps2.c +++ b/trunk/drivers/input/serio/libps2.c @@ -251,9 +251,9 @@ EXPORT_SYMBOL(ps2_command); * ps2_schedule_command(), to a PS/2 device (keyboard, mouse, etc.) */ -static void ps2_execute_scheduled_command(struct work_struct *work) +static void ps2_execute_scheduled_command(void *data) { - struct ps2work *ps2work = container_of(work, struct ps2work, work); + struct ps2work *ps2work = data; ps2_command(ps2work->ps2dev, ps2work->param, ps2work->command); kfree(ps2work); @@ -278,7 +278,7 @@ int ps2_schedule_command(struct ps2dev *ps2dev, unsigned char *param, int comman ps2work->ps2dev = ps2dev; ps2work->command = command; memcpy(ps2work->param, param, send); - INIT_WORK(&ps2work->work, ps2_execute_scheduled_command); + INIT_WORK(&ps2work->work, ps2_execute_scheduled_command, ps2work); if (!schedule_work(&ps2work->work)) { kfree(ps2work); diff --git a/trunk/drivers/input/serio/serio.c b/trunk/drivers/input/serio/serio.c index 5f1d4032fd57..211943f85cb6 100644 --- a/trunk/drivers/input/serio/serio.c +++ b/trunk/drivers/input/serio/serio.c @@ -35,7 +35,6 @@ #include #include #include -#include MODULE_AUTHOR("Vojtech Pavlik "); MODULE_DESCRIPTION("Serio abstraction core"); diff --git a/trunk/drivers/input/touchscreen/ads7846.c b/trunk/drivers/input/touchscreen/ads7846.c index 0517c7387d67..f56d6a0f0624 100644 --- a/trunk/drivers/input/touchscreen/ads7846.c +++ b/trunk/drivers/input/touchscreen/ads7846.c @@ -189,7 +189,7 @@ static int ads7846_read12_ser(struct device *dev, unsigned command) { struct spi_device *spi = to_spi_device(dev); struct ads7846 *ts = dev_get_drvdata(dev); - struct ser_req *req = kzalloc(sizeof *req, GFP_KERNEL); + struct ser_req *req = kzalloc(sizeof *req, SLAB_KERNEL); int status; int sample; int i; diff --git a/trunk/drivers/isdn/act2000/capi.c b/trunk/drivers/isdn/act2000/capi.c index 946c38cf6f8a..6ae6eb322111 100644 --- a/trunk/drivers/isdn/act2000/capi.c +++ b/trunk/drivers/isdn/act2000/capi.c @@ -627,10 +627,8 @@ handle_ack(act2000_card *card, act2000_chan *chan, __u8 blocknr) { } void -actcapi_dispatch(struct work_struct *work) +actcapi_dispatch(act2000_card *card) { - struct act2000_card *card = - container_of(work, struct act2000_card, rcv_tq); struct sk_buff *skb; actcapi_msg *msg; __u16 ccmd; diff --git a/trunk/drivers/isdn/act2000/capi.h b/trunk/drivers/isdn/act2000/capi.h index e55f6a931f66..49f453c53c64 100644 --- a/trunk/drivers/isdn/act2000/capi.h +++ b/trunk/drivers/isdn/act2000/capi.h @@ -356,7 +356,7 @@ extern int actcapi_connect_req(act2000_card *, act2000_chan *, char *, char, int extern void actcapi_select_b2_protocol_req(act2000_card *, act2000_chan *); extern void 
actcapi_disconnect_b3_req(act2000_card *, act2000_chan *); extern void actcapi_connect_resp(act2000_card *, act2000_chan *, __u8); -extern void actcapi_dispatch(struct work_struct *); +extern void actcapi_dispatch(act2000_card *); #ifdef DEBUG_MSG extern void actcapi_debug_msg(struct sk_buff *skb, int); #else diff --git a/trunk/drivers/isdn/act2000/module.c b/trunk/drivers/isdn/act2000/module.c index 90593e2ef872..d89dcde4eade 100644 --- a/trunk/drivers/isdn/act2000/module.c +++ b/trunk/drivers/isdn/act2000/module.c @@ -192,11 +192,8 @@ act2000_set_msn(act2000_card *card, char *eazmsn) } static void -act2000_transmit(struct work_struct *work) +act2000_transmit(struct act2000_card *card) { - struct act2000_card *card = - container_of(work, struct act2000_card, snd_tq); - switch (card->bus) { case ACT2000_BUS_ISA: act2000_isa_send(card); @@ -210,11 +207,8 @@ act2000_transmit(struct work_struct *work) } static void -act2000_receive(struct work_struct *work) +act2000_receive(struct act2000_card *card) { - struct act2000_card *card = - container_of(work, struct act2000_card, poll_tq); - switch (card->bus) { case ACT2000_BUS_ISA: act2000_isa_receive(card); @@ -233,7 +227,7 @@ act2000_poll(unsigned long data) act2000_card * card = (act2000_card *)data; unsigned long flags; - act2000_receive(&card->poll_tq); + act2000_receive(card); spin_lock_irqsave(&card->lock, flags); mod_timer(&card->ptimer, jiffies+3); spin_unlock_irqrestore(&card->lock, flags); @@ -584,9 +578,9 @@ act2000_alloccard(int bus, int port, int irq, char *id) skb_queue_head_init(&card->sndq); skb_queue_head_init(&card->rcvq); skb_queue_head_init(&card->ackq); - INIT_WORK(&card->snd_tq, act2000_transmit); - INIT_WORK(&card->rcv_tq, actcapi_dispatch); - INIT_WORK(&card->poll_tq, act2000_receive); + INIT_WORK(&card->snd_tq, (void *) (void *) act2000_transmit, card); + INIT_WORK(&card->rcv_tq, (void *) (void *) actcapi_dispatch, card); + INIT_WORK(&card->poll_tq, (void *) (void *) act2000_receive, card); init_timer(&card->ptimer); card->interface.owner = THIS_MODULE; card->interface.channels = ACT2000_BCH; diff --git a/trunk/drivers/isdn/capi/kcapi.c b/trunk/drivers/isdn/capi/kcapi.c index 783a25526315..8c4fcb9027b3 100644 --- a/trunk/drivers/isdn/capi/kcapi.c +++ b/trunk/drivers/isdn/capi/kcapi.c @@ -208,10 +208,9 @@ static void notify_down(u32 contr) } } -static void notify_handler(struct work_struct *work) +static void notify_handler(void *data) { - struct capi_notifier *np = - container_of(work, struct capi_notifier, work); + struct capi_notifier *np = data; switch (np->cmd) { case KCI_CONTRUP: @@ -236,7 +235,7 @@ static int notify_push(unsigned int cmd, u32 controller, u16 applid, u32 ncci) if (!np) return -ENOMEM; - INIT_WORK(&np->work, notify_handler); + INIT_WORK(&np->work, notify_handler, np); np->cmd = cmd; np->controller = controller; np->applid = applid; @@ -249,11 +248,10 @@ static int notify_push(unsigned int cmd, u32 controller, u16 applid, u32 ncci) /* -------- Receiver ------------------------------------------ */ -static void recv_handler(struct work_struct *work) +static void recv_handler(void *_ap) { struct sk_buff *skb; - struct capi20_appl *ap = - container_of(work, struct capi20_appl, recv_work); + struct capi20_appl *ap = (struct capi20_appl *) _ap; if ((!ap) || (ap->release_in_progress)) return; @@ -529,7 +527,7 @@ u16 capi20_register(struct capi20_appl *ap) ap->callback = NULL; init_MUTEX(&ap->recv_sem); skb_queue_head_init(&ap->recv_queue); - INIT_WORK(&ap->recv_work, recv_handler); + 
INIT_WORK(&ap->recv_work, recv_handler, (void *)ap); ap->release_in_progress = 0; write_unlock_irqrestore(&application_lock, flags); diff --git a/trunk/drivers/isdn/gigaset/bas-gigaset.c b/trunk/drivers/isdn/gigaset/bas-gigaset.c index 63b629b1cdb2..0c937325a1b3 100644 --- a/trunk/drivers/isdn/gigaset/bas-gigaset.c +++ b/trunk/drivers/isdn/gigaset/bas-gigaset.c @@ -572,7 +572,7 @@ static int atread_submit(struct cardstate *cs, int timeout) ucs->rcvbuf, ucs->rcvbuf_size, read_ctrl_callback, cs->inbuf); - if ((ret = usb_submit_urb(ucs->urb_cmd_in, GFP_ATOMIC)) != 0) { + if ((ret = usb_submit_urb(ucs->urb_cmd_in, SLAB_ATOMIC)) != 0) { update_basstate(ucs, 0, BS_ATRDPEND); dev_err(cs->dev, "could not submit HD_READ_ATMESSAGE: %s\n", get_usb_rcmsg(ret)); @@ -747,7 +747,7 @@ static void read_int_callback(struct urb *urb) check_pending(ucs); resubmit: - rc = usb_submit_urb(urb, GFP_ATOMIC); + rc = usb_submit_urb(urb, SLAB_ATOMIC); if (unlikely(rc != 0 && rc != -ENODEV)) { dev_err(cs->dev, "could not resubmit interrupt URB: %s\n", get_usb_rcmsg(rc)); @@ -807,7 +807,7 @@ static void read_iso_callback(struct urb *urb) urb->number_of_packets = BAS_NUMFRAMES; gig_dbg(DEBUG_ISO, "%s: isoc read overrun/resubmit", __func__); - rc = usb_submit_urb(urb, GFP_ATOMIC); + rc = usb_submit_urb(urb, SLAB_ATOMIC); if (unlikely(rc != 0 && rc != -ENODEV)) { dev_err(bcs->cs->dev, "could not resubmit isochronous read " @@ -900,7 +900,7 @@ static int starturbs(struct bc_state *bcs) } dump_urb(DEBUG_ISO, "Initial isoc read", urb); - if ((rc = usb_submit_urb(urb, GFP_ATOMIC)) != 0) + if ((rc = usb_submit_urb(urb, SLAB_ATOMIC)) != 0) goto error; } @@ -935,7 +935,7 @@ static int starturbs(struct bc_state *bcs) /* submit two URBs, keep third one */ for (k = 0; k < 2; ++k) { dump_urb(DEBUG_ISO, "Initial isoc write", urb); - rc = usb_submit_urb(ubc->isoouturbs[k].urb, GFP_ATOMIC); + rc = usb_submit_urb(ubc->isoouturbs[k].urb, SLAB_ATOMIC); if (rc != 0) goto error; } @@ -1042,7 +1042,7 @@ static int submit_iso_write_urb(struct isow_urbctx_t *ucx) return 0; /* no data to send */ urb->number_of_packets = nframe; - rc = usb_submit_urb(urb, GFP_ATOMIC); + rc = usb_submit_urb(urb, SLAB_ATOMIC); if (unlikely(rc)) { if (rc == -ENODEV) /* device removed - give up silently */ @@ -1341,7 +1341,7 @@ static void read_iso_tasklet(unsigned long data) urb->dev = bcs->cs->hw.bas->udev; urb->transfer_flags = URB_ISO_ASAP; urb->number_of_packets = BAS_NUMFRAMES; - rc = usb_submit_urb(urb, GFP_ATOMIC); + rc = usb_submit_urb(urb, SLAB_ATOMIC); if (unlikely(rc != 0 && rc != -ENODEV)) { dev_err(cs->dev, "could not resubmit isochronous read URB: %s\n", @@ -1458,7 +1458,7 @@ static void write_ctrl_callback(struct urb *urb) ucs->retry_ctrl); /* urb->dev is clobbered by USB subsystem */ urb->dev = ucs->udev; - rc = usb_submit_urb(urb, GFP_ATOMIC); + rc = usb_submit_urb(urb, SLAB_ATOMIC); if (unlikely(rc)) { dev_err(&ucs->interface->dev, "could not resubmit request 0x%02x: %s\n", @@ -1517,7 +1517,7 @@ static int req_submit(struct bc_state *bcs, int req, int val, int timeout) (unsigned char*) &ucs->dr_ctrl, NULL, 0, write_ctrl_callback, ucs); ucs->retry_ctrl = 0; - ret = usb_submit_urb(ucs->urb_ctrl, GFP_ATOMIC); + ret = usb_submit_urb(ucs->urb_ctrl, SLAB_ATOMIC); if (unlikely(ret)) { dev_err(bcs->cs->dev, "could not submit request 0x%02x: %s\n", req, get_usb_rcmsg(ret)); @@ -1763,7 +1763,7 @@ static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len) usb_sndctrlpipe(ucs->udev, 0), (unsigned char*) &ucs->dr_cmd_out, buf, len, 
write_command_callback, cs); - rc = usb_submit_urb(ucs->urb_cmd_out, GFP_ATOMIC); + rc = usb_submit_urb(ucs->urb_cmd_out, SLAB_ATOMIC); if (unlikely(rc)) { update_basstate(ucs, 0, BS_ATWRPEND); dev_err(cs->dev, "could not submit HD_WRITE_ATMESSAGE: %s\n", @@ -2218,21 +2218,21 @@ static int gigaset_probe(struct usb_interface *interface, * - three for the different uses of the default control pipe * - three for each isochronous pipe */ - if (!(ucs->urb_int_in = usb_alloc_urb(0, GFP_KERNEL)) || - !(ucs->urb_cmd_in = usb_alloc_urb(0, GFP_KERNEL)) || - !(ucs->urb_cmd_out = usb_alloc_urb(0, GFP_KERNEL)) || - !(ucs->urb_ctrl = usb_alloc_urb(0, GFP_KERNEL))) + if (!(ucs->urb_int_in = usb_alloc_urb(0, SLAB_KERNEL)) || + !(ucs->urb_cmd_in = usb_alloc_urb(0, SLAB_KERNEL)) || + !(ucs->urb_cmd_out = usb_alloc_urb(0, SLAB_KERNEL)) || + !(ucs->urb_ctrl = usb_alloc_urb(0, SLAB_KERNEL))) goto allocerr; for (j = 0; j < 2; ++j) { ubc = cs->bcs[j].hw.bas; for (i = 0; i < BAS_OUTURBS; ++i) if (!(ubc->isoouturbs[i].urb = - usb_alloc_urb(BAS_NUMFRAMES, GFP_KERNEL))) + usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL))) goto allocerr; for (i = 0; i < BAS_INURBS; ++i) if (!(ubc->isoinurbs[i] = - usb_alloc_urb(BAS_NUMFRAMES, GFP_KERNEL))) + usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL))) goto allocerr; } @@ -2246,7 +2246,7 @@ static int gigaset_probe(struct usb_interface *interface, (endpoint->bEndpointAddress) & 0x0f), ucs->int_in_buf, 3, read_int_callback, cs, endpoint->bInterval); - if ((rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL)) != 0) { + if ((rc = usb_submit_urb(ucs->urb_int_in, SLAB_KERNEL)) != 0) { dev_err(cs->dev, "could not submit interrupt URB: %s\n", get_usb_rcmsg(rc)); goto error; diff --git a/trunk/drivers/isdn/gigaset/usb-gigaset.c b/trunk/drivers/isdn/gigaset/usb-gigaset.c index 04f2ad7ba8b0..5ebf49ac9b23 100644 --- a/trunk/drivers/isdn/gigaset/usb-gigaset.c +++ b/trunk/drivers/isdn/gigaset/usb-gigaset.c @@ -410,7 +410,7 @@ static void gigaset_read_int_callback(struct urb *urb) if (resubmit) { spin_lock_irqsave(&cs->lock, flags); - r = cs->connected ? usb_submit_urb(urb, GFP_ATOMIC) : -ENODEV; + r = cs->connected ? usb_submit_urb(urb, SLAB_ATOMIC) : -ENODEV; spin_unlock_irqrestore(&cs->lock, flags); if (r) dev_err(cs->dev, "error %d when resubmitting urb.\n", @@ -486,7 +486,7 @@ static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb) atomic_set(&ucs->busy, 1); spin_lock_irqsave(&cs->lock, flags); - status = cs->connected ? usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) : -ENODEV; + status = cs->connected ? 
usb_submit_urb(ucs->bulk_out_urb, SLAB_ATOMIC) : -ENODEV; spin_unlock_irqrestore(&cs->lock, flags); if (status) { @@ -664,7 +664,7 @@ static int write_modem(struct cardstate *cs) ucs->bulk_out_endpointAddr & 0x0f), ucs->bulk_out_buffer, count, gigaset_write_bulk_callback, cs); - ret = usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC); + ret = usb_submit_urb(ucs->bulk_out_urb, SLAB_ATOMIC); } else { ret = -ENODEV; } @@ -763,7 +763,7 @@ static int gigaset_probe(struct usb_interface *interface, goto error; } - ucs->bulk_out_urb = usb_alloc_urb(0, GFP_KERNEL); + ucs->bulk_out_urb = usb_alloc_urb(0, SLAB_KERNEL); if (!ucs->bulk_out_urb) { dev_err(cs->dev, "Couldn't allocate bulk_out_urb\n"); retval = -ENOMEM; @@ -774,7 +774,7 @@ static int gigaset_probe(struct usb_interface *interface, atomic_set(&ucs->busy, 0); - ucs->read_urb = usb_alloc_urb(0, GFP_KERNEL); + ucs->read_urb = usb_alloc_urb(0, SLAB_KERNEL); if (!ucs->read_urb) { dev_err(cs->dev, "No free urbs available\n"); retval = -ENOMEM; @@ -797,7 +797,7 @@ static int gigaset_probe(struct usb_interface *interface, gigaset_read_int_callback, cs->inbuf + 0, endpoint->bInterval); - retval = usb_submit_urb(ucs->read_urb, GFP_KERNEL); + retval = usb_submit_urb(ucs->read_urb, SLAB_KERNEL); if (retval) { dev_err(cs->dev, "Could not submit URB (error %d)\n", -retval); goto error; diff --git a/trunk/drivers/isdn/hisax/amd7930_fn.c b/trunk/drivers/isdn/hisax/amd7930_fn.c index 3b19caeba258..bec59010bc66 100644 --- a/trunk/drivers/isdn/hisax/amd7930_fn.c +++ b/trunk/drivers/isdn/hisax/amd7930_fn.c @@ -232,10 +232,9 @@ Amd7930_new_ph(struct IsdnCardState *cs) static void -Amd7930_bh(struct work_struct *work) +Amd7930_bh(struct IsdnCardState *cs) { - struct IsdnCardState *cs = - container_of(work, struct IsdnCardState, tqueue); + struct PStack *stptr; if (!cs) @@ -790,7 +789,7 @@ Amd7930_init(struct IsdnCardState *cs) void __devinit setup_Amd7930(struct IsdnCardState *cs) { - INIT_WORK(&cs->tqueue, Amd7930_bh); + INIT_WORK(&cs->tqueue, (void *)(void *) Amd7930_bh, cs); cs->dbusytimer.function = (void *) dbusy_timer_handler; cs->dbusytimer.data = (long) cs; init_timer(&cs->dbusytimer); diff --git a/trunk/drivers/isdn/hisax/config.c b/trunk/drivers/isdn/hisax/config.c index cede72cdbb31..785b08554fca 100644 --- a/trunk/drivers/isdn/hisax/config.c +++ b/trunk/drivers/isdn/hisax/config.c @@ -1137,6 +1137,7 @@ static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockow cs->tx_skb = NULL; cs->tx_cnt = 0; cs->event = 0; + cs->tqueue.data = cs; skb_queue_head_init(&cs->rq); skb_queue_head_init(&cs->sq); @@ -1553,7 +1554,7 @@ static void hisax_b_l2l1(struct PStack *st, int pr, void *arg); static int hisax_cardmsg(struct IsdnCardState *cs, int mt, void *arg); static int hisax_bc_setstack(struct PStack *st, struct BCState *bcs); static void hisax_bc_close(struct BCState *bcs); -static void hisax_bh(struct work_struct *work); +static void hisax_bh(struct IsdnCardState *cs); static void EChannel_proc_rcv(struct hisax_d_if *d_if); int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[], @@ -1585,7 +1586,7 @@ int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[], hisax_d_if->cs = cs; cs->hw.hisax_d_if = hisax_d_if; cs->cardmsg = hisax_cardmsg; - INIT_WORK(&cs->tqueue, hisax_bh); + INIT_WORK(&cs->tqueue, (void *)(void *)hisax_bh, cs); cs->channel[0].d_st->l2.l2l1 = hisax_d_l2l1; for (i = 0; i < 2; i++) { cs->bcs[i].BC_SetStack = hisax_bc_setstack; @@ -1617,10 +1618,8 @@ static void hisax_sched_event(struct 
IsdnCardState *cs, int event) schedule_work(&cs->tqueue); } -static void hisax_bh(struct work_struct *work) +static void hisax_bh(struct IsdnCardState *cs) { - struct IsdnCardState *cs = - container_of(work, struct IsdnCardState, tqueue); struct PStack *st; int pr; diff --git a/trunk/drivers/isdn/hisax/hfc4s8s_l1.c b/trunk/drivers/isdn/hisax/hfc4s8s_l1.c index de9b1a4d6bac..d852c9d998b2 100644 --- a/trunk/drivers/isdn/hisax/hfc4s8s_l1.c +++ b/trunk/drivers/isdn/hisax/hfc4s8s_l1.c @@ -1083,9 +1083,8 @@ tx_b_frame(struct hfc4s8s_btype *bch) /* bottom half handler for interrupt */ /*************************************/ static void -hfc4s8s_bh(struct work_struct *work) +hfc4s8s_bh(hfc4s8s_hw * hw) { - hfc4s8s_hw *hw = container_of(work, hfc4s8s_hw, tqueue); u_char b; struct hfc4s8s_l1 *l1p; volatile u_char *fifo_stat; @@ -1551,7 +1550,7 @@ setup_instance(hfc4s8s_hw * hw) goto out; } - INIT_WORK(&hw->tqueue, hfc4s8s_bh); + INIT_WORK(&hw->tqueue, (void *) (void *) hfc4s8s_bh, hw); if (request_irq (hw->irq, hfc4s8s_interrupt, IRQF_SHARED, hw->card_name, hw)) { diff --git a/trunk/drivers/isdn/hisax/hfc_2bds0.c b/trunk/drivers/isdn/hisax/hfc_2bds0.c index 8d9864453a23..6360e8214720 100644 --- a/trunk/drivers/isdn/hisax/hfc_2bds0.c +++ b/trunk/drivers/isdn/hisax/hfc_2bds0.c @@ -549,11 +549,10 @@ setstack_2b(struct PStack *st, struct BCState *bcs) } static void -hfcd_bh(struct work_struct *work) +hfcd_bh(struct IsdnCardState *cs) { - struct IsdnCardState *cs = - container_of(work, struct IsdnCardState, tqueue); - + if (!cs) + return; if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) { switch (cs->dc.hfcd.ph_state) { case (0): @@ -1073,5 +1072,5 @@ set_cs_func(struct IsdnCardState *cs) cs->dbusytimer.function = (void *) hfc_dbusy_timer; cs->dbusytimer.data = (long) cs; init_timer(&cs->dbusytimer); - INIT_WORK(&cs->tqueue, hfcd_bh); + INIT_WORK(&cs->tqueue, (void *)(void *) hfcd_bh, cs); } diff --git a/trunk/drivers/isdn/hisax/hfc_pci.c b/trunk/drivers/isdn/hisax/hfc_pci.c index 5db0a85b827f..93f60b563515 100644 --- a/trunk/drivers/isdn/hisax/hfc_pci.c +++ b/trunk/drivers/isdn/hisax/hfc_pci.c @@ -1506,10 +1506,8 @@ setstack_2b(struct PStack *st, struct BCState *bcs) /* handle L1 state changes */ /***************************/ static void -hfcpci_bh(struct work_struct *work) +hfcpci_bh(struct IsdnCardState *cs) { - struct IsdnCardState *cs = - container_of(work, struct IsdnCardState, tqueue); u_long flags; // struct PStack *stptr; @@ -1724,7 +1722,7 @@ setup_hfcpci(struct IsdnCard *card) Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2); /* At this point the needed PCI config is done */ /* fifos are still not enabled */ - INIT_WORK(&cs->tqueue, hfcpci_bh); + INIT_WORK(&cs->tqueue, (void *)(void *) hfcpci_bh, cs); cs->setstack_d = setstack_hfcpci; cs->BC_Send_Data = &hfcpci_send_data; cs->readisac = NULL; diff --git a/trunk/drivers/isdn/hisax/hfc_sx.c b/trunk/drivers/isdn/hisax/hfc_sx.c index 4fd09d21a27f..954d1536db1f 100644 --- a/trunk/drivers/isdn/hisax/hfc_sx.c +++ b/trunk/drivers/isdn/hisax/hfc_sx.c @@ -1251,10 +1251,8 @@ setstack_2b(struct PStack *st, struct BCState *bcs) /* handle L1 state changes */ /***************************/ static void -hfcsx_bh(struct work_struct *work) +hfcsx_bh(struct IsdnCardState *cs) { - struct IsdnCardState *cs = - container_of(work, struct IsdnCardState, tqueue); u_long flags; if (!cs) @@ -1501,7 +1499,7 @@ setup_hfcsx(struct IsdnCard *card) cs->dbusytimer.function = (void *) hfcsx_dbusy_timer; cs->dbusytimer.data = (long) cs; init_timer(&cs->dbusytimer); - 
INIT_WORK(&cs->tqueue, hfcsx_bh); + INIT_WORK(&cs->tqueue, (void *)(void *) hfcsx_bh, cs); cs->readisac = NULL; cs->writeisac = NULL; cs->readisacfifo = NULL; diff --git a/trunk/drivers/isdn/hisax/icc.c b/trunk/drivers/isdn/hisax/icc.c index 682cac32f259..da706925d54d 100644 --- a/trunk/drivers/isdn/hisax/icc.c +++ b/trunk/drivers/isdn/hisax/icc.c @@ -77,10 +77,8 @@ icc_new_ph(struct IsdnCardState *cs) } static void -icc_bh(struct work_struct *work) +icc_bh(struct IsdnCardState *cs) { - struct IsdnCardState *cs = - container_of(work, struct IsdnCardState, tqueue); struct PStack *stptr; if (!cs) @@ -676,7 +674,7 @@ clear_pending_icc_ints(struct IsdnCardState *cs) void __devinit setup_icc(struct IsdnCardState *cs) { - INIT_WORK(&cs->tqueue, icc_bh); + INIT_WORK(&cs->tqueue, (void *)(void *) icc_bh, cs); cs->dbusytimer.function = (void *) dbusy_timer_handler; cs->dbusytimer.data = (long) cs; init_timer(&cs->dbusytimer); diff --git a/trunk/drivers/isdn/hisax/isac.c b/trunk/drivers/isdn/hisax/isac.c index 4e9f23803dae..282f349408bc 100644 --- a/trunk/drivers/isdn/hisax/isac.c +++ b/trunk/drivers/isdn/hisax/isac.c @@ -81,10 +81,8 @@ isac_new_ph(struct IsdnCardState *cs) } static void -isac_bh(struct work_struct *work) +isac_bh(struct IsdnCardState *cs) { - struct IsdnCardState *cs = - container_of(work, struct IsdnCardState, tqueue); struct PStack *stptr; if (!cs) @@ -676,7 +674,7 @@ clear_pending_isac_ints(struct IsdnCardState *cs) void __devinit setup_isac(struct IsdnCardState *cs) { - INIT_WORK(&cs->tqueue, isac_bh); + INIT_WORK(&cs->tqueue, (void *)(void *) isac_bh, cs); cs->dbusytimer.function = (void *) dbusy_timer_handler; cs->dbusytimer.data = (long) cs; init_timer(&cs->dbusytimer); diff --git a/trunk/drivers/isdn/hisax/isar.c b/trunk/drivers/isdn/hisax/isar.c index 6f1a6583b17d..674af673ff96 100644 --- a/trunk/drivers/isdn/hisax/isar.c +++ b/trunk/drivers/isdn/hisax/isar.c @@ -437,10 +437,8 @@ extern void BChannel_bh(struct BCState *); #define B_LL_OK 10 static void -isar_bh(struct work_struct *work) +isar_bh(struct BCState *bcs) { - struct BCState *bcs = container_of(work, struct BCState, tqueue); - BChannel_bh(bcs); if (test_and_clear_bit(B_LL_NOCARRIER, &bcs->event)) ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_NOCARR); @@ -1582,7 +1580,7 @@ isar_setup(struct IsdnCardState *cs) cs->bcs[i].mode = 0; cs->bcs[i].hw.isar.dpath = i + 1; modeisar(&cs->bcs[i], 0, 0); - INIT_WORK(&cs->bcs[i].tqueue, isar_bh); + INIT_WORK(&cs->bcs[i].tqueue, (void *)(void *) isar_bh, &cs->bcs[i]); } } diff --git a/trunk/drivers/isdn/hisax/isdnhdlc.h b/trunk/drivers/isdn/hisax/isdnhdlc.h index 5655b5f9c48e..269315988dc8 100644 --- a/trunk/drivers/isdn/hisax/isdnhdlc.h +++ b/trunk/drivers/isdn/hisax/isdnhdlc.h @@ -41,10 +41,10 @@ struct isdnhdlc_vars { unsigned char shift_reg; unsigned char ffvalue; - unsigned int data_received:1; // set if transferring data - unsigned int dchannel:1; // set if D channel (send idle instead of flags) - unsigned int do_adapt56:1; // set if 56K adaptation - unsigned int do_closing:1; // set if in closing phase (need to send CRC + flag + int data_received:1; // set if transferring data + int dchannel:1; // set if D channel (send idle instead of flags) + int do_adapt56:1; // set if 56K adaptation + int do_closing:1; // set if in closing phase (need to send CRC + flag }; diff --git a/trunk/drivers/isdn/hisax/isdnl1.c b/trunk/drivers/isdn/hisax/isdnl1.c index a14204ec88ee..bab356886483 100644 --- a/trunk/drivers/isdn/hisax/isdnl1.c +++ b/trunk/drivers/isdn/hisax/isdnl1.c @@ -315,10 +315,8 
@@ BChannel_proc_ack(struct BCState *bcs) } void -BChannel_bh(struct work_struct *work) +BChannel_bh(struct BCState *bcs) { - struct BCState *bcs = container_of(work, struct BCState, tqueue); - if (!bcs) return; if (test_and_clear_bit(B_RCVBUFREADY, &bcs->event)) @@ -364,7 +362,7 @@ init_bcstate(struct IsdnCardState *cs, int bc) bcs->cs = cs; bcs->channel = bc; - INIT_WORK(&bcs->tqueue, BChannel_bh); + INIT_WORK(&bcs->tqueue, (void *)(void *) BChannel_bh, bcs); spin_lock_init(&bcs->aclock); bcs->BC_SetStack = NULL; bcs->BC_Close = NULL; diff --git a/trunk/drivers/isdn/hisax/w6692.c b/trunk/drivers/isdn/hisax/w6692.c index 3aeceaf9769e..1655341797a9 100644 --- a/trunk/drivers/isdn/hisax/w6692.c +++ b/trunk/drivers/isdn/hisax/w6692.c @@ -101,10 +101,8 @@ W6692_new_ph(struct IsdnCardState *cs) } static void -W6692_bh(struct work_struct *work) +W6692_bh(struct IsdnCardState *cs) { - struct IsdnCardState *cs = - container_of(work, struct IsdnCardState, tqueue); struct PStack *stptr; if (!cs) @@ -1072,7 +1070,7 @@ setup_w6692(struct IsdnCard *card) id_list[cs->subtyp].card_name, cs->irq, cs->hw.w6692.iobase); - INIT_WORK(&cs->tqueue, W6692_bh); + INIT_WORK(&cs->tqueue, (void *)(void *) W6692_bh, cs); cs->readW6692 = &ReadW6692; cs->writeW6692 = &WriteW6692; cs->readisacfifo = &ReadISACfifo; diff --git a/trunk/drivers/isdn/hysdn/boardergo.c b/trunk/drivers/isdn/hysdn/boardergo.c index a1206498a1cf..82e42a80dc4b 100644 --- a/trunk/drivers/isdn/hysdn/boardergo.c +++ b/trunk/drivers/isdn/hysdn/boardergo.c @@ -71,9 +71,8 @@ ergo_interrupt(int intno, void *dev_id) /* may be queued from everywhere (interrupts included). */ /******************************************************************************/ static void -ergo_irq_bh(struct work_struct *ugli_api) +ergo_irq_bh(hysdn_card * card) { - hysdn_card * card = container_of(ugli_api, hysdn_card, irq_queue); tErgDpram *dpr; int again; unsigned long flags; @@ -443,7 +442,7 @@ ergo_inithardware(hysdn_card * card) card->writebootseq = ergo_writebootseq; card->waitpofready = ergo_waitpofready; card->set_errlog_state = ergo_set_errlog_state; - INIT_WORK(&card->irq_queue, ergo_irq_bh); + INIT_WORK(&card->irq_queue, (void *) (void *) ergo_irq_bh, card); card->hysdn_lock = SPIN_LOCK_UNLOCKED; return (0); diff --git a/trunk/drivers/isdn/i4l/isdn_net.c b/trunk/drivers/isdn/i4l/isdn_net.c index 2e4daebfb7e0..1f8d6ae66b41 100644 --- a/trunk/drivers/isdn/i4l/isdn_net.c +++ b/trunk/drivers/isdn/i4l/isdn_net.c @@ -984,9 +984,9 @@ void isdn_net_write_super(isdn_net_local *lp, struct sk_buff *skb) /* * called from tq_immediate */ -static void isdn_net_softint(struct work_struct *work) +static void isdn_net_softint(void *private) { - isdn_net_local *lp = container_of(work, isdn_net_local, tqueue); + isdn_net_local *lp = private; struct sk_buff *skb; spin_lock_bh(&lp->xmit_lock); @@ -2596,7 +2596,7 @@ isdn_net_new(char *name, struct net_device *master) netdev->local->netdev = netdev; netdev->local->next = netdev->local; - INIT_WORK(&netdev->local->tqueue, isdn_net_softint); + INIT_WORK(&netdev->local->tqueue, (void *)(void *) isdn_net_softint, netdev->local); spin_lock_init(&netdev->local->xmit_lock); netdev->local->isdn_device = -1; diff --git a/trunk/drivers/isdn/pcbit/drv.c b/trunk/drivers/isdn/pcbit/drv.c index 1966f3410a13..6ead5e1508b7 100644 --- a/trunk/drivers/isdn/pcbit/drv.c +++ b/trunk/drivers/isdn/pcbit/drv.c @@ -68,6 +68,8 @@ static void pcbit_set_msn(struct pcbit_dev *dev, char *list); static int pcbit_check_msn(struct pcbit_dev *dev, char *msn); +extern 
void pcbit_deliver(void * data); + int pcbit_init_dev(int board, int mem_base, int irq) { struct pcbit_dev *dev; @@ -127,7 +129,7 @@ int pcbit_init_dev(int board, int mem_base, int irq) memset(dev->b2, 0, sizeof(struct pcbit_chan)); dev->b2->id = 1; - INIT_WORK(&dev->qdelivery, pcbit_deliver); + INIT_WORK(&dev->qdelivery, pcbit_deliver, dev); /* * interrupts diff --git a/trunk/drivers/isdn/pcbit/layer2.c b/trunk/drivers/isdn/pcbit/layer2.c index 0c9f6df873fc..937fd2120381 100644 --- a/trunk/drivers/isdn/pcbit/layer2.c +++ b/trunk/drivers/isdn/pcbit/layer2.c @@ -67,6 +67,7 @@ extern void pcbit_l3_receive(struct pcbit_dev *dev, ulong msg, * Prototypes */ +void pcbit_deliver(void *data); static void pcbit_transmit(struct pcbit_dev *dev); static void pcbit_recv_ack(struct pcbit_dev *dev, unsigned char ack); @@ -298,12 +299,11 @@ pcbit_transmit(struct pcbit_dev *dev) */ void -pcbit_deliver(struct work_struct *work) +pcbit_deliver(void *data) { struct frame_buf *frame; unsigned long flags, msg; - struct pcbit_dev *dev = - container_of(work, struct pcbit_dev, qdelivery); + struct pcbit_dev *dev = (struct pcbit_dev *) data; spin_lock_irqsave(&dev->lock, flags); diff --git a/trunk/drivers/isdn/pcbit/pcbit.h b/trunk/drivers/isdn/pcbit/pcbit.h index 19c18e88ff16..388bacefd23a 100644 --- a/trunk/drivers/isdn/pcbit/pcbit.h +++ b/trunk/drivers/isdn/pcbit/pcbit.h @@ -166,6 +166,4 @@ struct pcbit_ioctl { #define L2_RUNNING 5 #define L2_ERROR 6 -extern void pcbit_deliver(struct work_struct *work); - #endif diff --git a/trunk/drivers/leds/Kconfig b/trunk/drivers/leds/Kconfig index 176142c61492..9c39b98d5a5b 100644 --- a/trunk/drivers/leds/Kconfig +++ b/trunk/drivers/leds/Kconfig @@ -76,12 +76,6 @@ config LEDS_NET48XX This option enables support for the Soekris net4801 and net4826 error LED. -config LEDS_WRAP - tristate "LED Support for the WRAP series LEDs" - depends on LEDS_CLASS && SCx200_GPIO - help - This option enables support for the PCEngines WRAP programmable LEDs. - comment "LED Triggers" config LEDS_TRIGGERS diff --git a/trunk/drivers/leds/Makefile b/trunk/drivers/leds/Makefile index 500de3dc962a..6aa2aed7539d 100644 --- a/trunk/drivers/leds/Makefile +++ b/trunk/drivers/leds/Makefile @@ -13,7 +13,6 @@ obj-$(CONFIG_LEDS_TOSA) += leds-tosa.o obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o -obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o # LED Triggers obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o diff --git a/trunk/drivers/leds/leds-wrap.c b/trunk/drivers/leds/leds-wrap.c deleted file mode 100644 index 27fb2d8e991f..000000000000 --- a/trunk/drivers/leds/leds-wrap.c +++ /dev/null @@ -1,142 +0,0 @@ -/* - * LEDs driver for PCEngines WRAP - * - * Copyright (C) 2006 Kristian Kielhofner - * - * Based on leds-net48xx.c - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include -#include -#include -#include -#include - -#define DRVNAME "wrap-led" -#define WRAP_ERROR_LED_GPIO 3 -#define WRAP_EXTRA_LED_GPIO 18 - -static struct platform_device *pdev; - -static void wrap_error_led_set(struct led_classdev *led_cdev, - enum led_brightness value) -{ - if (value) - scx200_gpio_set_low(WRAP_ERROR_LED_GPIO); - else - scx200_gpio_set_high(WRAP_ERROR_LED_GPIO); -} - -static void wrap_extra_led_set(struct led_classdev *led_cdev, - enum led_brightness value) -{ - if (value) - scx200_gpio_set_low(WRAP_EXTRA_LED_GPIO); - else - scx200_gpio_set_high(WRAP_EXTRA_LED_GPIO); -} - -static struct led_classdev wrap_error_led = { - .name = "wrap:error", - .brightness_set = wrap_error_led_set, -}; - -static struct led_classdev wrap_extra_led = { - .name = "wrap:extra", - .brightness_set = wrap_extra_led_set, -}; - -#ifdef CONFIG_PM -static int wrap_led_suspend(struct platform_device *dev, - pm_message_t state) -{ - led_classdev_suspend(&wrap_error_led); - led_classdev_suspend(&wrap_extra_led); - return 0; -} - -static int wrap_led_resume(struct platform_device *dev) -{ - led_classdev_resume(&wrap_error_led); - led_classdev_resume(&wrap_extra_led); - return 0; -} -#else -#define wrap_led_suspend NULL -#define wrap_led_resume NULL -#endif - -static int wrap_led_probe(struct platform_device *pdev) -{ - int ret; - - ret = led_classdev_register(&pdev->dev, &wrap_error_led); - if (ret == 0) { - ret = led_classdev_register(&pdev->dev, &wrap_extra_led); - if (ret < 0) - led_classdev_unregister(&wrap_error_led); - } - return ret; -} - -static int wrap_led_remove(struct platform_device *pdev) -{ - led_classdev_unregister(&wrap_error_led); - led_classdev_unregister(&wrap_extra_led); - return 0; -} - -static struct platform_driver wrap_led_driver = { - .probe = wrap_led_probe, - .remove = wrap_led_remove, - .suspend = wrap_led_suspend, - .resume = wrap_led_resume, - .driver = { - .name = DRVNAME, - .owner = THIS_MODULE, - }, -}; - -static int __init wrap_led_init(void) -{ - int ret; - - if (!scx200_gpio_present()) { - ret = -ENODEV; - goto out; - } - - ret = platform_driver_register(&wrap_led_driver); - if (ret < 0) - goto out; - - pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); - if (IS_ERR(pdev)) { - ret = PTR_ERR(pdev); - platform_driver_unregister(&wrap_led_driver); - goto out; - } - -out: - return ret; -} - -static void __exit wrap_led_exit(void) -{ - platform_device_unregister(pdev); - platform_driver_unregister(&wrap_led_driver); -} - -module_init(wrap_led_init); -module_exit(wrap_led_exit); - -MODULE_AUTHOR("Kristian Kielhofner "); -MODULE_DESCRIPTION("PCEngines WRAP LED driver"); -MODULE_LICENSE("GPL"); - diff --git a/trunk/drivers/macintosh/adb.c b/trunk/drivers/macintosh/adb.c index d43ea81d6df9..be0bd34ff6f9 100644 --- a/trunk/drivers/macintosh/adb.c +++ b/trunk/drivers/macintosh/adb.c @@ -267,12 +267,12 @@ adb_probe_task(void *x) } static void -__adb_probe_task(struct work_struct *bullshit) +__adb_probe_task(void *data) { adb_probe_task_pid = kernel_thread(adb_probe_task, NULL, SIGCHLD | CLONE_KERNEL); } -static DECLARE_WORK(adb_reset_work, __adb_probe_task); +static DECLARE_WORK(adb_reset_work, __adb_probe_task, NULL); int adb_reset_bus(void) diff --git a/trunk/drivers/macintosh/apm_emu.c b/trunk/drivers/macintosh/apm_emu.c index 8862a83b8d84..1293876a2ebd 100644 --- a/trunk/drivers/macintosh/apm_emu.c +++ b/trunk/drivers/macintosh/apm_emu.c @@ -529,8 +529,7 @@ static int __init apm_emu_init(void) if (apm_proc) apm_proc->owner = 
THIS_MODULE; - if (misc_register(&apm_device) != 0) - printk(KERN_INFO "Could not create misc. device for apm\n"); + misc_register(&apm_device); pmu_register_sleep_notifier(&apm_sleep_notifier); diff --git a/trunk/drivers/macintosh/rack-meter.c b/trunk/drivers/macintosh/rack-meter.c index 5ed41fe84e57..f1b6f563673a 100644 --- a/trunk/drivers/macintosh/rack-meter.c +++ b/trunk/drivers/macintosh/rack-meter.c @@ -48,8 +48,7 @@ struct rackmeter_dma { } ____cacheline_aligned; struct rackmeter_cpu { - struct delayed_work sniffer; - struct rackmeter *rm; + struct work_struct sniffer; cputime64_t prev_wall; cputime64_t prev_idle; int zero; @@ -209,12 +208,11 @@ static void rackmeter_setup_dbdma(struct rackmeter *rm) rackmeter_do_pause(rm, 0); } -static void rackmeter_do_timer(struct work_struct *work) +static void rackmeter_do_timer(void *data) { - struct rackmeter_cpu *rcpu = - container_of(work, struct rackmeter_cpu, sniffer.work); - struct rackmeter *rm = rcpu->rm; + struct rackmeter *rm = data; unsigned int cpu = smp_processor_id(); + struct rackmeter_cpu *rcpu = &rm->cpu[cpu]; cputime64_t cur_jiffies, total_idle_ticks; unsigned int total_ticks, idle_ticks; int i, offset, load, cumm, pause; @@ -265,10 +263,8 @@ static void __devinit rackmeter_init_cpu_sniffer(struct rackmeter *rm) * on those machines yet */ - rm->cpu[0].rm = rm; - INIT_DELAYED_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer); - rm->cpu[1].rm = rm; - INIT_DELAYED_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer); + INIT_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer, rm); + INIT_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer, rm); for_each_online_cpu(cpu) { struct rackmeter_cpu *rcpu; diff --git a/trunk/drivers/macintosh/smu.c b/trunk/drivers/macintosh/smu.c index 6dde27ab79a8..4f724cdd2efa 100644 --- a/trunk/drivers/macintosh/smu.c +++ b/trunk/drivers/macintosh/smu.c @@ -601,7 +601,7 @@ core_initcall(smu_late_init); * sysfs visibility */ -static void smu_expose_childs(struct work_struct *unused) +static void smu_expose_childs(void *unused) { struct device_node *np; @@ -611,7 +611,7 @@ static void smu_expose_childs(struct work_struct *unused) &smu->of_dev->dev); } -static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs); +static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs, NULL); static int smu_platform_probe(struct of_device* dev, const struct of_device_id *match) diff --git a/trunk/drivers/macintosh/therm_adt746x.c b/trunk/drivers/macintosh/therm_adt746x.c index 3d3bf1643e73..13b953ae8ebc 100644 --- a/trunk/drivers/macintosh/therm_adt746x.c +++ b/trunk/drivers/macintosh/therm_adt746x.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include diff --git a/trunk/drivers/macintosh/via-pmu.c b/trunk/drivers/macintosh/via-pmu.c index c8558d4ed506..e63ea1c1f3c1 100644 --- a/trunk/drivers/macintosh/via-pmu.c +++ b/trunk/drivers/macintosh/via-pmu.c @@ -42,7 +42,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/trunk/drivers/macintosh/windfarm_core.c b/trunk/drivers/macintosh/windfarm_core.c index e947af982f93..ab3faa702d58 100644 --- a/trunk/drivers/macintosh/windfarm_core.c +++ b/trunk/drivers/macintosh/windfarm_core.c @@ -34,7 +34,6 @@ #include #include #include -#include #include diff --git a/trunk/drivers/md/dm-crypt.c b/trunk/drivers/md/dm-crypt.c index c7bee4f2eedb..08a40f4e4f60 100644 --- a/trunk/drivers/md/dm-crypt.c +++ b/trunk/drivers/md/dm-crypt.c @@ -101,7 +101,7 @@ struct crypt_config { #define MIN_POOL_PAGES 32 #define MIN_BIO_PAGES 8 -static struct 
kmem_cache *_crypt_io_pool; +static kmem_cache_t *_crypt_io_pool; /* * Different IV generation algorithms: @@ -458,11 +458,11 @@ static void dec_pending(struct crypt_io *io, int error) * interrupt context. */ static struct workqueue_struct *_kcryptd_workqueue; -static void kcryptd_do_work(struct work_struct *work); +static void kcryptd_do_work(void *data); static void kcryptd_queue_io(struct crypt_io *io) { - INIT_WORK(&io->work, kcryptd_do_work); + INIT_WORK(&io->work, kcryptd_do_work, io); queue_work(_kcryptd_workqueue, &io->work); } @@ -618,9 +618,9 @@ static void process_read_endio(struct crypt_io *io) dec_pending(io, crypt_convert(cc, &ctx)); } -static void kcryptd_do_work(struct work_struct *work) +static void kcryptd_do_work(void *data) { - struct crypt_io *io = container_of(work, struct crypt_io, work); + struct crypt_io *io = data; if (io->post_process) process_read_endio(io); diff --git a/trunk/drivers/md/dm-mpath.c b/trunk/drivers/md/dm-mpath.c index cf8bf052138e..d754e0bc6e90 100644 --- a/trunk/drivers/md/dm-mpath.c +++ b/trunk/drivers/md/dm-mpath.c @@ -101,11 +101,11 @@ typedef int (*action_fn) (struct pgpath *pgpath); #define MIN_IOS 256 /* Mempool size */ -static struct kmem_cache *_mpio_cache; +static kmem_cache_t *_mpio_cache; struct workqueue_struct *kmultipathd; -static void process_queued_ios(struct work_struct *work); -static void trigger_event(struct work_struct *work); +static void process_queued_ios(void *data); +static void trigger_event(void *data); /*----------------------------------------------- @@ -173,8 +173,8 @@ static struct multipath *alloc_multipath(struct dm_target *ti) INIT_LIST_HEAD(&m->priority_groups); spin_lock_init(&m->lock); m->queue_io = 1; - INIT_WORK(&m->process_queued_ios, process_queued_ios); - INIT_WORK(&m->trigger_event, trigger_event); + INIT_WORK(&m->process_queued_ios, process_queued_ios, m); + INIT_WORK(&m->trigger_event, trigger_event, m); m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); if (!m->mpio_pool) { kfree(m); @@ -379,10 +379,9 @@ static void dispatch_queued_ios(struct multipath *m) } } -static void process_queued_ios(struct work_struct *work) +static void process_queued_ios(void *data) { - struct multipath *m = - container_of(work, struct multipath, process_queued_ios); + struct multipath *m = (struct multipath *) data; struct hw_handler *hwh = &m->hw_handler; struct pgpath *pgpath = NULL; unsigned init_required = 0, must_queue = 1; @@ -422,10 +421,9 @@ static void process_queued_ios(struct work_struct *work) * An event is triggered whenever a path is taken out of use. * Includes path failure and PG bypass. 
*/ -static void trigger_event(struct work_struct *work) +static void trigger_event(void *data) { - struct multipath *m = - container_of(work, struct multipath, trigger_event); + struct multipath *m = (struct multipath *) data; dm_table_event(m->ti->table); } diff --git a/trunk/drivers/md/dm-raid1.c b/trunk/drivers/md/dm-raid1.c index fc8cbb168e3e..48a653b3f518 100644 --- a/trunk/drivers/md/dm-raid1.c +++ b/trunk/drivers/md/dm-raid1.c @@ -883,7 +883,7 @@ static void do_mirror(struct mirror_set *ms) do_writes(ms, &writes); } -static void do_work(struct work_struct *ignored) +static void do_work(void *ignored) { struct mirror_set *ms; @@ -1269,7 +1269,7 @@ static int __init dm_mirror_init(void) dm_dirty_log_exit(); return r; } - INIT_WORK(&_kmirrord_work, do_work); + INIT_WORK(&_kmirrord_work, do_work, NULL); r = dm_register_target(&mirror_target); if (r < 0) { diff --git a/trunk/drivers/md/dm-snap.c b/trunk/drivers/md/dm-snap.c index b0ce2ce82278..5281e0094072 100644 --- a/trunk/drivers/md/dm-snap.c +++ b/trunk/drivers/md/dm-snap.c @@ -40,7 +40,7 @@ #define SNAPSHOT_PAGES 256 struct workqueue_struct *ksnapd; -static void flush_queued_bios(struct work_struct *work); +static void flush_queued_bios(void *data); struct pending_exception { struct exception e; @@ -88,8 +88,8 @@ struct pending_exception { * Hash table mapping origin volumes to lists of snapshots and * a lock to protect it */ -static struct kmem_cache *exception_cache; -static struct kmem_cache *pending_cache; +static kmem_cache_t *exception_cache; +static kmem_cache_t *pending_cache; static mempool_t *pending_pool; /* @@ -228,7 +228,7 @@ static int init_exception_table(struct exception_table *et, uint32_t size) return 0; } -static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem) +static void exit_exception_table(struct exception_table *et, kmem_cache_t *mem) { struct list_head *slot; struct exception *ex, *next; @@ -528,7 +528,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) } bio_list_init(&s->queued_bios); - INIT_WORK(&s->queued_bios_work, flush_queued_bios); + INIT_WORK(&s->queued_bios_work, flush_queued_bios, s); /* Add snapshot to the list of snapshots for this origin */ /* Exceptions aren't triggered till snapshot_resume() is called */ @@ -603,10 +603,9 @@ static void flush_bios(struct bio *bio) } } -static void flush_queued_bios(struct work_struct *work) +static void flush_queued_bios(void *data) { - struct dm_snapshot *s = - container_of(work, struct dm_snapshot, queued_bios_work); + struct dm_snapshot *s = (struct dm_snapshot *) data; struct bio *queued_bios; unsigned long flags; diff --git a/trunk/drivers/md/dm.c b/trunk/drivers/md/dm.c index 7ec1b112a6d5..fc4f743f3b53 100644 --- a/trunk/drivers/md/dm.c +++ b/trunk/drivers/md/dm.c @@ -121,8 +121,8 @@ struct mapped_device { }; #define MIN_IOS 256 -static struct kmem_cache *_io_cache; -static struct kmem_cache *_tio_cache; +static kmem_cache_t *_io_cache; +static kmem_cache_t *_tio_cache; static int __init local_init(void) { diff --git a/trunk/drivers/md/kcopyd.c b/trunk/drivers/md/kcopyd.c index b46f6c575f7e..f1db6eff4857 100644 --- a/trunk/drivers/md/kcopyd.c +++ b/trunk/drivers/md/kcopyd.c @@ -203,7 +203,7 @@ struct kcopyd_job { /* FIXME: this should scale with the number of pages */ #define MIN_JOBS 512 -static struct kmem_cache *_job_cache; +static kmem_cache_t *_job_cache; static mempool_t *_job_pool; /* @@ -417,7 +417,7 @@ static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *)) /* 
* kcopyd does this every time it's woken up. */ -static void do_work(struct work_struct *ignored) +static void do_work(void *ignored) { /* * The order that these are called is *very* important. @@ -628,7 +628,7 @@ static int kcopyd_init(void) } kcopyd_clients++; - INIT_WORK(&_kcopyd_work, do_work); + INIT_WORK(&_kcopyd_work, do_work, NULL); mutex_unlock(&kcopyd_init_lock); return 0; } diff --git a/trunk/drivers/md/md.c b/trunk/drivers/md/md.c index 6c4345bde07e..8cbf9c9df1c3 100644 --- a/trunk/drivers/md/md.c +++ b/trunk/drivers/md/md.c @@ -39,10 +39,10 @@ #include #include #include /* for invalidate_bdev */ +#include #include #include #include -#include #include diff --git a/trunk/drivers/md/raid5.c b/trunk/drivers/md/raid5.c index 52914d5cec76..69c3e201fa3b 100644 --- a/trunk/drivers/md/raid5.c +++ b/trunk/drivers/md/raid5.c @@ -348,7 +348,7 @@ static int grow_one_stripe(raid5_conf_t *conf) static int grow_stripes(raid5_conf_t *conf, int num) { - struct kmem_cache *sc; + kmem_cache_t *sc; int devs = conf->raid_disks; sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev)); @@ -397,7 +397,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize) LIST_HEAD(newstripes); struct disk_info *ndisks; int err = 0; - struct kmem_cache *sc; + kmem_cache_t *sc; int i; if (newsize <= conf->pool_size) diff --git a/trunk/drivers/media/dvb/b2c2/flexcop-pci.c b/trunk/drivers/media/dvb/b2c2/flexcop-pci.c index 6e166801505d..06893243f3d4 100644 --- a/trunk/drivers/media/dvb/b2c2/flexcop-pci.c +++ b/trunk/drivers/media/dvb/b2c2/flexcop-pci.c @@ -63,7 +63,7 @@ struct flexcop_pci { unsigned long last_irq; - struct delayed_work irq_check_work; + struct work_struct irq_check_work; struct flexcop_device *fc_dev; }; @@ -97,10 +97,9 @@ static int flexcop_pci_write_ibi_reg(struct flexcop_device *fc, flexcop_ibi_regi return 0; } -static void flexcop_pci_irq_check_work(struct work_struct *work) +static void flexcop_pci_irq_check_work(void *data) { - struct flexcop_pci *fc_pci = - container_of(work, struct flexcop_pci, irq_check_work.work); + struct flexcop_pci *fc_pci = data; struct flexcop_device *fc = fc_pci->fc_dev; flexcop_ibi_value v = fc->read_ibi_reg(fc,sram_dest_reg_714); @@ -372,7 +371,7 @@ static int flexcop_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e if ((ret = flexcop_pci_dma_init(fc_pci)) != 0) goto err_fc_exit; - INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work); + INIT_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work, fc_pci); return ret; diff --git a/trunk/drivers/media/dvb/cinergyT2/cinergyT2.c b/trunk/drivers/media/dvb/cinergyT2/cinergyT2.c index 9123147e376f..8a7dd507cf6e 100644 --- a/trunk/drivers/media/dvb/cinergyT2/cinergyT2.c +++ b/trunk/drivers/media/dvb/cinergyT2/cinergyT2.c @@ -128,7 +128,7 @@ struct cinergyt2 { struct dvbt_set_parameters_msg param; struct dvbt_get_status_msg status; - struct delayed_work query_work; + struct work_struct query_work; wait_queue_head_t poll_wq; int pending_fe_events; @@ -142,7 +142,7 @@ struct cinergyt2 { #ifdef ENABLE_RC struct input_dev *rc_input_dev; char phys[64]; - struct delayed_work rc_query_work; + struct work_struct rc_query_work; int rc_input_event; u32 rc_last_code; unsigned long last_event_jiffies; @@ -287,7 +287,7 @@ static int cinergyt2_alloc_stream_urbs (struct cinergyt2 *cinergyt2) int i; cinergyt2->streambuf = usb_buffer_alloc(cinergyt2->udev, STREAM_URB_COUNT*STREAM_BUF_SIZE, - GFP_KERNEL, &cinergyt2->streambuf_dmahandle); + SLAB_KERNEL, &cinergyt2->streambuf_dmahandle); if 
(!cinergyt2->streambuf) { dprintk(1, "failed to alloc consistent stream memory area, bailing out!\n"); return -ENOMEM; @@ -723,10 +723,9 @@ static struct dvb_device cinergyt2_fe_template = { #ifdef ENABLE_RC -static void cinergyt2_query_rc (struct work_struct *work) +static void cinergyt2_query_rc (void *data) { - struct cinergyt2 *cinergyt2 = - container_of(work, struct cinergyt2, rc_query_work.work); + struct cinergyt2 *cinergyt2 = data; char buf[1] = { CINERGYT2_EP1_GET_RC_EVENTS }; struct cinergyt2_rc_event rc_events[12]; int n, len, i; @@ -807,7 +806,7 @@ static int cinergyt2_register_rc(struct cinergyt2 *cinergyt2) strlcat(cinergyt2->phys, "/input0", sizeof(cinergyt2->phys)); cinergyt2->rc_input_event = KEY_MAX; cinergyt2->rc_last_code = ~0; - INIT_DELAYED_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc); + INIT_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc, cinergyt2); input_dev->name = DRIVER_NAME " remote control"; input_dev->phys = cinergyt2->phys; @@ -848,10 +847,9 @@ static inline void cinergyt2_resume_rc(struct cinergyt2 *cinergyt2) { } #endif /* ENABLE_RC */ -static void cinergyt2_query (struct work_struct *work) +static void cinergyt2_query (void *data) { - struct cinergyt2 *cinergyt2 = - container_of(work, struct cinergyt2, query_work.work); + struct cinergyt2 *cinergyt2 = (struct cinergyt2 *) data; char cmd [] = { CINERGYT2_EP1_GET_TUNER_STATUS }; struct dvbt_get_status_msg *s = &cinergyt2->status; uint8_t lock_bits; @@ -895,7 +893,7 @@ static int cinergyt2_probe (struct usb_interface *intf, mutex_init(&cinergyt2->sem); init_waitqueue_head (&cinergyt2->poll_wq); - INIT_DELAYED_WORK(&cinergyt2->query_work, cinergyt2_query); + INIT_WORK(&cinergyt2->query_work, cinergyt2_query, cinergyt2); cinergyt2->udev = interface_to_usbdev(intf); cinergyt2->param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS; diff --git a/trunk/drivers/media/dvb/dvb-core/dvb_frontend.c b/trunk/drivers/media/dvb/dvb-core/dvb_frontend.c index e85972222ab4..a2ab2eebfc68 100644 --- a/trunk/drivers/media/dvb/dvb-core/dvb_frontend.c +++ b/trunk/drivers/media/dvb/dvb-core/dvb_frontend.c @@ -34,7 +34,7 @@ #include #include #include -#include +#include #include #include diff --git a/trunk/drivers/media/dvb/dvb-core/dvb_net.c b/trunk/drivers/media/dvb/dvb-core/dvb_net.c index ebf4dc5190f6..8859ab74f0fe 100644 --- a/trunk/drivers/media/dvb/dvb-core/dvb_net.c +++ b/trunk/drivers/media/dvb/dvb-core/dvb_net.c @@ -127,7 +127,6 @@ struct dvb_net_priv { int in_use; struct net_device_stats stats; u16 pid; - struct net_device *net; struct dvb_net *host; struct dmx_demux *demux; struct dmx_section_feed *secfeed; @@ -1124,11 +1123,10 @@ static int dvb_set_mc_filter (struct net_device *dev, struct dev_mc_list *mc) } -static void wq_set_multicast_list (struct work_struct *work) +static void wq_set_multicast_list (void *data) { - struct dvb_net_priv *priv = - container_of(work, struct dvb_net_priv, set_multicast_list_wq); - struct net_device *dev = priv->net; + struct net_device *dev = data; + struct dvb_net_priv *priv = dev->priv; dvb_net_feed_stop(dev); priv->rx_mode = RX_MODE_UNI; @@ -1169,11 +1167,9 @@ static void dvb_net_set_multicast_list (struct net_device *dev) } -static void wq_restart_net_feed (struct work_struct *work) +static void wq_restart_net_feed (void *data) { - struct dvb_net_priv *priv = - container_of(work, struct dvb_net_priv, restart_net_feed_wq); - struct net_device *dev = priv->net; + struct net_device *dev = data; if (netif_running(dev)) { dvb_net_feed_stop(dev); @@ -1280,7 +1276,6 @@ static int 
dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype) dvbnet->device[if_num] = net; priv = net->priv; - priv->net = net; priv->demux = dvbnet->demux; priv->pid = pid; priv->rx_mode = RX_MODE_UNI; @@ -1289,8 +1284,8 @@ static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype) priv->feedtype = feedtype; reset_ule(priv); - INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list); - INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed); + INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list, net); + INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed, net); mutex_init(&priv->mutex); net->base_addr = pid; diff --git a/trunk/drivers/media/dvb/dvb-usb/dvb-usb-remote.c b/trunk/drivers/media/dvb/dvb-usb/dvb-usb-remote.c index 794e4471561c..0a3a0b6c2350 100644 --- a/trunk/drivers/media/dvb/dvb-usb/dvb-usb-remote.c +++ b/trunk/drivers/media/dvb/dvb-usb/dvb-usb-remote.c @@ -13,10 +13,9 @@ * * TODO: Fix the repeat rate of the input device. */ -static void dvb_usb_read_remote_control(struct work_struct *work) +static void dvb_usb_read_remote_control(void *data) { - struct dvb_usb_device *d = - container_of(work, struct dvb_usb_device, rc_query_work.work); + struct dvb_usb_device *d = data; u32 event; int state; @@ -129,7 +128,7 @@ int dvb_usb_remote_init(struct dvb_usb_device *d) input_register_device(d->rc_input_dev); - INIT_DELAYED_WORK(&d->rc_query_work, dvb_usb_read_remote_control); + INIT_WORK(&d->rc_query_work, dvb_usb_read_remote_control, d); info("schedule remote query interval to %d msecs.", d->props.rc_interval); schedule_delayed_work(&d->rc_query_work,msecs_to_jiffies(d->props.rc_interval)); diff --git a/trunk/drivers/media/dvb/dvb-usb/dvb-usb.h b/trunk/drivers/media/dvb/dvb-usb/dvb-usb.h index 0d721731a524..376c45a8e779 100644 --- a/trunk/drivers/media/dvb/dvb-usb/dvb-usb.h +++ b/trunk/drivers/media/dvb/dvb-usb/dvb-usb.h @@ -369,7 +369,7 @@ struct dvb_usb_device { /* remote control */ struct input_dev *rc_input_dev; char rc_phys[64]; - struct delayed_work rc_query_work; + struct work_struct rc_query_work; u32 last_event; int last_state; diff --git a/trunk/drivers/media/dvb/dvb-usb/usb-urb.c b/trunk/drivers/media/dvb/dvb-usb/usb-urb.c index 397f51a7b2ad..78035ee824ca 100644 --- a/trunk/drivers/media/dvb/dvb-usb/usb-urb.c +++ b/trunk/drivers/media/dvb/dvb-usb/usb-urb.c @@ -116,7 +116,7 @@ static int usb_allocate_stream_buffers(struct usb_data_stream *stream, int num, for (stream->buf_num = 0; stream->buf_num < num; stream->buf_num++) { deb_mem("allocating buffer %d\n",stream->buf_num); if (( stream->buf_list[stream->buf_num] = - usb_buffer_alloc(stream->udev, size, GFP_ATOMIC, + usb_buffer_alloc(stream->udev, size, SLAB_ATOMIC, &stream->dma_addr[stream->buf_num]) ) == NULL) { deb_mem("not enough memory for urb-buffer allocation.\n"); usb_free_stream_buffers(stream); diff --git a/trunk/drivers/media/dvb/frontends/l64781.c b/trunk/drivers/media/dvb/frontends/l64781.c index 1aeacb1c4af7..f3bc82e44a28 100644 --- a/trunk/drivers/media/dvb/frontends/l64781.c +++ b/trunk/drivers/media/dvb/frontends/l64781.c @@ -36,7 +36,7 @@ struct l64781_state { struct dvb_frontend frontend; /* private demodulator data */ - unsigned int first:1; + int first:1; }; #define dprintk(args...) 
\ diff --git a/trunk/drivers/media/dvb/ttusb-dec/ttusb_dec.c b/trunk/drivers/media/dvb/ttusb-dec/ttusb_dec.c index 10b121ada833..8135f3e76aeb 100644 --- a/trunk/drivers/media/dvb/ttusb-dec/ttusb_dec.c +++ b/trunk/drivers/media/dvb/ttusb-dec/ttusb_dec.c @@ -1244,7 +1244,7 @@ static int ttusb_dec_init_usb(struct ttusb_dec *dec) return -ENOMEM; } dec->irq_buffer = usb_buffer_alloc(dec->udev,IRQ_PACKET_SIZE, - GFP_ATOMIC, &dec->irq_dma_handle); + SLAB_ATOMIC, &dec->irq_dma_handle); if(!dec->irq_buffer) { return -ENOMEM; } diff --git a/trunk/drivers/media/radio/Kconfig b/trunk/drivers/media/radio/Kconfig index 920b63f8cf05..6d96b17a7f81 100644 --- a/trunk/drivers/media/radio/Kconfig +++ b/trunk/drivers/media/radio/Kconfig @@ -173,6 +173,38 @@ config RADIO_MAESTRO To compile this driver as a module, choose M here: the module will be called radio-maestro. +config RADIO_MIROPCM20 + tristate "miroSOUND PCM20 radio" + depends on ISA && VIDEO_V4L1 && SOUND_ACI_MIXER + ---help--- + Choose Y here if you have this FM radio card. You also need to say Y + to "ACI mixer (miroSOUND PCM1-pro/PCM12/PCM20 radio)" (in "Sound") + for this to work. + + In order to control your radio card, you will need to use programs + that are compatible with the Video For Linux API. Information on + this API and pointers to "v4l" programs may be found at + . + + To compile this driver as a module, choose M here: the + module will be called miropcm20. + +config RADIO_MIROPCM20_RDS + tristate "miroSOUND PCM20 radio RDS user interface (EXPERIMENTAL)" + depends on RADIO_MIROPCM20 && EXPERIMENTAL + ---help--- + Choose Y here if you want to see RDS/RBDS information like + RadioText, Programme Service name, Clock Time and date, Programme + Type and Traffic Announcement/Programme identification. + + It's not possible to read the raw RDS packets from the device, so + the driver cant provide an V4L interface for this. But the + availability of RDS is reported over V4L by the basic driver + already. Here RDS can be read from files in /dev/v4l/rds. + + To compile this driver as a module, choose M here: the + module will be called miropcm20-rds. 
+ config RADIO_SF16FMI tristate "SF16FMI Radio" depends on ISA && VIDEO_V4L2 diff --git a/trunk/drivers/media/video/cpia_pp.c b/trunk/drivers/media/video/cpia_pp.c index b12cec94f4cc..41f4b8d17559 100644 --- a/trunk/drivers/media/video/cpia_pp.c +++ b/trunk/drivers/media/video/cpia_pp.c @@ -82,8 +82,6 @@ struct pp_cam_entry { struct pardevice *pdev; struct parport *port; struct work_struct cb_task; - void (*cb_func)(void *cbdata); - void *cb_data; int open_count; wait_queue_head_t wq_stream; /* image state flags */ @@ -132,20 +130,6 @@ static void cpia_parport_disable_irq( struct parport *port ) { #define PARPORT_CHUNK_SIZE PAGE_SIZE -static void cpia_pp_run_callback(struct work_struct *work) -{ - void (*cb_func)(void *cbdata); - void *cb_data; - struct pp_cam_entry *cam; - - cam = container_of(work, struct pp_cam_entry, cb_task); - cb_func = cam->cb_func; - cb_data = cam->cb_data; - work_release(work); - - cb_func(cb_data); -} - /**************************************************************************** * * CPiA-specific low-level parport functions for nibble uploads @@ -680,9 +664,7 @@ static int cpia_pp_registerCallback(void *privdata, void (*cb)(void *cbdata), vo int retval = 0; if(cam->port->irq != PARPORT_IRQ_NONE) { - cam->cb_func = cb; - cam->cb_data = cbdata; - INIT_WORK_NAR(&cam->cb_task, cpia_pp_run_callback); + INIT_WORK(&cam->cb_task, cb, cbdata); } else { retval = -1; } diff --git a/trunk/drivers/media/video/cx88/cx88-input.c b/trunk/drivers/media/video/cx88/cx88-input.c index e60a0a52e4b2..57e1c024a547 100644 --- a/trunk/drivers/media/video/cx88/cx88-input.c +++ b/trunk/drivers/media/video/cx88/cx88-input.c @@ -145,9 +145,9 @@ static void ir_timer(unsigned long data) schedule_work(&ir->work); } -static void cx88_ir_work(struct work_struct *work) +static void cx88_ir_work(void *data) { - struct cx88_IR *ir = container_of(work, struct cx88_IR, work); + struct cx88_IR *ir = data; unsigned long timeout; cx88_ir_handle_key(ir); @@ -308,7 +308,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci) core->ir = ir; if (ir->polling) { - INIT_WORK(&ir->work, cx88_ir_work); + INIT_WORK(&ir->work, cx88_ir_work, ir); init_timer(&ir->timer); ir->timer.function = ir_timer; ir->timer.data = (unsigned long)ir; diff --git a/trunk/drivers/media/video/ir-kbd-i2c.c b/trunk/drivers/media/video/ir-kbd-i2c.c index ab87e7bfe84f..1457b1602221 100644 --- a/trunk/drivers/media/video/ir-kbd-i2c.c +++ b/trunk/drivers/media/video/ir-kbd-i2c.c @@ -268,9 +268,9 @@ static void ir_timer(unsigned long data) schedule_work(&ir->work); } -static void ir_work(struct work_struct *work) +static void ir_work(void *data) { - struct IR_i2c *ir = container_of(work, struct IR_i2c, work); + struct IR_i2c *ir = data; ir_key_poll(ir); mod_timer(&ir->timer, jiffies+HZ/10); } @@ -400,7 +400,7 @@ static int ir_attach(struct i2c_adapter *adap, int addr, ir->input->name,ir->input->phys,adap->name); /* start polling via eventd */ - INIT_WORK(&ir->work, ir_work); + INIT_WORK(&ir->work, ir_work, ir); init_timer(&ir->timer); ir->timer.function = ir_timer; ir->timer.data = (unsigned long)ir; diff --git a/trunk/drivers/media/video/msp3400-driver.c b/trunk/drivers/media/video/msp3400-driver.c index e1b56dc13c3f..cf43df3fe708 100644 --- a/trunk/drivers/media/video/msp3400-driver.c +++ b/trunk/drivers/media/video/msp3400-driver.c @@ -56,7 +56,7 @@ #include #include #include -#include +#include #include "msp3400-driver.h" /* ---------------------------------------------------------------------- */ diff --git 
a/trunk/drivers/media/video/pvrusb2/pvrusb2-context.c b/trunk/drivers/media/video/pvrusb2/pvrusb2-context.c index cf129746205d..f129f316d20e 100644 --- a/trunk/drivers/media/video/pvrusb2/pvrusb2-context.c +++ b/trunk/drivers/media/video/pvrusb2/pvrusb2-context.c @@ -45,21 +45,16 @@ static void pvr2_context_trigger_poll(struct pvr2_context *mp) } -static void pvr2_context_poll(struct work_struct *work) +static void pvr2_context_poll(struct pvr2_context *mp) { - struct pvr2_context *mp = - container_of(work, struct pvr2_context, workpoll); pvr2_context_enter(mp); do { pvr2_hdw_poll(mp->hdw); } while (0); pvr2_context_exit(mp); } -static void pvr2_context_setup(struct work_struct *work) +static void pvr2_context_setup(struct pvr2_context *mp) { - struct pvr2_context *mp = - container_of(work, struct pvr2_context, workinit); - pvr2_context_enter(mp); do { if (!pvr2_hdw_dev_ok(mp->hdw)) break; pvr2_hdw_setup(mp->hdw); @@ -97,8 +92,8 @@ struct pvr2_context *pvr2_context_create( } mp->workqueue = create_singlethread_workqueue("pvrusb2"); - INIT_WORK(&mp->workinit, pvr2_context_setup); - INIT_WORK(&mp->workpoll, pvr2_context_poll); + INIT_WORK(&mp->workinit,(void (*)(void*))pvr2_context_setup,mp); + INIT_WORK(&mp->workpoll,(void (*)(void*))pvr2_context_poll,mp); queue_work(mp->workqueue,&mp->workinit); done: return mp; diff --git a/trunk/drivers/media/video/saa6588.c b/trunk/drivers/media/video/saa6588.c index 92eabf88a09b..7b9859c33018 100644 --- a/trunk/drivers/media/video/saa6588.c +++ b/trunk/drivers/media/video/saa6588.c @@ -324,9 +324,9 @@ static void saa6588_timer(unsigned long data) schedule_work(&s->work); } -static void saa6588_work(struct work_struct *work) +static void saa6588_work(void *data) { - struct saa6588 *s = container_of(work, struct saa6588, work); + struct saa6588 *s = (struct saa6588 *)data; saa6588_i2c_poll(s); mod_timer(&s->timer, jiffies + msecs_to_jiffies(20)); @@ -419,7 +419,7 @@ static int saa6588_attach(struct i2c_adapter *adap, int addr, int kind) saa6588_configure(s); /* start polling via eventd */ - INIT_WORK(&s->work, saa6588_work); + INIT_WORK(&s->work, saa6588_work, s); init_timer(&s->timer); s->timer.function = saa6588_timer; s->timer.data = (unsigned long)s; diff --git a/trunk/drivers/media/video/saa7134/saa7134-empress.c b/trunk/drivers/media/video/saa7134/saa7134-empress.c index daaae870a2c4..65d044086ce9 100644 --- a/trunk/drivers/media/video/saa7134/saa7134-empress.c +++ b/trunk/drivers/media/video/saa7134/saa7134-empress.c @@ -343,10 +343,9 @@ static struct video_device saa7134_empress_template = .minor = -1, }; -static void empress_signal_update(struct work_struct *work) +static void empress_signal_update(void* data) { - struct saa7134_dev* dev = - container_of(work, struct saa7134_dev, empress_workqueue); + struct saa7134_dev* dev = (struct saa7134_dev*) data; if (dev->nosignal) { dprintk("no video signal\n"); @@ -379,7 +378,7 @@ static int empress_init(struct saa7134_dev *dev) "%s empress (%s)", dev->name, saa7134_boards[dev->board].name); - INIT_WORK(&dev->empress_workqueue, empress_signal_update); + INIT_WORK(&dev->empress_workqueue, empress_signal_update, (void*) dev); err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER, empress_nr[dev->nr]); @@ -400,7 +399,7 @@ static int empress_init(struct saa7134_dev *dev) sizeof(struct saa7134_buf), dev); - empress_signal_update(&dev->empress_workqueue); + empress_signal_update(dev); return 0; } diff --git a/trunk/drivers/media/video/tvaudio.c b/trunk/drivers/media/video/tvaudio.c index 
d506dfaa45a9..fcaef4bf8289 100644 --- a/trunk/drivers/media/video/tvaudio.c +++ b/trunk/drivers/media/video/tvaudio.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include diff --git a/trunk/drivers/media/video/video-buf-dvb.c b/trunk/drivers/media/video/video-buf-dvb.c index fcc5467e7636..f53edf1923b7 100644 --- a/trunk/drivers/media/video/video-buf-dvb.c +++ b/trunk/drivers/media/video/video-buf-dvb.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include diff --git a/trunk/drivers/media/video/vivi.c b/trunk/drivers/media/video/vivi.c index 9986de5cb3d6..3c8dc72dc8e9 100644 --- a/trunk/drivers/media/video/vivi.c +++ b/trunk/drivers/media/video/vivi.c @@ -36,7 +36,6 @@ #include #include #include -#include /* Wake up at about 30 fps */ #define WAKE_NUMERATOR 30 diff --git a/trunk/drivers/message/fusion/mptbase.c b/trunk/drivers/message/fusion/mptbase.c index 6e068cf1049b..051b7c5b8f03 100644 --- a/trunk/drivers/message/fusion/mptbase.c +++ b/trunk/drivers/message/fusion/mptbase.c @@ -347,7 +347,7 @@ mpt_reply(MPT_ADAPTER *ioc, u32 pa) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_interrupt - MPT adapter (IOC) specific interrupt handler. * @irq: irq number (not used) * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure @@ -387,16 +387,14 @@ mpt_interrupt(int irq, void *bus_id) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * mpt_base_reply - MPT base driver's callback routine +/* + * mpt_base_reply - MPT base driver's callback routine; all base driver + * "internal" request/reply processing is routed here. + * Currently used for EventNotification and EventAck handling. * @ioc: Pointer to MPT_ADAPTER structure * @mf: Pointer to original MPT request frame * @reply: Pointer to MPT reply frame (NULL if TurboReply) * - * MPT base driver's callback routine; all base driver - * "internal" request/reply processing is routed here. - * Currently used for EventNotification and EventAck handling. - * * Returns 1 indicating original alloc'd request frame ptr * should be freed, or 0 if it shouldn't. */ @@ -532,7 +530,7 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply) * @dclass: Protocol driver's class (%MPT_DRIVER_CLASS enum value) * * This routine is called by a protocol-specific driver (SCSI host, - * LAN, SCSI target) to register its reply callback routine. Each + * LAN, SCSI target) to register it's reply callback routine. Each * protocol-specific driver must do this before it will be able to * use any IOC resources, such as obtaining request frames. * @@ -574,7 +572,7 @@ mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass) * mpt_deregister - Deregister a protocol drivers resources. * @cb_idx: previously registered callback handle * - * Each protocol-specific driver should call this routine when its + * Each protocol-specific driver should call this routine when it's * module is unloaded. */ void @@ -619,7 +617,7 @@ mpt_event_register(int cb_idx, MPT_EVHANDLER ev_cbfunc) * * Each protocol-specific driver should call this routine * when it does not (or can no longer) handle events, - * or when its module is unloaded. + * or when it's module is unloaded. 
*/ void mpt_event_deregister(int cb_idx) @@ -658,7 +656,7 @@ mpt_reset_register(int cb_idx, MPT_RESETHANDLER reset_func) * * Each protocol-specific driver should call this routine * when it does not (or can no longer) handle IOC reset handling, - * or when its module is unloaded. + * or when it's module is unloaded. */ void mpt_reset_deregister(int cb_idx) @@ -672,8 +670,6 @@ mpt_reset_deregister(int cb_idx) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * mpt_device_driver_register - Register device driver hooks - * @dd_cbfunc: driver callbacks struct - * @cb_idx: MPT protocol driver index */ int mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, int cb_idx) @@ -700,7 +696,6 @@ mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, int cb_idx) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * mpt_device_driver_deregister - DeRegister device driver hooks - * @cb_idx: MPT protocol driver index */ void mpt_device_driver_deregister(int cb_idx) @@ -892,7 +887,8 @@ mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mpt_send_handshake_request - Send MPT request via doorbell handshake method. + * mpt_send_handshake_request - Send MPT request via doorbell + * handshake method. * @handle: Handle of registered MPT protocol driver * @ioc: Pointer to MPT adapter structure * @reqBytes: Size of the request in bytes @@ -985,13 +981,10 @@ mpt_send_handshake_request(int handle, MPT_ADAPTER *ioc, int reqBytes, u32 *req, /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mpt_host_page_access_control - control the IOC's Host Page Buffer access + * mpt_host_page_access_control - provides mechanism for the host + * driver to control the IOC's Host Page Buffer access. * @ioc: Pointer to MPT adapter structure * @access_control_value: define bits below - * @sleepFlag: Specifies whether the process can sleep - * - * Provides mechanism for the host driver to control the IOC's - * Host Page Buffer access. * * Access Control Value - bits[15:12] * 0h Reserved @@ -1029,10 +1022,10 @@ mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int slee /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * mpt_host_page_alloc - allocate system memory for the fw - * @ioc: Pointer to pointer to IOC adapter - * @ioc_init: Pointer to ioc init config page - * * If we already allocated memory in past, then resend the same pointer. + * ioc@: Pointer to pointer to IOC adapter + * ioc_init@: Pointer to ioc init config page + * * Returns 0 for success, non-zero for failure. */ static int @@ -1098,15 +1091,12 @@ return 0; /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mpt_verify_adapter - Given IOC identifier, set pointer to its adapter structure. + * mpt_verify_adapter - Given a unique IOC identifier, set pointer to + * the associated MPT adapter structure. * @iocid: IOC unique identifier (integer) * @iocpp: Pointer to pointer to IOC adapter * - * Given a unique IOC identifier, set pointer to the associated MPT - * adapter structure. - * - * Returns iocid and sets iocpp if iocid is found. - * Returns -1 if iocid is not found. + * Returns iocid and sets iocpp. 
*/ int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp) @@ -1125,10 +1115,9 @@ mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_attach - Install a PCI intelligent MPT adapter. * @pdev: Pointer to pci_dev structure - * @id: PCI device ID information * * This routine performs all the steps necessary to bring the IOC of * a MPT adapter to a OPERATIONAL state. This includes registering @@ -1428,9 +1417,10 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_detach - Remove a PCI intelligent MPT adapter. * @pdev: Pointer to pci_dev structure + * */ void @@ -1476,10 +1466,10 @@ mpt_detach(struct pci_dev *pdev) */ #ifdef CONFIG_PM /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_suspend - Fusion MPT base driver suspend routine. - * @pdev: Pointer to pci_dev structure - * @state: new state to enter + * + * */ int mpt_suspend(struct pci_dev *pdev, pm_message_t state) @@ -1515,9 +1505,10 @@ mpt_suspend(struct pci_dev *pdev, pm_message_t state) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_resume - Fusion MPT base driver resume routine. - * @pdev: Pointer to pci_dev structure + * + * */ int mpt_resume(struct pci_dev *pdev) @@ -1575,7 +1566,7 @@ mpt_signal_reset(int index, MPT_ADAPTER *ioc, int reset_phase) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_do_ioc_recovery - Initialize or recover MPT adapter. * @ioc: Pointer to MPT adapter structure * @reason: Event word / reason @@ -1901,15 +1892,13 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * mpt_detect_bound_ports - Search for matching PCI bus/dev_function +/* + * mpt_detect_bound_ports - Search for PCI bus/dev_function + * which matches PCI bus/dev_function (+/-1) for newly discovered 929, + * 929X, 1030 or 1035. * @ioc: Pointer to MPT adapter structure * @pdev: Pointer to (struct pci_dev) structure * - * Search for PCI bus/dev_function which matches - * PCI bus/dev_function (+/-1) for newly discovered 929, - * 929X, 1030 or 1035. - * * If match on PCI dev_function +/-1 is found, bind the two MPT adapters * using alt_ioc pointer fields in their %MPT_ADAPTER structures. */ @@ -1956,9 +1945,9 @@ mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_adapter_disable - Disable misbehaving MPT adapter. - * @ioc: Pointer to MPT adapter structure + * @this: Pointer to MPT adapter structure */ static void mpt_adapter_disable(MPT_ADAPTER *ioc) @@ -2057,8 +2046,9 @@ mpt_adapter_disable(MPT_ADAPTER *ioc) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * mpt_adapter_dispose - Free all resources associated with an MPT adapter +/* + * mpt_adapter_dispose - Free all resources associated with a MPT + * adapter. * @ioc: Pointer to MPT adapter structure * * This routine unregisters h/w resources and frees all alloc'd memory @@ -2109,8 +2099,8 @@ mpt_adapter_dispose(MPT_ADAPTER *ioc) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * MptDisplayIocCapabilities - Disply IOC's capabilities. 
+/* + * MptDisplayIocCapabilities - Disply IOC's capacilities. * @ioc: Pointer to MPT adapter structure */ static void @@ -2152,7 +2142,7 @@ MptDisplayIocCapabilities(MPT_ADAPTER *ioc) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * MakeIocReady - Get IOC to a READY state, using KickStart if needed. * @ioc: Pointer to MPT_ADAPTER structure * @force: Force hard KickStart of IOC @@ -2289,7 +2279,7 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_GetIocState - Get the current state of a MPT adapter. * @ioc: Pointer to MPT_ADAPTER structure * @cooked: Request raw or cooked IOC state @@ -2314,7 +2304,7 @@ mpt_GetIocState(MPT_ADAPTER *ioc, int cooked) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * GetIocFacts - Send IOCFacts request to MPT adapter. * @ioc: Pointer to MPT_ADAPTER structure * @sleepFlag: Specifies whether the process can sleep @@ -2488,7 +2478,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * GetPortFacts - Send PortFacts request to MPT adapter. * @ioc: Pointer to MPT_ADAPTER structure * @portnum: Port number @@ -2555,7 +2545,7 @@ GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * SendIocInit - Send IOCInit request to MPT adapter. * @ioc: Pointer to MPT_ADAPTER structure * @sleepFlag: Specifies whether the process can sleep @@ -2640,7 +2630,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag) } /* No need to byte swap the multibyte fields in the reply - * since we don't even look at its contents. + * since we don't even look at it's contents. */ dhsprintk((MYIOC_s_INFO_FMT "Sending PortEnable (req @ %p)\n", @@ -2682,7 +2672,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * SendPortEnable - Send PortEnable request to MPT adapter port. * @ioc: Pointer to MPT_ADAPTER structure * @portnum: Port number to enable @@ -2733,13 +2723,9 @@ SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag) return rc; } -/** - * mpt_alloc_fw_memory - allocate firmware memory - * @ioc: Pointer to MPT_ADAPTER structure - * @size: total FW bytes - * - * If memory has already been allocated, the same (cached) value - * is returned. +/* + * ioc: Pointer to MPT_ADAPTER structure + * size - total FW bytes */ void mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size) @@ -2756,12 +2742,9 @@ mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size) ioc->alloc_total += size; } } -/** - * mpt_free_fw_memory - free firmware memory - * @ioc: Pointer to MPT_ADAPTER structure - * - * If alt_img is NULL, delete from ioc structure. - * Else, delete a secondary image in same format. +/* + * If alt_img is NULL, delete from ioc structure. + * Else, delete a secondary image in same format. */ void mpt_free_fw_memory(MPT_ADAPTER *ioc) @@ -2780,7 +2763,7 @@ mpt_free_fw_memory(MPT_ADAPTER *ioc) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_do_upload - Construct and Send FWUpload request to MPT adapter port. 
* @ioc: Pointer to MPT_ADAPTER structure * @sleepFlag: Specifies whether the process can sleep @@ -2882,10 +2865,10 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_downloadboot - DownloadBoot code * @ioc: Pointer to MPT_ADAPTER structure - * @pFwHeader: Pointer to firmware header info + * @flag: Specify which part of IOC memory is to be uploaded. * @sleepFlag: Specifies whether the process can sleep * * FwDownloadBoot requires Programmed IO access. @@ -3088,7 +3071,7 @@ mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * KickStart - Perform hard reset of MPT adapter. * @ioc: Pointer to MPT_ADAPTER structure * @force: Force hard reset @@ -3162,12 +3145,12 @@ KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_diag_reset - Perform hard reset of the adapter. * @ioc: Pointer to MPT_ADAPTER structure * @ignore: Set if to honor and clear to ignore * the reset history bit - * @sleepFlag: CAN_SLEEP if called in a non-interrupt thread, + * @sleepflag: CAN_SLEEP if called in a non-interrupt thread, * else set to NO_SLEEP (use mdelay instead) * * This routine places the adapter in diagnostic mode via the @@ -3453,12 +3436,11 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * SendIocReset - Send IOCReset request to MPT adapter. * @ioc: Pointer to MPT_ADAPTER structure * @reset_type: reset type, expected values are * %MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET or %MPI_FUNCTION_IO_UNIT_RESET - * @sleepFlag: Specifies whether the process can sleep * * Send IOCReset request to the MPT adapter. * @@ -3512,12 +3494,11 @@ SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * initChainBuffers - Allocate memory for and initialize chain buffers - * @ioc: Pointer to MPT_ADAPTER structure - * - * Allocates memory for and initializes chain buffers, - * chain buffer control arrays and spinlock. +/* + * initChainBuffers - Allocate memory for and initialize + * chain buffers, chain buffer control arrays and spinlock. + * @hd: Pointer to MPT_SCSI_HOST structure + * @init: If set, initialize the spin lock. */ static int initChainBuffers(MPT_ADAPTER *ioc) @@ -3613,7 +3594,7 @@ initChainBuffers(MPT_ADAPTER *ioc) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * PrimeIocFifos - Initialize IOC request and reply FIFOs. * @ioc: Pointer to MPT_ADAPTER structure * @@ -3910,15 +3891,15 @@ mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req, } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * WaitForDoorbellAck - Wait for IOC doorbell handshake acknowledge +/* + * WaitForDoorbellAck - Wait for IOC to clear the IOP_DOORBELL_STATUS bit + * in it's IntStatus register. * @ioc: Pointer to MPT_ADAPTER structure * @howlong: How long to wait (in seconds) * @sleepFlag: Specifies whether the process can sleep * * This routine waits (up to ~2 seconds max) for IOC doorbell - * handshake ACKnowledge, indicated by the IOP_DOORBELL_STATUS - * bit in its IntStatus register being clear. + * handshake ACKnowledge. 
* * Returns a negative value on failure, else wait loop count. */ @@ -3961,14 +3942,14 @@ WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * WaitForDoorbellInt - Wait for IOC to set its doorbell interrupt bit +/* + * WaitForDoorbellInt - Wait for IOC to set the HIS_DOORBELL_INTERRUPT bit + * in it's IntStatus register. * @ioc: Pointer to MPT_ADAPTER structure * @howlong: How long to wait (in seconds) * @sleepFlag: Specifies whether the process can sleep * - * This routine waits (up to ~2 seconds max) for IOC doorbell interrupt - * (MPI_HIS_DOORBELL_INTERRUPT) to be set in the IntStatus register. + * This routine waits (up to ~2 seconds max) for IOC doorbell interrupt. * * Returns a negative value on failure, else wait loop count. */ @@ -4010,8 +3991,8 @@ WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * WaitForDoorbellReply - Wait for and capture an IOC handshake reply. +/* + * WaitForDoorbellReply - Wait for and capture a IOC handshake reply. * @ioc: Pointer to MPT_ADAPTER structure * @howlong: How long to wait (in seconds) * @sleepFlag: Specifies whether the process can sleep @@ -4096,7 +4077,7 @@ WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * GetLanConfigPages - Fetch LANConfig pages. * @ioc: Pointer to MPT_ADAPTER structure * @@ -4207,9 +4188,12 @@ GetLanConfigPages(MPT_ADAPTER *ioc) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * mptbase_sas_persist_operation - Perform operation on SAS Persistent Table +/* + * mptbase_sas_persist_operation - Perform operation on SAS Persitent Table * @ioc: Pointer to MPT_ADAPTER structure + * @sas_address: 64bit SAS Address for operation. + * @target_id: specified target for operation + * @bus: specified bus for operation * @persist_opcode: see below * * MPI_SAS_OP_CLEAR_NOT_PRESENT - Free all persist TargetID mappings for @@ -4218,7 +4202,7 @@ GetLanConfigPages(MPT_ADAPTER *ioc) * * NOTE: Don't use not this function during interrupt time. * - * Returns 0 for success, non-zero error + * Returns: 0 for success, non-zero error */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -4415,7 +4399,7 @@ mptbase_raid_process_event_data(MPT_ADAPTER *ioc, } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * GetIoUnitPage2 - Retrieve BIOS version and boot order information. 
* @ioc: Pointer to MPT_ADAPTER structure * @@ -4473,8 +4457,7 @@ GetIoUnitPage2(MPT_ADAPTER *ioc) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * mpt_GetScsiPortSettings - read SCSI Port Page 0 and 2 +/* mpt_GetScsiPortSettings - read SCSI Port Page 0 and 2 * @ioc: Pointer to a Adapter Strucutre * @portnum: IOC port number * @@ -4661,8 +4644,7 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * mpt_readScsiDevicePageHeaders - save version and length of SDP1 +/* mpt_readScsiDevicePageHeaders - save version and length of SDP1 * @ioc: Pointer to a Adapter Strucutre * @portnum: IOC port number * @@ -5014,8 +4996,9 @@ mpt_read_ioc_pg_1(MPT_ADAPTER *ioc) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * SendEventNotification - Send EventNotification (on or off) request to adapter +/* + * SendEventNotification - Send EventNotification (on or off) request + * to MPT adapter. * @ioc: Pointer to MPT_ADAPTER structure * @EvSwitch: Event switch flags */ @@ -5079,8 +5062,8 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * mpt_config - Generic function to issue config message - * @ioc: Pointer to an adapter structure - * @pCfg: Pointer to a configuration structure. Struct contains + * @ioc - Pointer to an adapter structure + * @cfg - Pointer to a configuration structure. Struct contains * action, page address, direction, physical address * and pointer to a configuration page header * Page header is updated. @@ -5205,8 +5188,8 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * mpt_timer_expired - Callback for timer process. +/* + * mpt_timer_expired - Call back for timer process. * Used only internal config functionality. * @data: Pointer to MPT_SCSI_HOST recast as an unsigned long */ @@ -5231,12 +5214,12 @@ mpt_timer_expired(unsigned long data) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_ioc_reset - Base cleanup for hard reset * @ioc: Pointer to the adapter structure * @reset_phase: Indicates pre- or post-reset functionality * - * Remark: Frees resources with internally generated commands. + * Remark: Free's resources with internally generated commands. */ static int mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) @@ -5288,7 +5271,7 @@ mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) * procfs (%MPT_PROCFS_MPTBASEDIR/...) support stuff... */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * procmpt_create - Create %MPT_PROCFS_MPTBASEDIR entries. * * Returns 0 for success, non-zero for failure. @@ -5314,7 +5297,7 @@ procmpt_create(void) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * procmpt_destroy - Tear down %MPT_PROCFS_MPTBASEDIR entries. * * Returns 0 for success, non-zero for failure. @@ -5328,16 +5311,16 @@ procmpt_destroy(void) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * procmpt_summary_read - Handle read request of a summary file +/* + * procmpt_summary_read - Handle read request from /proc/mpt/summary + * or from /proc/mpt/iocN/summary. 
* @buf: Pointer to area to write information * @start: Pointer to start pointer * @offset: Offset to start writing - * @request: Amount of read data requested + * @request: * @eof: Pointer to EOF integer * @data: Pointer * - * Handles read request from /proc/mpt/summary or /proc/mpt/iocN/summary. * Returns number of characters written to process performing the read. */ static int @@ -5372,12 +5355,12 @@ procmpt_summary_read(char *buf, char **start, off_t offset, int request, int *eo } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * procmpt_version_read - Handle read request from /proc/mpt/version. * @buf: Pointer to area to write information * @start: Pointer to start pointer * @offset: Offset to start writing - * @request: Amount of read data requested + * @request: * @eof: Pointer to EOF integer * @data: Pointer * @@ -5428,12 +5411,12 @@ procmpt_version_read(char *buf, char **start, off_t offset, int request, int *eo } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * procmpt_iocinfo_read - Handle read request from /proc/mpt/iocN/info. * @buf: Pointer to area to write information * @start: Pointer to start pointer * @offset: Offset to start writing - * @request: Amount of read data requested + * @request: * @eof: Pointer to EOF integer * @data: Pointer * @@ -5594,17 +5577,16 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mpt_HardResetHandler - Generic reset handler + * mpt_HardResetHandler - Generic reset handler, issue SCSI Task + * Management call based on input arg values. If TaskMgmt fails, + * return associated SCSI request. * @ioc: Pointer to MPT_ADAPTER structure * @sleepFlag: Indicates if sleep or schedule must be called. * - * Issues SCSI Task Management call based on input arg values. - * If TaskMgmt fails, returns associated SCSI request. - * * Remark: _HardResetHandler can be invoked from an interrupt thread (timer) * or a non-interrupt thread. In the former, must not call schedule(). * - * Note: A return of -1 is a FATAL error case, as it means a + * Remark: A return of -1 is a FATAL error case, as it means a * FW reload/initialization failed. * * Returns 0 for SUCCESS or -1 if FAILED. @@ -5953,14 +5935,13 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * ProcessEventNotification - Route EventNotificationReply to all event handlers +/* + * ProcessEventNotification - Route a received EventNotificationReply to + * all currently regeistered event handlers. * @ioc: Pointer to MPT_ADAPTER structure * @pEventReply: Pointer to EventNotification reply frame * @evHandlers: Pointer to integer, number of event handlers * - * Routes a received EventNotificationReply to all currently registered - * event handlers. * Returns sum of event handlers return values. */ static int @@ -6075,7 +6056,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_fc_log_info - Log information returned from Fibre Channel IOC. 
* @ioc: Pointer to MPT_ADAPTER structure * @log_info: U32 LogInfo reply word from the IOC @@ -6096,7 +6077,7 @@ mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_spi_log_info - Log information returned from SCSI Parallel IOC. * @ioc: Pointer to MPT_ADAPTER structure * @mr: Pointer to MPT reply frame @@ -6219,7 +6200,7 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info) }; /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_sas_log_info - Log information returned from SAS IOC. * @ioc: Pointer to MPT_ADAPTER structure * @log_info: U32 LogInfo reply word from the IOC @@ -6274,7 +6255,7 @@ union loginfo_type { } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_sp_ioc_info - IOC information returned from SCSI Parallel IOC. * @ioc: Pointer to MPT_ADAPTER structure * @ioc_status: U32 IOCStatus word from IOC @@ -6435,7 +6416,7 @@ EXPORT_SYMBOL(mpt_free_fw_memory); EXPORT_SYMBOL(mptbase_sas_persist_operation); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * fusion_init - Fusion MPT base driver initialization routine. * * Returns 0 for success, non-zero for failure. @@ -6475,7 +6456,7 @@ fusion_init(void) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * fusion_exit - Perform driver unload cleanup. * * This routine frees all resources associated with each MPT adapter diff --git a/trunk/drivers/message/fusion/mptfc.c b/trunk/drivers/message/fusion/mptfc.c index ca2f9107f145..1dd491773150 100644 --- a/trunk/drivers/message/fusion/mptfc.c +++ b/trunk/drivers/message/fusion/mptfc.c @@ -1018,10 +1018,9 @@ mptfc_init_host_attr(MPT_ADAPTER *ioc,int portnum) } static void -mptfc_setup_reset(struct work_struct *work) +mptfc_setup_reset(void *arg) { - MPT_ADAPTER *ioc = - container_of(work, MPT_ADAPTER, fc_setup_reset_work); + MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; u64 pn; struct mptfc_rport_info *ri; @@ -1044,10 +1043,9 @@ mptfc_setup_reset(struct work_struct *work) } static void -mptfc_rescan_devices(struct work_struct *work) +mptfc_rescan_devices(void *arg) { - MPT_ADAPTER *ioc = - container_of(work, MPT_ADAPTER, fc_rescan_work); + MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; int ii; u64 pn; struct mptfc_rport_info *ri; @@ -1156,8 +1154,8 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) } spin_lock_init(&ioc->fc_rescan_work_lock); - INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices); - INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset); + INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices,(void *)ioc); + INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset, (void *)ioc); spin_lock_irqsave(&ioc->FreeQlock, flags); @@ -1395,7 +1393,8 @@ mptfc_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mptfc_init - Register MPT adapter(s) as SCSI host(s) with SCSI mid-layer. + * mptfc_init - Register MPT adapter(s) as SCSI host(s) with + * linux scsi mid-layer. * * Returns 0 for success, non-zero for failure. 
*/ @@ -1439,7 +1438,7 @@ mptfc_init(void) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mptfc_remove - Remove fc infrastructure for devices + * mptfc_remove - Removed fc infrastructure for devices * @pdev: Pointer to pci_dev structure * */ diff --git a/trunk/drivers/message/fusion/mptlan.c b/trunk/drivers/message/fusion/mptlan.c index b7c4407c5e3f..314c3a27585d 100644 --- a/trunk/drivers/message/fusion/mptlan.c +++ b/trunk/drivers/message/fusion/mptlan.c @@ -111,8 +111,7 @@ struct mpt_lan_priv { u32 total_received; struct net_device_stats stats; /* Per device statistics */ - struct delayed_work post_buckets_task; - struct net_device *dev; + struct work_struct post_buckets_task; unsigned long post_buckets_active; }; @@ -133,7 +132,7 @@ static int lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, static int mpt_lan_open(struct net_device *dev); static int mpt_lan_reset(struct net_device *dev); static int mpt_lan_close(struct net_device *dev); -static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv); +static void mpt_lan_post_receive_buckets(void *dev_id); static void mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority); static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg); @@ -346,7 +345,7 @@ mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i; spin_unlock_irqrestore(&priv->rxfidx_lock, flags); } else { - mpt_lan_post_receive_buckets(priv); + mpt_lan_post_receive_buckets(dev); netif_wake_queue(dev); } @@ -442,7 +441,7 @@ mpt_lan_open(struct net_device *dev) dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n")); - mpt_lan_post_receive_buckets(priv); + mpt_lan_post_receive_buckets(dev); printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n", IOC_AND_NETDEV_NAMES_s_s(dev)); @@ -855,7 +854,7 @@ mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority) if (test_and_set_bit(0, &priv->post_buckets_active) == 0) { if (priority) { - schedule_delayed_work(&priv->post_buckets_task, 0); + schedule_work(&priv->post_buckets_task); } else { schedule_delayed_work(&priv->post_buckets_task, 1); dioprintk((KERN_INFO MYNAM ": post_buckets queued on " @@ -1189,9 +1188,10 @@ mpt_lan_receive_post_reply(struct net_device *dev, /* Simple SGE's only at the moment */ static void -mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv) +mpt_lan_post_receive_buckets(void *dev_id) { - struct net_device *dev = priv->dev; + struct net_device *dev = dev_id; + struct mpt_lan_priv *priv = dev->priv; MPT_ADAPTER *mpt_dev = priv->mpt_dev; MPT_FRAME_HDR *mf; LANReceivePostRequest_t *pRecvReq; @@ -1335,13 +1335,6 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv) clear_bit(0, &priv->post_buckets_active); } -static void -mpt_lan_post_receive_buckets_work(struct work_struct *work) -{ - mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv, - post_buckets_task.work)); -} - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static struct net_device * mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum) @@ -1357,13 +1350,11 @@ mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum) priv = netdev_priv(dev); - priv->dev = dev; priv->mpt_dev = mpt_dev; priv->pnum = pnum; - memset(&priv->post_buckets_task, 0, sizeof(priv->post_buckets_task)); - INIT_DELAYED_WORK(&priv->post_buckets_task, - mpt_lan_post_receive_buckets_work); + memset(&priv->post_buckets_task, 0, sizeof(struct work_struct)); + 
INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev); priv->post_buckets_active = 0; dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n", diff --git a/trunk/drivers/message/fusion/mptsas.c b/trunk/drivers/message/fusion/mptsas.c index 4f0c530e47b0..b752a479f6db 100644 --- a/trunk/drivers/message/fusion/mptsas.c +++ b/trunk/drivers/message/fusion/mptsas.c @@ -2006,10 +2006,9 @@ __mptsas_discovery_work(MPT_ADAPTER *ioc) *(Mutex LOCKED) */ static void -mptsas_discovery_work(struct work_struct *work) +mptsas_discovery_work(void * arg) { - struct mptsas_discovery_event *ev = - container_of(work, struct mptsas_discovery_event, work); + struct mptsas_discovery_event *ev = arg; MPT_ADAPTER *ioc = ev->ioc; mutex_lock(&ioc->sas_discovery_mutex); @@ -2069,9 +2068,9 @@ mptsas_find_phyinfo_by_target(MPT_ADAPTER *ioc, u32 id) * Work queue thread to clear the persitency table */ static void -mptsas_persist_clear_table(struct work_struct *work) +mptsas_persist_clear_table(void * arg) { - MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task); + MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT); } @@ -2094,10 +2093,9 @@ mptsas_reprobe_target(struct scsi_target *starget, int uld_attach) * Work queue thread to handle SAS hotplug events */ static void -mptsas_hotplug_work(struct work_struct *work) +mptsas_hotplug_work(void *arg) { - struct mptsas_hotplug_event *ev = - container_of(work, struct mptsas_hotplug_event, work); + struct mptsas_hotplug_event *ev = arg; MPT_ADAPTER *ioc = ev->ioc; struct mptsas_phyinfo *phy_info; struct sas_rphy *rphy; @@ -2343,7 +2341,7 @@ mptsas_send_sas_event(MPT_ADAPTER *ioc, break; } - INIT_WORK(&ev->work, mptsas_hotplug_work); + INIT_WORK(&ev->work, mptsas_hotplug_work, ev); ev->ioc = ioc; ev->handle = le16_to_cpu(sas_event_data->DevHandle); ev->parent_handle = @@ -2368,7 +2366,7 @@ mptsas_send_sas_event(MPT_ADAPTER *ioc, * Persistent table is full. 
*/ INIT_WORK(&ioc->sas_persist_task, - mptsas_persist_clear_table); + mptsas_persist_clear_table, (void *)ioc); schedule_work(&ioc->sas_persist_task); break; case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA: @@ -2397,7 +2395,7 @@ mptsas_send_raid_event(MPT_ADAPTER *ioc, return; } - INIT_WORK(&ev->work, mptsas_hotplug_work); + INIT_WORK(&ev->work, mptsas_hotplug_work, ev); ev->ioc = ioc; ev->id = raid_event_data->VolumeID; ev->event_type = MPTSAS_IGNORE_EVENT; @@ -2476,7 +2474,7 @@ mptsas_send_discovery_event(MPT_ADAPTER *ioc, ev = kzalloc(sizeof(*ev), GFP_ATOMIC); if (!ev) return; - INIT_WORK(&ev->work, mptsas_discovery_work); + INIT_WORK(&ev->work, mptsas_discovery_work, ev); ev->ioc = ioc; schedule_work(&ev->work); }; @@ -2513,7 +2511,8 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply) break; case MPI_EVENT_PERSISTENT_TABLE_FULL: INIT_WORK(&ioc->sas_persist_task, - mptsas_persist_clear_table); + mptsas_persist_clear_table, + (void *)ioc); schedule_work(&ioc->sas_persist_task); break; case MPI_EVENT_SAS_DISCOVERY: diff --git a/trunk/drivers/message/fusion/mptscsih.c b/trunk/drivers/message/fusion/mptscsih.c index 2c72c36b8171..30524dc54b16 100644 --- a/trunk/drivers/message/fusion/mptscsih.c +++ b/trunk/drivers/message/fusion/mptscsih.c @@ -1230,15 +1230,15 @@ mptscsih_host_info(MPT_ADAPTER *ioc, char *pbuf, off_t offset, int len) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * mptscsih_proc_info - Return information about MPT adapter - * @host: scsi host struct - * @buffer: if write, user data; if read, buffer for user - * @start: returns the buffer address - * @offset: if write, 0; if read, the current offset into the buffer from - * the previous read. - * @length: if write, return length; - * @func: write = 1; read = 0 * * (linux scsi_host_template.info routine) + * + * buffer: if write, user data; if read, buffer for user + * length: if write, return length; + * offset: if write, 0; if read, the current offset into the buffer from + * the previous read. + * hostno: scsi host number + * func: if write = 1; if read = 0 */ int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, @@ -1902,7 +1902,8 @@ mptscsih_bus_reset(struct scsi_cmnd * SCpnt) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mptscsih_host_reset - Perform a SCSI host adapter RESET (new_eh variant) + * mptscsih_host_reset - Perform a SCSI host adapter RESET! + * new_eh variant * @SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to * * (linux scsi_host_template.eh_host_reset_handler routine) @@ -1948,7 +1949,8 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mptscsih_tm_pending_wait - wait for pending task management request to complete + * mptscsih_tm_pending_wait - wait for pending task management request to + * complete. * @hd: Pointer to MPT host structure. * * Returns {SUCCESS,FAILED}. @@ -1980,7 +1982,6 @@ mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd) /** * mptscsih_tm_wait_for_completion - wait for completion of TM task * @hd: Pointer to MPT host structure. - * @timeout: timeout in seconds * * Returns {SUCCESS,FAILED}. */ @@ -3428,7 +3429,8 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io) /** * mptscsih_synchronize_cache - Send SYNCHRONIZE_CACHE to all disks. 
* @hd: Pointer to a SCSI HOST structure - * @vdevice: virtual target device + * @vtarget: per device private data + * @lun: lun * * Uses the ISR, but with special processing. * MUST be single-threaded. diff --git a/trunk/drivers/message/fusion/mptspi.c b/trunk/drivers/message/fusion/mptspi.c index 36641da59289..e4cc3dd5fc9f 100644 --- a/trunk/drivers/message/fusion/mptspi.c +++ b/trunk/drivers/message/fusion/mptspi.c @@ -646,10 +646,9 @@ struct work_queue_wrapper { int disk; }; -static void mpt_work_wrapper(struct work_struct *work) +static void mpt_work_wrapper(void *data) { - struct work_queue_wrapper *wqw = - container_of(work, struct work_queue_wrapper, work); + struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; struct _MPT_SCSI_HOST *hd = wqw->hd; struct Scsi_Host *shost = hd->ioc->sh; struct scsi_device *sdev; @@ -696,7 +695,7 @@ static void mpt_dv_raid(struct _MPT_SCSI_HOST *hd, int disk) disk); return; } - INIT_WORK(&wqw->work, mpt_work_wrapper); + INIT_WORK(&wqw->work, mpt_work_wrapper, wqw); wqw->hd = hd; wqw->disk = disk; @@ -785,10 +784,9 @@ MODULE_DEVICE_TABLE(pci, mptspi_pci_table); * renegotiate for a given target */ static void -mptspi_dv_renegotiate_work(struct work_struct *work) +mptspi_dv_renegotiate_work(void *data) { - struct work_queue_wrapper *wqw = - container_of(work, struct work_queue_wrapper, work); + struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; struct _MPT_SCSI_HOST *hd = wqw->hd; struct scsi_device *sdev; @@ -806,7 +804,7 @@ mptspi_dv_renegotiate(struct _MPT_SCSI_HOST *hd) if (!wqw) return; - INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work); + INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work, wqw); wqw->hd = hd; schedule_work(&wqw->work); @@ -1100,7 +1098,8 @@ static struct pci_driver mptspi_driver = { /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mptspi_init - Register MPT adapter(s) as SCSI host(s) with SCSI mid-layer. + * mptspi_init - Register MPT adapter(s) as SCSI host(s) with + * linux scsi mid-layer. * * Returns 0 for success, non-zero for failure. */ @@ -1134,6 +1133,7 @@ mptspi_init(void) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * mptspi_exit - Unregisters MPT adapter(s) + * */ static void __exit mptspi_exit(void) diff --git a/trunk/drivers/message/i2o/bus-osm.c b/trunk/drivers/message/i2o/bus-osm.c index c463dc2efc09..d96c687aee93 100644 --- a/trunk/drivers/message/i2o/bus-osm.c +++ b/trunk/drivers/message/i2o/bus-osm.c @@ -56,9 +56,6 @@ static int i2o_bus_scan(struct i2o_device *dev) /** * i2o_bus_store_scan - Scan the I2O Bus Adapter * @d: device which should be scanned - * @attr: device_attribute - * @buf: output buffer - * @count: buffer size * * Returns count. */ diff --git a/trunk/drivers/message/i2o/device.c b/trunk/drivers/message/i2o/device.c index b9df143e4ff1..ee183053fa23 100644 --- a/trunk/drivers/message/i2o/device.c +++ b/trunk/drivers/message/i2o/device.c @@ -54,8 +54,8 @@ static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd, * @dev: I2O device to claim * @drv: I2O driver which wants to claim the device * - * Do the leg work to assign a device to a given OSM. If the claim succeeds, - * the owner is the primary. If the attempt fails a negative errno code + * Do the leg work to assign a device to a given OSM. If the claim succeed + * the owner of the rimary. If the attempt fails a negative errno code * is returned. On success zero is returned. 
*/ int i2o_device_claim(struct i2o_device *dev) @@ -208,23 +208,24 @@ static struct i2o_device *i2o_device_alloc(void) /** * i2o_device_add - allocate a new I2O device and add it to the IOP - * @c: I2O controller that the device is on + * @iop: I2O controller where the device is on * @entry: LCT entry of the I2O device * * Allocate a new I2O device and initialize it with the LCT entry. The * device is appended to the device list of the controller. * - * Returns zero on success, or a -ve errno. + * Returns a pointer to the I2O device on success or negative error code + * on failure. */ -static int i2o_device_add(struct i2o_controller *c, i2o_lct_entry *entry) +static struct i2o_device *i2o_device_add(struct i2o_controller *c, + i2o_lct_entry * entry) { struct i2o_device *i2o_dev, *tmp; - int rc; i2o_dev = i2o_device_alloc(); if (IS_ERR(i2o_dev)) { printk(KERN_ERR "i2o: unable to allocate i2o device\n"); - return PTR_ERR(i2o_dev); + return i2o_dev; } i2o_dev->lct_data = *entry; @@ -235,9 +236,7 @@ static int i2o_device_add(struct i2o_controller *c, i2o_lct_entry *entry) i2o_dev->iop = c; i2o_dev->device.parent = &c->device; - rc = device_register(&i2o_dev->device); - if (rc) - goto err; + device_register(&i2o_dev->device); list_add_tail(&i2o_dev->list, &c->devices); @@ -271,16 +270,12 @@ static int i2o_device_add(struct i2o_controller *c, i2o_lct_entry *entry) pr_debug("i2o: device %s added\n", i2o_dev->device.bus_id); - return 0; - -err: - kfree(i2o_dev); - return rc; + return i2o_dev; } /** * i2o_device_remove - remove an I2O device from the I2O core - * @i2o_dev: I2O device which should be released + * @dev: I2O device which should be released * * Is used on I2O controller removal or LCT modification, when the device * is removed from the system. Note that the device could still hang diff --git a/trunk/drivers/message/i2o/driver.c b/trunk/drivers/message/i2o/driver.c index 9104b65ff70f..64130227574f 100644 --- a/trunk/drivers/message/i2o/driver.c +++ b/trunk/drivers/message/i2o/driver.c @@ -34,7 +34,9 @@ static spinlock_t i2o_drivers_lock; static struct i2o_driver **i2o_drivers; /** - * i2o_bus_match - Tell if I2O device class id matches the class ids of the I2O driver (OSM) + * i2o_bus_match - Tell if a I2O device class id match the class ids of + * the I2O driver (OSM) + * * @dev: device which should be verified * @drv: the driver to match against * @@ -230,7 +232,7 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m) break; } - INIT_WORK(&evt->work, drv->event); + INIT_WORK(&evt->work, (void (*)(void *))drv->event, evt); queue_work(drv->event_queue, &evt->work); return 1; } @@ -246,7 +248,7 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m) /** * i2o_driver_notify_controller_add_all - Send notify of added controller - * @c: newly added controller + * to all I2O drivers * * Send notifications to all registered drivers that a new controller was * added. @@ -265,8 +267,8 @@ void i2o_driver_notify_controller_add_all(struct i2o_controller *c) } /** - * i2o_driver_notify_controller_remove_all - Send notify of removed controller - * @c: controller that is being removed + * i2o_driver_notify_controller_remove_all - Send notify of removed + * controller to all I2O drivers * * Send notifications to all registered drivers that a controller was * removed. 
@@ -285,8 +287,8 @@ void i2o_driver_notify_controller_remove_all(struct i2o_controller *c) } /** - * i2o_driver_notify_device_add_all - Send notify of added device - * @i2o_dev: newly added I2O device + * i2o_driver_notify_device_add_all - Send notify of added device to all + * I2O drivers * * Send notifications to all registered drivers that a device was added. */ @@ -304,8 +306,8 @@ void i2o_driver_notify_device_add_all(struct i2o_device *i2o_dev) } /** - * i2o_driver_notify_device_remove_all - Send notify of removed device - * @i2o_dev: device that is being removed + * i2o_driver_notify_device_remove_all - Send notify of removed device to + * all I2O drivers * * Send notifications to all registered drivers that a device was removed. */ @@ -360,7 +362,7 @@ int __init i2o_driver_init(void) /** * i2o_driver_exit - clean up I2O drivers (OSMs) * - * Unregisters the I2O bus and frees driver array. + * Unregisters the I2O bus and free driver array. */ void __exit i2o_driver_exit(void) { diff --git a/trunk/drivers/message/i2o/exec-osm.c b/trunk/drivers/message/i2o/exec-osm.c index 902753b2c661..a2350640384b 100644 --- a/trunk/drivers/message/i2o/exec-osm.c +++ b/trunk/drivers/message/i2o/exec-osm.c @@ -94,8 +94,8 @@ static struct i2o_exec_wait *i2o_exec_wait_alloc(void) }; /** - * i2o_exec_wait_free - Free an i2o_exec_wait struct - * @wait: I2O wait data which should be cleaned up + * i2o_exec_wait_free - Free a i2o_exec_wait struct + * @i2o_exec_wait: I2O wait data which should be cleaned up */ static void i2o_exec_wait_free(struct i2o_exec_wait *wait) { @@ -105,7 +105,7 @@ static void i2o_exec_wait_free(struct i2o_exec_wait *wait) /** * i2o_msg_post_wait_mem - Post and wait a message with DMA buffers * @c: controller - * @msg: message to post + * @m: message to post * @timeout: time in seconds to wait * @dma: i2o_dma struct of the DMA buffer to free on failure * @@ -269,7 +269,6 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m, /** * i2o_exec_show_vendor_id - Displays Vendor ID of controller * @d: device of which the Vendor ID should be displayed - * @attr: device_attribute to display * @buf: buffer into which the Vendor ID should be printed * * Returns number of bytes printed into buffer. @@ -291,7 +290,6 @@ static ssize_t i2o_exec_show_vendor_id(struct device *d, /** * i2o_exec_show_product_id - Displays Product ID of controller * @d: device of which the Product ID should be displayed - * @attr: device_attribute to display * @buf: buffer into which the Product ID should be printed * * Returns number of bytes printed into buffer. @@ -367,16 +365,14 @@ static int i2o_exec_remove(struct device *dev) /** * i2o_exec_lct_modified - Called on LCT NOTIFY reply - * @work: work struct for a specific controller + * @c: I2O controller on which the LCT has modified * * This function handles asynchronus LCT NOTIFY replies. It parses the * new LCT and if the buffer for the LCT was to small sends a LCT NOTIFY * again, otherwise send LCT NOTIFY to get informed on next LCT change. 
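
With the void-* convention in force, handlers that want a typed argument (drv->event in the driver.c dispatch hunk above, i2o_exec_lct_modified() in the exec-osm hunk just below) keep their own pointer type and are cast at registration time. A minimal sketch with hypothetical names (my_evt, my_event, my_post_event):

struct my_evt {
	struct work_struct work;
	u16 tid;
};

static void my_event(struct my_evt *evt)
{
	pr_debug("event from tid %03x\n", evt->tid);
	kfree(evt);
}

static void my_post_event(struct workqueue_struct *my_wq, struct my_evt *evt)
{
	/* the cast bridges my_event's typed argument and INIT_WORK's void * */
	INIT_WORK(&evt->work, (void (*)(void *))my_event, evt);
	queue_work(my_wq, &evt->work);
}
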
*/ -static void i2o_exec_lct_modified(struct work_struct *_work) +static void i2o_exec_lct_modified(struct i2o_exec_lct_notify_work *work) { - struct i2o_exec_lct_notify_work *work = - container_of(_work, struct i2o_exec_lct_notify_work, work); u32 change_ind = 0; struct i2o_controller *c = work->c; @@ -443,7 +439,8 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m, work->c = c; - INIT_WORK(&work->work, i2o_exec_lct_modified); + INIT_WORK(&work->work, (void (*)(void *))i2o_exec_lct_modified, + work); queue_work(i2o_exec_driver.event_queue, &work->work); return 1; } @@ -463,15 +460,13 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m, /** * i2o_exec_event - Event handling function - * @work: Work item in occurring event + * @evt: Event which occurs * * Handles events send by the Executive device. At the moment does not do * anything useful. */ -static void i2o_exec_event(struct work_struct *work) +static void i2o_exec_event(struct i2o_event *evt) { - struct i2o_event *evt = container_of(work, struct i2o_event, work); - if (likely(evt->i2o_dev)) osm_debug("Event received from device: %d\n", evt->i2o_dev->lct_data.tid); diff --git a/trunk/drivers/message/i2o/i2o_block.c b/trunk/drivers/message/i2o/i2o_block.c index da9859f2caf2..eaba81bf2eca 100644 --- a/trunk/drivers/message/i2o/i2o_block.c +++ b/trunk/drivers/message/i2o/i2o_block.c @@ -259,7 +259,7 @@ static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id) /** * i2o_block_device_power - Power management for device dev * @dev: I2O device which should receive the power management request - * @op: Operation to send + * @operation: Operation which should be send * * Send a power management request to the device dev. * @@ -315,7 +315,7 @@ static inline struct i2o_block_request *i2o_block_request_alloc(void) * i2o_block_request_free - Frees a I2O block request * @ireq: I2O block request which should be freed * - * Frees the allocated memory (give it back to the request mempool). + * Fres the allocated memory (give it back to the request mempool). */ static inline void i2o_block_request_free(struct i2o_block_request *ireq) { @@ -326,7 +326,6 @@ static inline void i2o_block_request_free(struct i2o_block_request *ireq) * i2o_block_sglist_alloc - Allocate the SG list and map it * @c: I2O controller to which the request belongs * @ireq: I2O block request - * @mptr: message body pointer * * Builds the SG list and map it to be accessable by the controller. * @@ -420,18 +419,16 @@ static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req) /** * i2o_block_delayed_request_fn - delayed request queue function - * @work: the delayed request with the queue to start + * delayed_request: the delayed request with the queue to start * * If the request queue is stopped for a disk, and there is no open * request, a new event is created, which calls this function to start * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never * be started again. */ -static void i2o_block_delayed_request_fn(struct work_struct *work) +static void i2o_block_delayed_request_fn(void *delayed_request) { - struct i2o_block_delayed_request *dreq = - container_of(work, struct i2o_block_delayed_request, - work.work); + struct i2o_block_delayed_request *dreq = delayed_request; struct request_queue *q = dreq->queue; unsigned long flags; @@ -491,7 +488,7 @@ static void i2o_block_end_request(struct request *req, int uptodate, * i2o_block_reply - Block OSM reply handler. 
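
The i2o_block_delayed_request_fn() conversion above (together with the matching i2o_block.h change further down) trades struct delayed_work back for a plain struct work_struct. A sketch of how a deferred submission looks under each API, with a hypothetical bar_req wrapper, handler bar_delayed_fn and workqueue wq; the two variants are alternatives for different trees, and the HZ / 10 delay is only an example:

/* older API, restored by this patch: one work_struct covers delayed use too */
struct bar_req {
	struct work_struct work;
	struct request_queue *queue;
};

	INIT_WORK(&req->work, bar_delayed_fn, req);
	queue_delayed_work(wq, &req->work, HZ / 10);
	/* bar_delayed_fn(void *data) receives req directly */

/* newer API, removed by this patch: delayed work has its own type */
struct bar_req {
	struct delayed_work work;
	struct request_queue *queue;
};

	INIT_DELAYED_WORK(&req->work, bar_delayed_fn);
	queue_delayed_work(wq, &req->work, HZ / 10);
	/* bar_delayed_fn(struct work_struct *w) uses
	 * container_of(w, struct bar_req, work.work) to get req back */
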
* @c: I2O controller from which the message arrives * @m: message id of reply - * @msg: the actual I2O message reply + * qmsg: the actuall I2O message reply * * This function gets all the message replies. * @@ -541,9 +538,8 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m, return 1; }; -static void i2o_block_event(struct work_struct *work) +static void i2o_block_event(struct i2o_event *evt) { - struct i2o_event *evt = container_of(work, struct i2o_event, work); osm_debug("event received\n"); kfree(evt); }; @@ -603,8 +599,6 @@ static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls, /** * i2o_block_open - Open the block device - * @inode: inode for block device being opened - * @file: file to open * * Power up the device, mount and lock the media. This function is called, * if the block device is opened for access. @@ -632,8 +626,6 @@ static int i2o_block_open(struct inode *inode, struct file *file) /** * i2o_block_release - Release the I2O block device - * @inode: inode for block device being released - * @file: file to close * * Unlock and unmount the media, and power down the device. Gets called if * the block device is closed. @@ -680,8 +672,6 @@ static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo) /** * i2o_block_ioctl - Issue device specific ioctl calls. - * @inode: inode for block device ioctl - * @file: file for ioctl * @cmd: ioctl command * @arg: arg * @@ -909,7 +899,7 @@ static int i2o_block_transfer(struct request *req) /** * i2o_block_request_fn - request queue handling function - * @q: request queue from which the request could be fetched + * q: request queue from which the request could be fetched * * Takes the next request from the queue, transfers it and if no error * occurs dequeue it from the queue. 
On arrival of the reply the message @@ -948,8 +938,8 @@ static void i2o_block_request_fn(struct request_queue *q) continue; dreq->queue = q; - INIT_DELAYED_WORK(&dreq->work, - i2o_block_delayed_request_fn); + INIT_WORK(&dreq->work, i2o_block_delayed_request_fn, + dreq); if (!queue_delayed_work(i2o_block_driver.event_queue, &dreq->work, diff --git a/trunk/drivers/message/i2o/i2o_block.h b/trunk/drivers/message/i2o/i2o_block.h index 67f921b4419b..4fdaa5bda412 100644 --- a/trunk/drivers/message/i2o/i2o_block.h +++ b/trunk/drivers/message/i2o/i2o_block.h @@ -64,7 +64,7 @@ /* I2O Block OSM mempool struct */ struct i2o_block_mempool { - struct kmem_cache *slab; + kmem_cache_t *slab; mempool_t *pool; }; @@ -96,7 +96,7 @@ struct i2o_block_request { /* I2O Block device delayed request */ struct i2o_block_delayed_request { - struct delayed_work work; + struct work_struct work; struct request_queue *queue; }; diff --git a/trunk/drivers/message/i2o/i2o_config.c b/trunk/drivers/message/i2o/i2o_config.c index 1de30d711671..7d23e082bf26 100644 --- a/trunk/drivers/message/i2o/i2o_config.c +++ b/trunk/drivers/message/i2o/i2o_config.c @@ -265,11 +265,7 @@ static int i2o_cfg_swdl(unsigned long arg) return -ENOMEM; } - if (__copy_from_user(buffer.virt, kxfer.buf, fragsize)) { - i2o_msg_nop(c, msg); - i2o_dma_free(&c->pdev->dev, &buffer); - return -EFAULT; - } + __copy_from_user(buffer.virt, kxfer.buf, fragsize); msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7); msg->u.head[1] = @@ -520,6 +516,7 @@ static int i2o_cfg_evt_get(unsigned long arg, struct file *fp) return 0; } +#ifdef CONFIG_I2O_EXT_ADAPTEC #ifdef CONFIG_COMPAT static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, unsigned long arg) @@ -762,7 +759,6 @@ static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd, #endif -#ifdef CONFIG_I2O_EXT_ADAPTEC static int i2o_cfg_passthru(unsigned long arg) { struct i2o_cmd_passthru __user *cmd = diff --git a/trunk/drivers/message/i2o/i2o_proc.c b/trunk/drivers/message/i2o/i2o_proc.c index a61cb17c5c12..3d2e76eea93e 100644 --- a/trunk/drivers/message/i2o/i2o_proc.c +++ b/trunk/drivers/message/i2o/i2o_proc.c @@ -163,7 +163,7 @@ static int print_serial_number(struct seq_file *seq, u8 * serialno, int max_len) * i2o_get_class_name - do i2o class name lookup * @class: class number * - * Return a descriptive string for an i2o class. 
+ * Return a descriptive string for an i2o class */ static const char *i2o_get_class_name(int class) { diff --git a/trunk/drivers/message/i2o/i2o_scsi.c b/trunk/drivers/message/i2o/i2o_scsi.c index 1045c8a518bb..6ebf38213f9f 100644 --- a/trunk/drivers/message/i2o/i2o_scsi.c +++ b/trunk/drivers/message/i2o/i2o_scsi.c @@ -220,7 +220,7 @@ static int i2o_scsi_probe(struct device *dev) u32 id = -1; u64 lun = -1; int channel = -1; - int i, rc; + int i; i2o_shost = i2o_scsi_get_host(c); if (!i2o_shost) @@ -304,20 +304,14 @@ static int i2o_scsi_probe(struct device *dev) return PTR_ERR(scsi_dev); } - rc = sysfs_create_link(&i2o_dev->device.kobj, - &scsi_dev->sdev_gendev.kobj, "scsi"); - if (rc) - goto err; + sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj, + "scsi"); osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %ld\n", i2o_dev->lct_data.tid, channel, le32_to_cpu(id), (long unsigned int)le64_to_cpu(lun)); return 0; - -err: - scsi_remove_device(scsi_dev); - return rc; }; static const char *i2o_scsi_info(struct Scsi_Host *SChost) @@ -411,7 +405,8 @@ static void i2o_scsi_notify_device_add(struct i2o_device *i2o_dev) }; /** - * i2o_scsi_notify_device_remove - Retrieve notifications of removed devices + * i2o_scsi_notify_device_remove - Retrieve notifications of removed + * devices * @i2o_dev: the I2O device which was removed * * If a I2O device is removed, we catch the notification to remove the @@ -431,7 +426,8 @@ static void i2o_scsi_notify_device_remove(struct i2o_device *i2o_dev) }; /** - * i2o_scsi_notify_controller_add - Retrieve notifications of added controllers + * i2o_scsi_notify_controller_add - Retrieve notifications of added + * controllers * @c: the controller which was added * * If a I2O controller is added, we catch the notification to add a @@ -461,7 +457,8 @@ static void i2o_scsi_notify_controller_add(struct i2o_controller *c) }; /** - * i2o_scsi_notify_controller_remove - Retrieve notifications of removed controllers + * i2o_scsi_notify_controller_remove - Retrieve notifications of removed + * controllers * @c: the controller which was removed * * If a I2O controller is removed, we catch the notification to remove the @@ -748,7 +745,7 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt) * @capacity: size in sectors * @ip: geometry array * - * This is anyone's guess quite frankly. We use the same rules everyone + * This is anyones guess quite frankly. We use the same rules everyone * else appears to and hope. It seems to work. */ diff --git a/trunk/drivers/message/i2o/pci.c b/trunk/drivers/message/i2o/pci.c index 3661e6e065d2..8287f95c8c42 100644 --- a/trunk/drivers/message/i2o/pci.c +++ b/trunk/drivers/message/i2o/pci.c @@ -259,7 +259,6 @@ static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id) /** * i2o_pci_irq_enable - Allocate interrupt for I2O controller - * @c: i2o_controller that the request is for * * Allocate an interrupt for the I2O controller, and activate interrupts * on the I2O controller. 
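
Two hunks above (i2o_cfg_swdl and i2o_scsi_probe) drop checks on __copy_from_user() and sysfs_create_link(); the removed "-" lines show the checked pattern, restated here as a standalone sketch for reference (buf, user_ptr, len, dev_a, dev_b and the unwind label are placeholders):

	/* __copy_from_user() returns the number of bytes it could NOT copy */
	if (__copy_from_user(buf, user_ptr, len)) {
		/* release whatever was set up so far, then fail the ioctl */
		return -EFAULT;
	}

	/* sysfs_create_link() returns 0 on success or a negative errno */
	err = sysfs_create_link(&dev_a->kobj, &dev_b->kobj, "scsi");
	if (err)
		goto unwind;
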
@@ -306,7 +305,7 @@ static void i2o_pci_irq_disable(struct i2o_controller *c) /** * i2o_pci_probe - Probe the PCI device for an I2O controller - * @pdev: PCI device to test + * @dev: PCI device to test * @id: id which matched with the PCI device id table * * Probe the PCI device for any device which is a memory of the @@ -448,7 +447,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev, /** * i2o_pci_remove - Removes a I2O controller from the system - * @pdev: I2O controller which should be removed + * pdev: I2O controller which should be removed * * Reset the I2O controller, disable interrupts and remove all allocated * resources. diff --git a/trunk/drivers/mfd/ucb1x00-ts.c b/trunk/drivers/mfd/ucb1x00-ts.c index ce1a48108210..82938ad6ddbd 100644 --- a/trunk/drivers/mfd/ucb1x00-ts.c +++ b/trunk/drivers/mfd/ucb1x00-ts.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include diff --git a/trunk/drivers/misc/tifm_7xx1.c b/trunk/drivers/misc/tifm_7xx1.c index 2ab7add78f94..1ba8754e9383 100644 --- a/trunk/drivers/misc/tifm_7xx1.c +++ b/trunk/drivers/misc/tifm_7xx1.c @@ -33,10 +33,9 @@ static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock) spin_unlock_irqrestore(&fm->lock, flags); } -static void tifm_7xx1_remove_media(struct work_struct *work) +static void tifm_7xx1_remove_media(void *adapter) { - struct tifm_adapter *fm = - container_of(work, struct tifm_adapter, media_remover); + struct tifm_adapter *fm = adapter; unsigned long flags; int cnt; struct tifm_dev *sock; @@ -170,10 +169,9 @@ tifm_7xx1_sock_addr(char __iomem *base_addr, unsigned int sock_num) return base_addr + ((sock_num + 1) << 10); } -static void tifm_7xx1_insert_media(struct work_struct *work) +static void tifm_7xx1_insert_media(void *adapter) { - struct tifm_adapter *fm = - container_of(work, struct tifm_adapter, media_inserter); + struct tifm_adapter *fm = adapter; unsigned long flags; tifm_media_id media_id; char *card_name = "xx"; @@ -263,7 +261,7 @@ static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state) spin_unlock_irqrestore(&fm->lock, flags); flush_workqueue(fm->wq); - tifm_7xx1_remove_media(&fm->media_remover); + tifm_7xx1_remove_media(fm); pci_set_power_state(dev, PCI_D3hot); pci_disable_device(dev); @@ -330,8 +328,8 @@ static int tifm_7xx1_probe(struct pci_dev *dev, if (!fm->sockets) goto err_out_free; - INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media); - INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media); + INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media, fm); + INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media, fm); fm->eject = tifm_7xx1_eject; pci_set_drvdata(dev, fm); @@ -386,7 +384,7 @@ static void tifm_7xx1_remove(struct pci_dev *dev) flush_workqueue(fm->wq); - tifm_7xx1_remove_media(&fm->media_remover); + tifm_7xx1_remove_media(fm); writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); free_irq(dev->irq, fm); diff --git a/trunk/drivers/misc/tifm_core.c b/trunk/drivers/misc/tifm_core.c index d61df5c3ac36..ee326136d03b 100644 --- a/trunk/drivers/misc/tifm_core.c +++ b/trunk/drivers/misc/tifm_core.c @@ -219,9 +219,8 @@ static int tifm_device_remove(struct device *dev) struct tifm_driver *drv = fm_dev->drv; if (drv) { - if (drv->remove) - drv->remove(fm_dev); - fm_dev->drv = NULL; + if (drv->remove) drv->remove(fm_dev); + fm_dev->drv = 0; } put_device(dev); diff --git a/trunk/drivers/mmc/mmc.c b/trunk/drivers/mmc/mmc.c index 6f2a282e2b97..9d190022a490 100644 --- a/trunk/drivers/mmc/mmc.c +++ b/trunk/drivers/mmc/mmc.c @@ 
-1419,16 +1419,18 @@ static void mmc_setup(struct mmc_host *host) */ void mmc_detect_change(struct mmc_host *host, unsigned long delay) { - mmc_schedule_delayed_work(&host->detect, delay); + if (delay) + mmc_schedule_delayed_work(&host->detect, delay); + else + mmc_schedule_work(&host->detect); } EXPORT_SYMBOL(mmc_detect_change); -static void mmc_rescan(struct work_struct *work) +static void mmc_rescan(void *data) { - struct mmc_host *host = - container_of(work, struct mmc_host, detect.work); + struct mmc_host *host = data; struct list_head *l, *n; unsigned char power_mode; @@ -1511,7 +1513,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) spin_lock_init(&host->lock); init_waitqueue_head(&host->wq); INIT_LIST_HEAD(&host->cards); - INIT_DELAYED_WORK(&host->detect, mmc_rescan); + INIT_WORK(&host->detect, mmc_rescan, host); /* * By default, hosts do not support SGIO or large requests. @@ -1609,7 +1611,7 @@ EXPORT_SYMBOL(mmc_suspend_host); */ int mmc_resume_host(struct mmc_host *host) { - mmc_rescan(&host->detect.work); + mmc_rescan(host); return 0; } diff --git a/trunk/drivers/mmc/mmc.h b/trunk/drivers/mmc/mmc.h index 149affe0b686..cd5e0ab3d84b 100644 --- a/trunk/drivers/mmc/mmc.h +++ b/trunk/drivers/mmc/mmc.h @@ -20,6 +20,6 @@ void mmc_remove_host_sysfs(struct mmc_host *host); void mmc_free_host_sysfs(struct mmc_host *host); int mmc_schedule_work(struct work_struct *work); -int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay); +int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay); void mmc_flush_scheduled_work(void); #endif diff --git a/trunk/drivers/mmc/mmc_sysfs.c b/trunk/drivers/mmc/mmc_sysfs.c index e334acd045bc..ac5329636045 100644 --- a/trunk/drivers/mmc/mmc_sysfs.c +++ b/trunk/drivers/mmc/mmc_sysfs.c @@ -320,10 +320,18 @@ void mmc_free_host_sysfs(struct mmc_host *host) static struct workqueue_struct *workqueue; +/* + * Internal function. Schedule work in the MMC work queue. + */ +int mmc_schedule_work(struct work_struct *work) +{ + return queue_work(workqueue, work); +} + /* * Internal function. Schedule delayed work in the MMC work queue. 
*/ -int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay) +int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay) { return queue_delayed_work(workqueue, work, delay); } diff --git a/trunk/drivers/mmc/tifm_sd.c b/trunk/drivers/mmc/tifm_sd.c index e846499a004c..0fdc55b08a6d 100644 --- a/trunk/drivers/mmc/tifm_sd.c +++ b/trunk/drivers/mmc/tifm_sd.c @@ -99,7 +99,7 @@ struct tifm_sd { struct mmc_request *req; struct work_struct cmd_handler; - struct delayed_work abort_handler; + struct work_struct abort_handler; wait_queue_head_t can_eject; size_t written_blocks; @@ -496,9 +496,9 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq) mmc_request_done(mmc, mrq); } -static void tifm_sd_end_cmd(struct work_struct *work) +static void tifm_sd_end_cmd(void *data) { - struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); + struct tifm_sd *host = data; struct tifm_dev *sock = host->dev; struct mmc_host *mmc = tifm_get_drvdata(sock); struct mmc_request *mrq; @@ -608,9 +608,9 @@ static void tifm_sd_request_nodma(struct mmc_host *mmc, struct mmc_request *mrq) mmc_request_done(mmc, mrq); } -static void tifm_sd_end_cmd_nodma(struct work_struct *work) +static void tifm_sd_end_cmd_nodma(void *data) { - struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); + struct tifm_sd *host = (struct tifm_sd*)data; struct tifm_dev *sock = host->dev; struct mmc_host *mmc = tifm_get_drvdata(sock); struct mmc_request *mrq; @@ -661,14 +661,11 @@ static void tifm_sd_end_cmd_nodma(struct work_struct *work) mmc_request_done(mmc, mrq); } -static void tifm_sd_abort(struct work_struct *work) +static void tifm_sd_abort(void *data) { - struct tifm_sd *host = - container_of(work, struct tifm_sd, abort_handler.work); - printk(KERN_ERR DRIVER_NAME ": card failed to respond for a long period of time"); - tifm_eject(host->dev); + tifm_eject(((struct tifm_sd*)data)->dev); } static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios) @@ -765,9 +762,9 @@ static struct mmc_host_ops tifm_sd_ops = { .get_ro = tifm_sd_ro }; -static void tifm_sd_register_host(struct work_struct *work) +static void tifm_sd_register_host(void *data) { - struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); + struct tifm_sd *host = (struct tifm_sd*)data; struct tifm_dev *sock = host->dev; struct mmc_host *mmc = tifm_get_drvdata(sock); unsigned long flags; @@ -775,7 +772,8 @@ static void tifm_sd_register_host(struct work_struct *work) spin_lock_irqsave(&sock->lock, flags); host->flags |= HOST_REG; PREPARE_WORK(&host->cmd_handler, - no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd); + no_dma ? 
tifm_sd_end_cmd_nodma : tifm_sd_end_cmd, + data); spin_unlock_irqrestore(&sock->lock, flags); dev_dbg(&sock->dev, "adding host\n"); mmc_add_host(mmc); @@ -801,8 +799,8 @@ static int tifm_sd_probe(struct tifm_dev *sock) host->dev = sock; host->clk_div = 61; init_waitqueue_head(&host->can_eject); - INIT_WORK(&host->cmd_handler, tifm_sd_register_host); - INIT_DELAYED_WORK(&host->abort_handler, tifm_sd_abort); + INIT_WORK(&host->cmd_handler, tifm_sd_register_host, host); + INIT_WORK(&host->abort_handler, tifm_sd_abort, host); tifm_set_drvdata(sock, mmc); sock->signal_irq = tifm_sd_signal_irq; diff --git a/trunk/drivers/mtd/devices/m25p80.c b/trunk/drivers/mtd/devices/m25p80.c index 334e078ffaff..ef4a731ca5c2 100644 --- a/trunk/drivers/mtd/devices/m25p80.c +++ b/trunk/drivers/mtd/devices/m25p80.c @@ -451,7 +451,7 @@ static int __devinit m25p_probe(struct spi_device *spi) return -ENODEV; } - flash = kzalloc(sizeof *flash, GFP_KERNEL); + flash = kzalloc(sizeof *flash, SLAB_KERNEL); if (!flash) return -ENOMEM; diff --git a/trunk/drivers/net/8139too.c b/trunk/drivers/net/8139too.c index 931028f672de..d02ed51abfcc 100644 --- a/trunk/drivers/net/8139too.c +++ b/trunk/drivers/net/8139too.c @@ -594,7 +594,7 @@ struct rtl8139_private { u32 rx_config; struct rtl_extra_stats xstats; - struct delayed_work thread; + struct work_struct thread; struct mii_if_info mii; unsigned int regs_len; @@ -636,8 +636,8 @@ static struct net_device_stats *rtl8139_get_stats (struct net_device *dev); static void rtl8139_set_rx_mode (struct net_device *dev); static void __set_rx_mode (struct net_device *dev); static void rtl8139_hw_start (struct net_device *dev); -static void rtl8139_thread (struct work_struct *work); -static void rtl8139_tx_timeout_task(struct work_struct *work); +static void rtl8139_thread (void *_data); +static void rtl8139_tx_timeout_task(void *_data); static const struct ethtool_ops rtl8139_ethtool_ops; /* write MMIO register, with flush */ @@ -1010,7 +1010,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, (debug < 0 ? 
RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1)); spin_lock_init (&tp->lock); spin_lock_init (&tp->rx_lock); - INIT_DELAYED_WORK(&tp->thread, rtl8139_thread); + INIT_WORK(&tp->thread, rtl8139_thread, dev); tp->mii.dev = dev; tp->mii.mdio_read = mdio_read; tp->mii.mdio_write = mdio_write; @@ -1596,16 +1596,15 @@ static inline void rtl8139_thread_iter (struct net_device *dev, RTL_R8 (Config1)); } -static void rtl8139_thread (struct work_struct *work) +static void rtl8139_thread (void *_data) { - struct rtl8139_private *tp = - container_of(work, struct rtl8139_private, thread.work); - struct net_device *dev = tp->mii.dev; + struct net_device *dev = _data; + struct rtl8139_private *tp = netdev_priv(dev); unsigned long thr_delay = next_tick; if (tp->watchdog_fired) { tp->watchdog_fired = 0; - rtl8139_tx_timeout_task(work); + rtl8139_tx_timeout_task(_data); } else if (rtnl_trylock()) { rtl8139_thread_iter (dev, tp, tp->mmio_addr); rtnl_unlock (); @@ -1647,11 +1646,10 @@ static inline void rtl8139_tx_clear (struct rtl8139_private *tp) /* XXX account for unsent Tx packets in tp->stats.tx_dropped */ } -static void rtl8139_tx_timeout_task (struct work_struct *work) +static void rtl8139_tx_timeout_task (void *_data) { - struct rtl8139_private *tp = - container_of(work, struct rtl8139_private, thread.work); - struct net_device *dev = tp->mii.dev; + struct net_device *dev = _data; + struct rtl8139_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; int i; u8 tmp8; @@ -1697,7 +1695,7 @@ static void rtl8139_tx_timeout (struct net_device *dev) struct rtl8139_private *tp = netdev_priv(dev); if (!tp->have_thread) { - INIT_DELAYED_WORK(&tp->thread, rtl8139_tx_timeout_task); + INIT_WORK(&tp->thread, rtl8139_tx_timeout_task, dev); schedule_delayed_work(&tp->thread, next_tick); } else tp->watchdog_fired = 1; diff --git a/trunk/drivers/net/bnx2.c b/trunk/drivers/net/bnx2.c index 5bacb7587df4..fc2f1d1c7ead 100644 --- a/trunk/drivers/net/bnx2.c +++ b/trunk/drivers/net/bnx2.c @@ -4411,9 +4411,9 @@ bnx2_open(struct net_device *dev) } static void -bnx2_reset_task(struct work_struct *work) +bnx2_reset_task(void *data) { - struct bnx2 *bp = container_of(work, struct bnx2, reset_task); + struct bnx2 *bp = data; if (!netif_running(bp->dev)) return; @@ -5702,7 +5702,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->pdev = pdev; spin_lock_init(&bp->phy_lock); - INIT_WORK(&bp->reset_task, bnx2_reset_task); + INIT_WORK(&bp->reset_task, bnx2_reset_task, bp); dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1); diff --git a/trunk/drivers/net/cassini.c b/trunk/drivers/net/cassini.c index c8126484c2be..fd2cc13f7d97 100644 --- a/trunk/drivers/net/cassini.c +++ b/trunk/drivers/net/cassini.c @@ -4066,9 +4066,9 @@ static int cas_alloc_rxds(struct cas *cp) return 0; } -static void cas_reset_task(struct work_struct *work) +static void cas_reset_task(void *data) { - struct cas *cp = container_of(work, struct cas, reset_task); + struct cas *cp = (struct cas *) data; #if 0 int pending = atomic_read(&cp->reset_task_pending); #else @@ -5006,7 +5006,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev, atomic_set(&cp->reset_task_pending_spare, 0); atomic_set(&cp->reset_task_pending_mtu, 0); #endif - INIT_WORK(&cp->reset_task, cas_reset_task); + INIT_WORK(&cp->reset_task, cas_reset_task, cp); /* Default link parameters */ if (link_mode >= 0 && link_mode <= 6) diff --git a/trunk/drivers/net/chelsio/common.h b/trunk/drivers/net/chelsio/common.h index 
74758d2c7af8..b265941e1372 100644 --- a/trunk/drivers/net/chelsio/common.h +++ b/trunk/drivers/net/chelsio/common.h @@ -279,7 +279,7 @@ struct adapter { struct petp *tp; struct port_info port[MAX_NPORTS]; - struct delayed_work stats_update_task; + struct work_struct stats_update_task; struct timer_list stats_update_timer; spinlock_t tpi_lock; diff --git a/trunk/drivers/net/chelsio/cphy.h b/trunk/drivers/net/chelsio/cphy.h index cf9143499882..60901f25014e 100644 --- a/trunk/drivers/net/chelsio/cphy.h +++ b/trunk/drivers/net/chelsio/cphy.h @@ -91,7 +91,7 @@ struct cphy { int state; /* Link status state machine */ adapter_t *adapter; /* associated adapter */ - struct delayed_work phy_update; + struct work_struct phy_update; u16 bmsr; int count; diff --git a/trunk/drivers/net/chelsio/cxgb2.c b/trunk/drivers/net/chelsio/cxgb2.c index de48eadddbc4..53bec6739812 100644 --- a/trunk/drivers/net/chelsio/cxgb2.c +++ b/trunk/drivers/net/chelsio/cxgb2.c @@ -953,11 +953,10 @@ static void t1_netpoll(struct net_device *dev) * Periodic accumulation of MAC statistics. This is used only if the MAC * does not have any other way to prevent stats counter overflow. */ -static void mac_stats_task(struct work_struct *work) +static void mac_stats_task(void *data) { int i; - struct adapter *adapter = - container_of(work, struct adapter, stats_update_task.work); + struct adapter *adapter = data; for_each_port(adapter, i) { struct port_info *p = &adapter->port[i]; @@ -978,10 +977,9 @@ static void mac_stats_task(struct work_struct *work) /* * Processes elmer0 external interrupts in process context. */ -static void ext_intr_task(struct work_struct *work) +static void ext_intr_task(void *data) { - struct adapter *adapter = - container_of(work, struct adapter, ext_intr_handler_task); + struct adapter *adapter = data; t1_elmer0_ext_intr_handler(adapter); @@ -1115,9 +1113,9 @@ static int __devinit init_one(struct pci_dev *pdev, spin_lock_init(&adapter->mac_lock); INIT_WORK(&adapter->ext_intr_handler_task, - ext_intr_task); - INIT_DELAYED_WORK(&adapter->stats_update_task, - mac_stats_task); + ext_intr_task, adapter); + INIT_WORK(&adapter->stats_update_task, mac_stats_task, + adapter); pci_set_drvdata(pdev, netdev); } diff --git a/trunk/drivers/net/chelsio/my3126.c b/trunk/drivers/net/chelsio/my3126.c index c7731b6f9de3..0b90014d5b3e 100644 --- a/trunk/drivers/net/chelsio/my3126.c +++ b/trunk/drivers/net/chelsio/my3126.c @@ -93,11 +93,9 @@ static int my3126_interrupt_handler(struct cphy *cphy) return cphy_cause_link_change; } -static void my3216_poll(struct work_struct *work) +static void my3216_poll(void *arg) { - struct cphy *cphy = container_of(work, struct cphy, phy_update.work); - - my3126_interrupt_handler(cphy); + my3126_interrupt_handler(arg); } static int my3126_set_loopback(struct cphy *cphy, int on) @@ -173,7 +171,7 @@ static struct cphy *my3126_phy_create(adapter_t *adapter, if (cphy) cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops); - INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll); + INIT_WORK(&cphy->phy_update, my3216_poll, cphy); cphy->bmsr = 0; return (cphy); diff --git a/trunk/drivers/net/e100.c b/trunk/drivers/net/e100.c index 03bf164f9e8d..3a8df479cbda 100644 --- a/trunk/drivers/net/e100.c +++ b/trunk/drivers/net/e100.c @@ -2102,10 +2102,9 @@ static void e100_tx_timeout(struct net_device *netdev) schedule_work(&nic->tx_timeout_task); } -static void e100_tx_timeout_task(struct work_struct *work) +static void e100_tx_timeout_task(struct net_device *netdev) { - struct nic *nic = container_of(work, 
struct nic, tx_timeout_task); - struct net_device *netdev = nic->netdev; + struct nic *nic = netdev_priv(netdev); DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n", readb(&nic->csr->scb.status)); @@ -2638,7 +2637,8 @@ static int __devinit e100_probe(struct pci_dev *pdev, nic->blink_timer.function = e100_blink_led; nic->blink_timer.data = (unsigned long)nic; - INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); + INIT_WORK(&nic->tx_timeout_task, + (void (*)(void *))e100_tx_timeout_task, netdev); if((err = e100_alloc(nic))) { DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n"); diff --git a/trunk/drivers/net/e1000/e1000_main.c b/trunk/drivers/net/e1000/e1000_main.c index 73f3a85fd238..32dde0adb683 100644 --- a/trunk/drivers/net/e1000/e1000_main.c +++ b/trunk/drivers/net/e1000/e1000_main.c @@ -190,7 +190,7 @@ void e1000_set_ethtool_ops(struct net_device *netdev); static void e1000_enter_82542_rst(struct e1000_adapter *adapter); static void e1000_leave_82542_rst(struct e1000_adapter *adapter); static void e1000_tx_timeout(struct net_device *dev); -static void e1000_reset_task(struct work_struct *work); +static void e1000_reset_task(struct net_device *dev); static void e1000_smartspeed(struct e1000_adapter *adapter); static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb); @@ -914,7 +914,8 @@ e1000_probe(struct pci_dev *pdev, adapter->phy_info_timer.function = &e1000_update_phy_info; adapter->phy_info_timer.data = (unsigned long) adapter; - INIT_WORK(&adapter->reset_task, e1000_reset_task); + INIT_WORK(&adapter->reset_task, + (void (*)(void *))e1000_reset_task, netdev); e1000_check_options(adapter); @@ -3305,10 +3306,9 @@ e1000_tx_timeout(struct net_device *netdev) } static void -e1000_reset_task(struct work_struct *work) +e1000_reset_task(struct net_device *netdev) { - struct e1000_adapter *adapter = - container_of(work, struct e1000_adapter, reset_task); + struct e1000_adapter *adapter = netdev_priv(netdev); e1000_reinit_locked(adapter); } diff --git a/trunk/drivers/net/ehea/ehea_main.c b/trunk/drivers/net/ehea/ehea_main.c index 83fa32f72398..6ad696101418 100644 --- a/trunk/drivers/net/ehea/ehea_main.c +++ b/trunk/drivers/net/ehea/ehea_main.c @@ -2224,12 +2224,11 @@ static int ehea_stop(struct net_device *dev) return ret; } -static void ehea_reset_port(struct work_struct *work) +static void ehea_reset_port(void *data) { int ret; - struct ehea_port *port = - container_of(work, struct ehea_port, reset_task); - struct net_device *dev = port->netdev; + struct net_device *dev = data; + struct ehea_port *port = netdev_priv(dev); port->resets++; down(&port->port_lock); @@ -2380,7 +2379,7 @@ static int ehea_setup_single_port(struct ehea_port *port, dev->tx_timeout = &ehea_tx_watchdog; dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; - INIT_WORK(&port->reset_task, ehea_reset_port); + INIT_WORK(&port->reset_task, ehea_reset_port, dev); ehea_set_ethtool_ops(dev); diff --git a/trunk/drivers/net/hamradio/baycom_epp.c b/trunk/drivers/net/hamradio/baycom_epp.c index 3c33d6f6a6a6..1ed9cccd3c11 100644 --- a/trunk/drivers/net/hamradio/baycom_epp.c +++ b/trunk/drivers/net/hamradio/baycom_epp.c @@ -168,9 +168,8 @@ struct baycom_state { int magic; struct pardevice *pdev; - struct net_device *dev; unsigned int work_running; - struct delayed_work run_work; + struct work_struct run_work; unsigned int modem; unsigned int bitrate; unsigned char stat; @@ -660,18 +659,16 @@ static int receive(struct net_device *dev, int cnt) #define GETTICK(x) #endif /* __i386__ */ -static void 
epp_bh(struct work_struct *work) +static void epp_bh(struct net_device *dev) { - struct net_device *dev; struct baycom_state *bc; struct parport *pp; unsigned char stat; unsigned char tmp[2]; unsigned int time1 = 0, time2 = 0, time3 = 0; int cnt, cnt2; - - bc = container_of(work, struct baycom_state, run_work.work); - dev = bc->dev; + + bc = netdev_priv(dev); if (!bc->work_running) return; baycom_int_freq(bc); @@ -892,7 +889,7 @@ static int epp_open(struct net_device *dev) return -EBUSY; } dev->irq = /*pp->irq*/ 0; - INIT_DELAYED_WORK(&bc->run_work, epp_bh); + INIT_WORK(&bc->run_work, (void *)(void *)epp_bh, dev); bc->work_running = 1; bc->modem = EPP_CONVENTIONAL; if (eppconfig(bc)) @@ -1216,7 +1213,6 @@ static void __init baycom_epp_dev_setup(struct net_device *dev) /* * initialize part of the baycom_state struct */ - bc->dev = dev; bc->magic = BAYCOM_MAGIC; bc->cfg.fclk = 19666600; bc->cfg.bps = 9600; diff --git a/trunk/drivers/net/hamradio/dmascc.c b/trunk/drivers/net/hamradio/dmascc.c index e6e721aff6f6..0f8b9afd55b4 100644 --- a/trunk/drivers/net/hamradio/dmascc.c +++ b/trunk/drivers/net/hamradio/dmascc.c @@ -252,7 +252,7 @@ static inline void z8530_isr(struct scc_info *info); static irqreturn_t scc_isr(int irq, void *dev_id); static void rx_isr(struct scc_priv *priv); static void special_condition(struct scc_priv *priv, int rc); -static void rx_bh(struct work_struct *); +static void rx_bh(void *arg); static void tx_isr(struct scc_priv *priv); static void es_isr(struct scc_priv *priv); static void tm_isr(struct scc_priv *priv); @@ -579,7 +579,7 @@ static int __init setup_adapter(int card_base, int type, int n) priv->param.clocks = TCTRxCP | RCRTxCP; priv->param.persist = 256; priv->param.dma = -1; - INIT_WORK(&priv->rx_work, rx_bh); + INIT_WORK(&priv->rx_work, rx_bh, priv); dev->priv = priv; sprintf(dev->name, "dmascc%i", 2 * n + i); dev->base_addr = card_base; @@ -1272,9 +1272,9 @@ static void special_condition(struct scc_priv *priv, int rc) } -static void rx_bh(struct work_struct *ugli_api) +static void rx_bh(void *arg) { - struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work); + struct scc_priv *priv = arg; int i = priv->rx_tail; int cb; unsigned long flags; diff --git a/trunk/drivers/net/irda/mcs7780.c b/trunk/drivers/net/irda/mcs7780.c index f0c61f3b2a82..b32c52ed19d7 100644 --- a/trunk/drivers/net/irda/mcs7780.c +++ b/trunk/drivers/net/irda/mcs7780.c @@ -560,9 +560,9 @@ static inline int mcs_find_endpoints(struct mcs_cb *mcs, return ret; } -static void mcs_speed_work(struct work_struct *work) +static void mcs_speed_work(void *arg) { - struct mcs_cb *mcs = container_of(work, struct mcs_cb, work); + struct mcs_cb *mcs = arg; struct net_device *netdev = mcs->netdev; mcs_speed_change(mcs); @@ -927,7 +927,7 @@ static int mcs_probe(struct usb_interface *intf, irda_qos_bits_to_value(&mcs->qos); /* Speed change work initialisation*/ - INIT_WORK(&mcs->work, mcs_speed_work); + INIT_WORK(&mcs->work, mcs_speed_work, mcs); /* Override the network functions we need to use */ ndev->hard_start_xmit = mcs_hard_xmit; diff --git a/trunk/drivers/net/irda/sir-dev.h b/trunk/drivers/net/irda/sir-dev.h index 2a57bc67ce35..9fa294a546d6 100644 --- a/trunk/drivers/net/irda/sir-dev.h +++ b/trunk/drivers/net/irda/sir-dev.h @@ -22,7 +22,7 @@ struct sir_fsm { struct semaphore sem; - struct delayed_work work; + struct work_struct work; unsigned state, substate; int param; int result; diff --git a/trunk/drivers/net/irda/sir_dev.c b/trunk/drivers/net/irda/sir_dev.c index 
17b0c3ab6201..3b5854d10c17 100644 --- a/trunk/drivers/net/irda/sir_dev.c +++ b/trunk/drivers/net/irda/sir_dev.c @@ -100,9 +100,9 @@ static int sirdev_tx_complete_fsm(struct sir_dev *dev) * Both must be unlocked/restarted on completion - but only on final exit. */ -static void sirdev_config_fsm(struct work_struct *work) +static void sirdev_config_fsm(void *data) { - struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work); + struct sir_dev *dev = data; struct sir_fsm *fsm = &dev->fsm; int next_state; int ret = -1; @@ -309,8 +309,8 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par fsm->param = param; fsm->result = 0; - INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm); - queue_delayed_work(irda_sir_wq, &fsm->work, 0); + INIT_WORK(&fsm->work, sirdev_config_fsm, dev); + queue_work(irda_sir_wq, &fsm->work); return 0; } diff --git a/trunk/drivers/net/irda/stir4200.c b/trunk/drivers/net/irda/stir4200.c index c14a74634fd5..3b4c47875935 100644 --- a/trunk/drivers/net/irda/stir4200.c +++ b/trunk/drivers/net/irda/stir4200.c @@ -50,7 +50,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/drivers/net/iseries_veth.c b/trunk/drivers/net/iseries_veth.c index d6f4f185bf37..2284e2ce1692 100644 --- a/trunk/drivers/net/iseries_veth.c +++ b/trunk/drivers/net/iseries_veth.c @@ -166,7 +166,7 @@ struct veth_msg { struct veth_lpar_connection { HvLpIndex remote_lp; - struct delayed_work statemachine_wq; + struct work_struct statemachine_wq; struct veth_msg *msgs; int num_events; struct veth_cap_data local_caps; @@ -456,7 +456,7 @@ static struct kobj_type veth_port_ktype = { static inline void veth_kick_statemachine(struct veth_lpar_connection *cnx) { - schedule_delayed_work(&cnx->statemachine_wq, 0); + schedule_work(&cnx->statemachine_wq); } static void veth_take_cap(struct veth_lpar_connection *cnx, @@ -638,11 +638,9 @@ static int veth_process_caps(struct veth_lpar_connection *cnx) } /* FIXME: The gotos here are a bit dubious */ -static void veth_statemachine(struct work_struct *work) +static void veth_statemachine(void *p) { - struct veth_lpar_connection *cnx = - container_of(work, struct veth_lpar_connection, - statemachine_wq.work); + struct veth_lpar_connection *cnx = (struct veth_lpar_connection *)p; int rlp = cnx->remote_lp; int rc; @@ -829,7 +827,7 @@ static int veth_init_connection(u8 rlp) cnx->remote_lp = rlp; spin_lock_init(&cnx->lock); - INIT_DELAYED_WORK(&cnx->statemachine_wq, veth_statemachine); + INIT_WORK(&cnx->statemachine_wq, veth_statemachine, cnx); init_timer(&cnx->ack_timer); cnx->ack_timer.function = veth_timed_ack; diff --git a/trunk/drivers/net/ixgb/ixgb_main.c b/trunk/drivers/net/ixgb/ixgb_main.c index e628126c9c49..7b127212e62b 100644 --- a/trunk/drivers/net/ixgb/ixgb_main.c +++ b/trunk/drivers/net/ixgb/ixgb_main.c @@ -106,7 +106,7 @@ static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter); static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter); void ixgb_set_ethtool_ops(struct net_device *netdev); static void ixgb_tx_timeout(struct net_device *dev); -static void ixgb_tx_timeout_task(struct work_struct *work); +static void ixgb_tx_timeout_task(struct net_device *dev); static void ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp); static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); @@ -489,7 +489,8 @@ ixgb_probe(struct pci_dev *pdev, adapter->watchdog_timer.function = &ixgb_watchdog; adapter->watchdog_timer.data = (unsigned long)adapter; - 
INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task); + INIT_WORK(&adapter->tx_timeout_task, + (void (*)(void *))ixgb_tx_timeout_task, netdev); strcpy(netdev->name, "eth%d"); if((err = register_netdev(netdev))) @@ -1492,10 +1493,9 @@ ixgb_tx_timeout(struct net_device *netdev) } static void -ixgb_tx_timeout_task(struct work_struct *work) +ixgb_tx_timeout_task(struct net_device *netdev) { - struct ixgb_adapter *adapter = - container_of(work, struct ixgb_adapter, tx_timeout_task); + struct ixgb_adapter *adapter = netdev_priv(netdev); adapter->tx_timeout_count++; ixgb_down(adapter, TRUE); diff --git a/trunk/drivers/net/lasi_82596.c b/trunk/drivers/net/lasi_82596.c index ea392f2a5aa2..f4d815bca643 100644 --- a/trunk/drivers/net/lasi_82596.c +++ b/trunk/drivers/net/lasi_82596.c @@ -119,14 +119,14 @@ #define DEB(x,y) if (i596_debug & (x)) { y; } -#define CHECK_WBACK(priv, addr,len) \ - do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_TO_DEVICE); } while (0) +#define CHECK_WBACK(addr,len) \ + do { dma_cache_sync((void *)addr, len, DMA_TO_DEVICE); } while (0) -#define CHECK_INV(priv, addr,len) \ - do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_FROM_DEVICE); } while(0) +#define CHECK_INV(addr,len) \ + do { dma_cache_sync((void *)addr, len, DMA_FROM_DEVICE); } while(0) -#define CHECK_WBACK_INV(priv, addr,len) \ - do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0) +#define CHECK_WBACK_INV(addr,len) \ + do { dma_cache_sync((void *)addr, len, DMA_BIDIRECTIONAL); } while (0) #define PA_I82596_RESET 0 /* Offsets relative to LASI-LAN-Addr.*/ @@ -449,10 +449,10 @@ static inline void MPU_PORT(struct net_device *dev, int c, dma_addr_t x) static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str) { - CHECK_INV(lp, &(lp->iscp), sizeof(struct i596_iscp)); + CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp)); while (--delcnt && lp->iscp.stat) { udelay(10); - CHECK_INV(lp, &(lp->iscp), sizeof(struct i596_iscp)); + CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp)); } if (!delcnt) { printk("%s: %s, iscp.stat %04x, didn't clear\n", @@ -466,10 +466,10 @@ static inline int wait_istat(struct net_device *dev, struct i596_private *lp, in static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str) { - CHECK_INV(lp, &(lp->scb), sizeof(struct i596_scb)); + CHECK_INV(&(lp->scb), sizeof(struct i596_scb)); while (--delcnt && lp->scb.command) { udelay(10); - CHECK_INV(lp, &(lp->scb), sizeof(struct i596_scb)); + CHECK_INV(&(lp->scb), sizeof(struct i596_scb)); } if (!delcnt) { printk("%s: %s, status %4.4x, cmd %4.4x.\n", @@ -522,7 +522,7 @@ static void i596_display_data(struct net_device *dev) rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size); rbd = rbd->v_next; } while (rbd != lp->rbd_head); - CHECK_INV(lp, lp, sizeof(struct i596_private)); + CHECK_INV(lp, sizeof(struct i596_private)); } @@ -592,7 +592,7 @@ static inline void init_rx_bufs(struct net_device *dev) rfd->b_next = WSWAPrfd(virt_to_dma(lp,lp->rfds)); rfd->cmd = CMD_EOL|CMD_FLEX; - CHECK_WBACK_INV(lp, lp, sizeof(struct i596_private)); + CHECK_WBACK_INV(lp, sizeof(struct i596_private)); } static inline void remove_rx_bufs(struct net_device *dev) @@ -629,7 +629,7 @@ static void rebuild_rx_bufs(struct net_device *dev) lp->rbd_head = lp->rbds; lp->rfds[0].rbd = WSWAPrbd(virt_to_dma(lp,lp->rbds)); - CHECK_WBACK_INV(lp, lp, sizeof(struct i596_private)); + CHECK_WBACK_INV(lp, sizeof(struct i596_private)); } @@ -663,8 +663,8 @@ static int 
init_i596_mem(struct net_device *dev) DEB(DEB_INIT, printk("%s: starting i82596.\n", dev->name)); - CHECK_WBACK(lp, &(lp->scp), sizeof(struct i596_scp)); - CHECK_WBACK(lp, &(lp->iscp), sizeof(struct i596_iscp)); + CHECK_WBACK(&(lp->scp), sizeof(struct i596_scp)); + CHECK_WBACK(&(lp->iscp), sizeof(struct i596_iscp)); MPU_PORT(dev, PORT_ALTSCP, virt_to_dma(lp,&lp->scp)); @@ -678,25 +678,25 @@ static int init_i596_mem(struct net_device *dev) rebuild_rx_bufs(dev); lp->scb.command = 0; - CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb)); + CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb)); enable_irq(dev->irq); /* enable IRQs from LAN */ DEB(DEB_INIT, printk("%s: queuing CmdConfigure\n", dev->name)); memcpy(lp->cf_cmd.i596_config, init_setup, 14); lp->cf_cmd.cmd.command = CmdConfigure; - CHECK_WBACK(lp, &(lp->cf_cmd), sizeof(struct cf_cmd)); + CHECK_WBACK(&(lp->cf_cmd), sizeof(struct cf_cmd)); i596_add_cmd(dev, &lp->cf_cmd.cmd); DEB(DEB_INIT, printk("%s: queuing CmdSASetup\n", dev->name)); memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6); lp->sa_cmd.cmd.command = CmdSASetup; - CHECK_WBACK(lp, &(lp->sa_cmd), sizeof(struct sa_cmd)); + CHECK_WBACK(&(lp->sa_cmd), sizeof(struct sa_cmd)); i596_add_cmd(dev, &lp->sa_cmd.cmd); DEB(DEB_INIT, printk("%s: queuing CmdTDR\n", dev->name)); lp->tdr_cmd.cmd.command = CmdTDR; - CHECK_WBACK(lp, &(lp->tdr_cmd), sizeof(struct tdr_cmd)); + CHECK_WBACK(&(lp->tdr_cmd), sizeof(struct tdr_cmd)); i596_add_cmd(dev, &lp->tdr_cmd.cmd); spin_lock_irqsave (&lp->lock, flags); @@ -708,7 +708,7 @@ static int init_i596_mem(struct net_device *dev) DEB(DEB_INIT, printk("%s: Issuing RX_START\n", dev->name)); lp->scb.command = RX_START; lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds)); - CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb)); + CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb)); CA(dev); @@ -740,13 +740,13 @@ static inline int i596_rx(struct net_device *dev) rfd = lp->rfd_head; /* Ref next frame to check */ - CHECK_INV(lp, rfd, sizeof(struct i596_rfd)); + CHECK_INV(rfd, sizeof(struct i596_rfd)); while ((rfd->stat) & STAT_C) { /* Loop while complete frames */ if (rfd->rbd == I596_NULL) rbd = NULL; else if (rfd->rbd == lp->rbd_head->b_addr) { rbd = lp->rbd_head; - CHECK_INV(lp, rbd, sizeof(struct i596_rbd)); + CHECK_INV(rbd, sizeof(struct i596_rbd)); } else { printk("%s: rbd chain broken!\n", dev->name); @@ -790,7 +790,7 @@ static inline int i596_rx(struct net_device *dev) dma_addr = dma_map_single(lp->dev, newskb->data, PKT_BUF_SZ, DMA_FROM_DEVICE); rbd->v_data = newskb->data; rbd->b_data = WSWAPchar(dma_addr); - CHECK_WBACK_INV(lp, rbd, sizeof(struct i596_rbd)); + CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd)); } else skb = dev_alloc_skb(pkt_len + 2); @@ -842,7 +842,7 @@ static inline int i596_rx(struct net_device *dev) if (rbd != NULL && (rbd->count & 0x4000)) { rbd->count = 0; lp->rbd_head = rbd->v_next; - CHECK_WBACK_INV(lp, rbd, sizeof(struct i596_rbd)); + CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd)); } /* Tidy the frame descriptor, marking it as end of list */ @@ -860,10 +860,10 @@ static inline int i596_rx(struct net_device *dev) lp->scb.rfd = rfd->b_next; lp->rfd_head = rfd->v_next; - CHECK_WBACK_INV(lp, rfd->v_prev, sizeof(struct i596_rfd)); - CHECK_WBACK_INV(lp, rfd, sizeof(struct i596_rfd)); + CHECK_WBACK_INV(rfd->v_prev, sizeof(struct i596_rfd)); + CHECK_WBACK_INV(rfd, sizeof(struct i596_rfd)); rfd = lp->rfd_head; - CHECK_INV(lp, rfd, sizeof(struct i596_rfd)); + CHECK_INV(rfd, sizeof(struct i596_rfd)); } DEB(DEB_RXFRAME, printk("frames %d\n", frames)); @@ 
-902,12 +902,12 @@ static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private ptr->v_next = NULL; ptr->b_next = I596_NULL; } - CHECK_WBACK_INV(lp, ptr, sizeof(struct i596_cmd)); + CHECK_WBACK_INV(ptr, sizeof(struct i596_cmd)); } wait_cmd(dev, lp, 100, "i596_cleanup_cmd timed out"); lp->scb.cmd = I596_NULL; - CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb)); + CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb)); } @@ -925,7 +925,7 @@ static inline void i596_reset(struct net_device *dev, struct i596_private *lp) /* FIXME: this command might cause an lpmc */ lp->scb.command = CUC_ABORT | RX_ABORT; - CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb)); + CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb)); CA(dev); /* wait for shutdown */ @@ -951,20 +951,20 @@ static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) cmd->command |= (CMD_EOL | CMD_INTR); cmd->v_next = NULL; cmd->b_next = I596_NULL; - CHECK_WBACK(lp, cmd, sizeof(struct i596_cmd)); + CHECK_WBACK(cmd, sizeof(struct i596_cmd)); spin_lock_irqsave (&lp->lock, flags); if (lp->cmd_head != NULL) { lp->cmd_tail->v_next = cmd; lp->cmd_tail->b_next = WSWAPcmd(virt_to_dma(lp,&cmd->status)); - CHECK_WBACK(lp, lp->cmd_tail, sizeof(struct i596_cmd)); + CHECK_WBACK(lp->cmd_tail, sizeof(struct i596_cmd)); } else { lp->cmd_head = cmd; wait_cmd(dev, lp, 100, "i596_add_cmd timed out"); lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&cmd->status)); lp->scb.command = CUC_START; - CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb)); + CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb)); CA(dev); } lp->cmd_tail = cmd; @@ -998,12 +998,12 @@ static int i596_test(struct net_device *dev) data = virt_to_dma(lp,tint); tint[1] = -1; - CHECK_WBACK(lp, tint, PAGE_SIZE); + CHECK_WBACK(tint,PAGE_SIZE); MPU_PORT(dev, 1, data); for(data = 1000000; data; data--) { - CHECK_INV(lp, tint, PAGE_SIZE); + CHECK_INV(tint,PAGE_SIZE); if(tint[1] != -1) break; @@ -1061,7 +1061,7 @@ static void i596_tx_timeout (struct net_device *dev) /* Issue a channel attention signal */ DEB(DEB_ERRORS, printk("Kicking board.\n")); lp->scb.command = CUC_START | RX_START; - CHECK_WBACK_INV(lp, &(lp->scb), sizeof(struct i596_scb)); + CHECK_WBACK_INV(&(lp->scb), sizeof(struct i596_scb)); CA (dev); lp->last_restart = lp->stats.tx_packets; } @@ -1118,8 +1118,8 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) tbd->data = WSWAPchar(tx_cmd->dma_addr); DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued")); - CHECK_WBACK_INV(lp, tx_cmd, sizeof(struct tx_cmd)); - CHECK_WBACK_INV(lp, tbd, sizeof(struct i596_tbd)); + CHECK_WBACK_INV(tx_cmd, sizeof(struct tx_cmd)); + CHECK_WBACK_INV(tbd, sizeof(struct i596_tbd)); i596_add_cmd(dev, &tx_cmd->cmd); lp->stats.tx_packets++; @@ -1228,7 +1228,7 @@ static int __devinit i82596_probe(struct net_device *dev, lp->dma_addr = dma_addr; lp->dev = gen_dev; - CHECK_WBACK_INV(lp, dev->mem_start, sizeof(struct i596_private)); + CHECK_WBACK_INV(dev->mem_start, sizeof(struct i596_private)); i = register_netdev(dev); if (i) { @@ -1295,7 +1295,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id) DEB(DEB_INTS, printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700)); while (lp->cmd_head != NULL) { - CHECK_INV(lp, lp->cmd_head, sizeof(struct i596_cmd)); + CHECK_INV(lp->cmd_head, sizeof(struct i596_cmd)); if (!(lp->cmd_head->status & STAT_C)) break; @@ -1358,7 +1358,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id) } ptr->v_next = NULL; ptr->b_next = I596_NULL; - CHECK_WBACK(lp, ptr, 
sizeof(struct i596_cmd)); + CHECK_WBACK(ptr, sizeof(struct i596_cmd)); lp->last_cmd = jiffies; } @@ -1372,13 +1372,13 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id) ptr->command &= 0x1fff; ptr = ptr->v_next; - CHECK_WBACK_INV(lp, prev, sizeof(struct i596_cmd)); + CHECK_WBACK_INV(prev, sizeof(struct i596_cmd)); } if ((lp->cmd_head != NULL)) ack_cmd |= CUC_START; lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&lp->cmd_head->status)); - CHECK_WBACK_INV(lp, &lp->scb, sizeof(struct i596_scb)); + CHECK_WBACK_INV(&lp->scb, sizeof(struct i596_scb)); } if ((status & 0x1000) || (status & 0x4000)) { if ((status & 0x4000)) @@ -1397,7 +1397,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id) } wait_cmd(dev, lp, 100, "i596 interrupt, timeout"); lp->scb.command = ack_cmd; - CHECK_WBACK(lp, &lp->scb, sizeof(struct i596_scb)); + CHECK_WBACK(&lp->scb, sizeof(struct i596_scb)); /* DANGER: I suspect that some kind of interrupt acknowledgement aside from acking the 82596 might be needed @@ -1426,7 +1426,7 @@ static int i596_close(struct net_device *dev) wait_cmd(dev, lp, 100, "close1 timed out"); lp->scb.command = CUC_ABORT | RX_ABORT; - CHECK_WBACK(lp, &lp->scb, sizeof(struct i596_scb)); + CHECK_WBACK(&lp->scb, sizeof(struct i596_scb)); CA(dev); @@ -1486,7 +1486,7 @@ static void set_multicast_list(struct net_device *dev) dev->name); else { lp->cf_cmd.cmd.command = CmdConfigure; - CHECK_WBACK_INV(lp, &lp->cf_cmd, sizeof(struct cf_cmd)); + CHECK_WBACK_INV(&lp->cf_cmd, sizeof(struct cf_cmd)); i596_add_cmd(dev, &lp->cf_cmd.cmd); } } @@ -1514,7 +1514,7 @@ static void set_multicast_list(struct net_device *dev) DEB(DEB_MULTI, printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5])); } - CHECK_WBACK_INV(lp, &lp->mc_cmd, sizeof(struct mc_cmd)); + CHECK_WBACK_INV(&lp->mc_cmd, sizeof(struct mc_cmd)); i596_add_cmd(dev, &cmd->cmd); } } diff --git a/trunk/drivers/net/mv643xx_eth.c b/trunk/drivers/net/mv643xx_eth.c index d9f48bb04b05..9997081c6dae 100644 --- a/trunk/drivers/net/mv643xx_eth.c +++ b/trunk/drivers/net/mv643xx_eth.c @@ -277,11 +277,9 @@ static void mv643xx_eth_tx_timeout(struct net_device *dev) * * Actual routine to reset the adapter when a timeout on Tx has occurred */ -static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly) +static void mv643xx_eth_tx_timeout_task(struct net_device *dev) { - struct mv643xx_private *mp = container_of(ugly, struct mv643xx_private, - tx_timeout_task); - struct net_device *dev = mp->mii.dev; /* yuck */ + struct mv643xx_private *mp = netdev_priv(dev); if (!netif_running(dev)) return; @@ -1362,7 +1360,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev) #endif /* Configure the timeout task */ - INIT_WORK(&mp->tx_timeout_task, mv643xx_eth_tx_timeout_task); + INIT_WORK(&mp->tx_timeout_task, + (void (*)(void *))mv643xx_eth_tx_timeout_task, dev); spin_lock_init(&mp->lock); diff --git a/trunk/drivers/net/myri10ge/myri10ge.c b/trunk/drivers/net/myri10ge/myri10ge.c index 38df42802386..36350e6db1c1 100644 --- a/trunk/drivers/net/myri10ge/myri10ge.c +++ b/trunk/drivers/net/myri10ge/myri10ge.c @@ -2615,10 +2615,9 @@ static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp) * This watchdog is used to check whether the board has suffered * from a parity error and needs to be recovered. 
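
The lasi_82596 CHECK_WBACK/CHECK_INV/CHECK_WBACK_INV macros above reduce to dma_cache_sync() with a direction flag that says who last wrote the buffer. A sketch of the three cases for a descriptor living in dma_alloc_noncoherent() memory, using the device-less signature this patch restores (desc is a placeholder pointer):

	/* CPU filled the descriptor; write it back before the chip reads it */
	dma_cache_sync(desc, sizeof(*desc), DMA_TO_DEVICE);

	/* the chip updated status; drop stale CPU cache lines before reading */
	dma_cache_sync(desc, sizeof(*desc), DMA_FROM_DEVICE);

	/* both sides touch the area */
	dma_cache_sync(desc, sizeof(*desc), DMA_BIDIRECTIONAL);
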
*/ -static void myri10ge_watchdog(struct work_struct *work) +static void myri10ge_watchdog(void *arg) { - struct myri10ge_priv *mgp = - container_of(work, struct myri10ge_priv, watchdog_work); + struct myri10ge_priv *mgp = arg; u32 reboot; int status; u16 cmd, vendor; @@ -2888,7 +2887,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) (unsigned long)mgp); SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops); - INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog); + INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog, mgp); status = register_netdev(netdev); if (status != 0) { dev_err(&pdev->dev, "register_netdev failed: %d\n", status); diff --git a/trunk/drivers/net/netxen/netxen_nic.h b/trunk/drivers/net/netxen/netxen_nic.h index 9c588af8ab74..d925053fe597 100644 --- a/trunk/drivers/net/netxen/netxen_nic.h +++ b/trunk/drivers/net/netxen/netxen_nic.h @@ -714,7 +714,6 @@ struct netxen_adapter { spinlock_t lock; struct work_struct watchdog_task; struct work_struct tx_timeout_task; - struct net_device *netdev; struct timer_list watchdog_timer; u32 curr_window; @@ -922,7 +921,7 @@ netxen_nic_do_ioctl(struct netxen_adapter *adapter, void *u_data, struct netxen_port *port); int netxen_nic_rx_has_work(struct netxen_adapter *adapter); int netxen_nic_tx_has_work(struct netxen_adapter *adapter); -void netxen_watchdog_task(struct work_struct *work); +void netxen_watchdog_task(unsigned long v); void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid); void netxen_process_cmd_ring(unsigned long data); diff --git a/trunk/drivers/net/netxen/netxen_nic_init.c b/trunk/drivers/net/netxen/netxen_nic_init.c index eae18236aefa..0dca029bc3e5 100644 --- a/trunk/drivers/net/netxen/netxen_nic_init.c +++ b/trunk/drivers/net/netxen/netxen_nic_init.c @@ -710,13 +710,12 @@ static inline int netxen_nic_check_temp(struct netxen_adapter *adapter) return rv; } -void netxen_watchdog_task(struct work_struct *work) +void netxen_watchdog_task(unsigned long v) { int port_num; struct netxen_port *port; struct net_device *netdev; - struct netxen_adapter *adapter = - container_of(work, struct netxen_adapter, watchdog_task); + struct netxen_adapter *adapter = (struct netxen_adapter *)v; if (netxen_nic_check_temp(adapter)) return; diff --git a/trunk/drivers/net/netxen/netxen_nic_main.c b/trunk/drivers/net/netxen/netxen_nic_main.c index df0bb36a1cfb..1cb662d5bd76 100644 --- a/trunk/drivers/net/netxen/netxen_nic_main.c +++ b/trunk/drivers/net/netxen/netxen_nic_main.c @@ -64,7 +64,7 @@ static int netxen_nic_open(struct net_device *netdev); static int netxen_nic_close(struct net_device *netdev); static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *); static void netxen_tx_timeout(struct net_device *netdev); -static void netxen_tx_timeout_task(struct work_struct *work); +static void netxen_tx_timeout_task(struct net_device *netdev); static void netxen_watchdog(unsigned long); static int netxen_handle_int(struct netxen_adapter *, struct net_device *); static int netxen_nic_ioctl(struct net_device *netdev, @@ -274,7 +274,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->ahw.xg_linkup = 0; adapter->watchdog_timer.function = &netxen_watchdog; adapter->watchdog_timer.data = (unsigned long)adapter; - INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task); + INIT_WORK(&adapter->watchdog_task, + (void (*)(void *))netxen_watchdog_task, adapter); adapter->ahw.pdev = pdev; adapter->proc_cmd_buf_counter = 0; pci_read_config_byte(pdev, 
PCI_REVISION_ID, &adapter->ahw.revision_id); @@ -378,8 +379,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_addr); } } - adapter->netdev = netdev; - INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task); + INIT_WORK(&adapter->tx_timeout_task, + (void (*)(void *))netxen_tx_timeout_task, netdev); netif_carrier_off(netdev); netif_stop_queue(netdev); @@ -937,20 +938,18 @@ static void netxen_tx_timeout(struct net_device *netdev) schedule_work(&adapter->tx_timeout_task); } -static void netxen_tx_timeout_task(struct work_struct *work) +static void netxen_tx_timeout_task(struct net_device *netdev) { - struct netxen_adapter *adapter = - container_of(work, struct netxen_adapter, tx_timeout_task); - struct net_device *netdev = adapter->netdev; + struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev); unsigned long flags; printk(KERN_ERR "%s %s: transmit timeout, resetting.\n", netxen_nic_driver_name, netdev->name); - spin_lock_irqsave(&adapter->lock, flags); + spin_lock_irqsave(&port->adapter->lock, flags); netxen_nic_close(netdev); netxen_nic_open(netdev); - spin_unlock_irqrestore(&adapter->lock, flags); + spin_unlock_irqrestore(&port->adapter->lock, flags); netdev->trans_start = jiffies; netif_wake_queue(netdev); } diff --git a/trunk/drivers/net/ns83820.c b/trunk/drivers/net/ns83820.c index 312e0e331712..b0127c71a5b6 100644 --- a/trunk/drivers/net/ns83820.c +++ b/trunk/drivers/net/ns83820.c @@ -427,7 +427,6 @@ struct ns83820 { u8 __iomem *base; struct pci_dev *pci_dev; - struct net_device *ndev; #ifdef NS83820_VLAN_ACCEL_SUPPORT struct vlan_group *vlgrp; @@ -632,10 +631,10 @@ static void fastcall rx_refill_atomic(struct net_device *ndev) } /* REFILL */ -static inline void queue_refill(struct work_struct *work) +static inline void queue_refill(void *_dev) { - struct ns83820 *dev = container_of(work, struct ns83820, tq_refill); - struct net_device *ndev = dev->ndev; + struct net_device *ndev = _dev; + struct ns83820 *dev = PRIV(ndev); rx_refill(ndev, GFP_KERNEL); if (dev->rx_info.up) @@ -1842,7 +1841,6 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_ ndev = alloc_etherdev(sizeof(struct ns83820)); dev = PRIV(ndev); - dev->ndev = ndev; err = -ENOMEM; if (!dev) goto out; @@ -1855,7 +1853,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_ SET_MODULE_OWNER(ndev); SET_NETDEV_DEV(ndev, &pci_dev->dev); - INIT_WORK(&dev->tq_refill, queue_refill); + INIT_WORK(&dev->tq_refill, queue_refill, ndev); tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev); err = pci_enable_device(pci_dev); diff --git a/trunk/drivers/net/pcmcia/xirc2ps_cs.c b/trunk/drivers/net/pcmcia/xirc2ps_cs.c index 8478dca3d8d1..69813406782d 100644 --- a/trunk/drivers/net/pcmcia/xirc2ps_cs.c +++ b/trunk/drivers/net/pcmcia/xirc2ps_cs.c @@ -332,7 +332,6 @@ static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id); */ typedef struct local_info_t { - struct net_device *dev; struct pcmcia_device *p_dev; dev_node_t node; struct net_device_stats stats; @@ -354,7 +353,7 @@ typedef struct local_info_t { */ static int do_start_xmit(struct sk_buff *skb, struct net_device *dev); static void do_tx_timeout(struct net_device *dev); -static void xirc2ps_tx_timeout_task(struct work_struct *work); +static void xirc2ps_tx_timeout_task(void *data); static struct net_device_stats *do_get_stats(struct net_device *dev); static void set_addresses(struct net_device *dev); static void set_multicast_list(struct net_device *dev); @@ -568,7 
+567,6 @@ xirc2ps_probe(struct pcmcia_device *link) if (!dev) return -ENOMEM; local = netdev_priv(dev); - local->dev = dev; local->p_dev = link; link->priv = dev; @@ -593,7 +591,7 @@ xirc2ps_probe(struct pcmcia_device *link) #ifdef HAVE_TX_TIMEOUT dev->tx_timeout = do_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; - INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task); + INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task, dev); #endif return xirc2ps_config(link); @@ -1326,11 +1324,9 @@ xirc2ps_interrupt(int irq, void *dev_id) /*====================================================================*/ static void -xirc2ps_tx_timeout_task(struct work_struct *work) +xirc2ps_tx_timeout_task(void *data) { - local_info_t *local = - container_of(work, local_info_t, tx_timeout_task); - struct net_device *dev = local->dev; + struct net_device *dev = data; /* reset the card */ do_reset(dev,1); dev->trans_start = jiffies; diff --git a/trunk/drivers/net/phy/phy.c b/trunk/drivers/net/phy/phy.c index 4044bb1ada86..88237bdb5255 100644 --- a/trunk/drivers/net/phy/phy.c +++ b/trunk/drivers/net/phy/phy.c @@ -397,7 +397,7 @@ int phy_start_aneg(struct phy_device *phydev) EXPORT_SYMBOL(phy_start_aneg); -static void phy_change(struct work_struct *work); +static void phy_change(void *data); static void phy_timer(unsigned long data); /* phy_start_machine: @@ -555,7 +555,7 @@ int phy_start_interrupts(struct phy_device *phydev) { int err = 0; - INIT_WORK(&phydev->phy_queue, phy_change); + INIT_WORK(&phydev->phy_queue, phy_change, phydev); if (request_irq(phydev->irq, phy_interrupt, IRQF_SHARED, @@ -598,11 +598,10 @@ EXPORT_SYMBOL(phy_stop_interrupts); /* Scheduled by the phy_interrupt/timer to handle PHY changes */ -static void phy_change(struct work_struct *work) +static void phy_change(void *data) { int err; - struct phy_device *phydev = - container_of(work, struct phy_device, phy_queue); + struct phy_device *phydev = data; err = phy_disable_interrupts(phydev); diff --git a/trunk/drivers/net/plip.c b/trunk/drivers/net/plip.c index 6bb085f54437..71afb274498f 100644 --- a/trunk/drivers/net/plip.c +++ b/trunk/drivers/net/plip.c @@ -138,9 +138,9 @@ static const unsigned int net_debug = NET_DEBUG; #define PLIP_NIBBLE_WAIT 3000 /* Bottom halves */ -static void plip_kick_bh(struct work_struct *work); -static void plip_bh(struct work_struct *work); -static void plip_timer_bh(struct work_struct *work); +static void plip_kick_bh(struct net_device *dev); +static void plip_bh(struct net_device *dev); +static void plip_timer_bh(struct net_device *dev); /* Interrupt handler */ static void plip_interrupt(int irq, void *dev_id); @@ -207,10 +207,9 @@ struct plip_local { struct net_local { struct net_device_stats enet_stats; - struct net_device *dev; struct work_struct immediate; - struct delayed_work deferred; - struct delayed_work timer; + struct work_struct deferred; + struct work_struct timer; struct plip_local snd_data; struct plip_local rcv_data; struct pardevice *pardev; @@ -307,11 +306,11 @@ plip_init_netdev(struct net_device *dev) nl->nibble = PLIP_NIBBLE_WAIT; /* Initialize task queue structures */ - INIT_WORK(&nl->immediate, plip_bh); - INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh); + INIT_WORK(&nl->immediate, (void (*)(void *))plip_bh, dev); + INIT_WORK(&nl->deferred, (void (*)(void *))plip_kick_bh, dev); if (dev->irq == -1) - INIT_DELAYED_WORK(&nl->timer, plip_timer_bh); + INIT_WORK(&nl->timer, (void (*)(void *))plip_timer_bh, dev); spin_lock_init(&nl->lock); } @@ -320,10 +319,9 @@ plip_init_netdev(struct 
net_device *dev) This routine is kicked by do_timer(). Request `plip_bh' to be invoked. */ static void -plip_kick_bh(struct work_struct *work) +plip_kick_bh(struct net_device *dev) { - struct net_local *nl = - container_of(work, struct net_local, deferred.work); + struct net_local *nl = netdev_priv(dev); if (nl->is_deferred) schedule_work(&nl->immediate); @@ -364,9 +362,9 @@ static const plip_func connection_state_table[] = /* Bottom half handler of PLIP. */ static void -plip_bh(struct work_struct *work) +plip_bh(struct net_device *dev) { - struct net_local *nl = container_of(work, struct net_local, immediate); + struct net_local *nl = netdev_priv(dev); struct plip_local *snd = &nl->snd_data; struct plip_local *rcv = &nl->rcv_data; plip_func f; @@ -374,21 +372,20 @@ plip_bh(struct work_struct *work) nl->is_deferred = 0; f = connection_state_table[nl->connection]; - if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK - && (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) { + if ((r = (*f)(dev, nl, snd, rcv)) != OK + && (r = plip_bh_timeout_error(dev, nl, snd, rcv, r)) != OK) { nl->is_deferred = 1; schedule_delayed_work(&nl->deferred, 1); } } static void -plip_timer_bh(struct work_struct *work) +plip_timer_bh(struct net_device *dev) { - struct net_local *nl = - container_of(work, struct net_local, timer.work); + struct net_local *nl = netdev_priv(dev); if (!(atomic_read (&nl->kill_timer))) { - plip_interrupt (-1, nl->dev); + plip_interrupt (-1, dev); schedule_delayed_work(&nl->timer, 1); } @@ -1287,7 +1284,6 @@ static void plip_attach (struct parport *port) } nl = netdev_priv(dev); - nl->dev = dev; nl->pardev = parport_register_device(port, name, plip_preempt, plip_wakeup, plip_interrupt, 0, dev); diff --git a/trunk/drivers/net/qla3xxx.c b/trunk/drivers/net/qla3xxx.c index d79d141a601d..ec640f6229ae 100644 --- a/trunk/drivers/net/qla3xxx.c +++ b/trunk/drivers/net/qla3xxx.c @@ -2008,7 +2008,7 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id) "%s: Another function issued a reset to the " "chip. ISR value = %x.\n", ndev->name, value); } - queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); + queue_work(qdev->workqueue, &qdev->reset_work); spin_unlock(&qdev->adapter_lock); } else if (value & ISP_IMR_DISABLE_CMPL_INT) { ql_disable_interrupts(qdev); @@ -3182,13 +3182,11 @@ static void ql3xxx_tx_timeout(struct net_device *ndev) /* * Wake up the worker to process this event. 
*/ - queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); + queue_work(qdev->workqueue, &qdev->tx_timeout_work); } -static void ql_reset_work(struct work_struct *work) +static void ql_reset_work(struct ql3_adapter *qdev) { - struct ql3_adapter *qdev = - container_of(work, struct ql3_adapter, reset_work.work); struct net_device *ndev = qdev->ndev; u32 value; struct ql_tx_buf_cb *tx_cb; @@ -3280,12 +3278,9 @@ static void ql_reset_work(struct work_struct *work) } } -static void ql_tx_timeout_work(struct work_struct *work) +static void ql_tx_timeout_work(struct ql3_adapter *qdev) { - struct ql3_adapter *qdev = - container_of(work, struct ql3_adapter, tx_timeout_work.work); - - ql_cycle_adapter(qdev, QL_DO_RESET); + ql_cycle_adapter(qdev,QL_DO_RESET); } static void ql_get_board_info(struct ql3_adapter *qdev) @@ -3464,8 +3459,9 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, netif_stop_queue(ndev); qdev->workqueue = create_singlethread_workqueue(ndev->name); - INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); - INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); + INIT_WORK(&qdev->reset_work, (void (*)(void *))ql_reset_work, qdev); + INIT_WORK(&qdev->tx_timeout_work, + (void (*)(void *))ql_tx_timeout_work, qdev); init_timer(&qdev->adapter_timer); qdev->adapter_timer.function = ql3xxx_timer; diff --git a/trunk/drivers/net/qla3xxx.h b/trunk/drivers/net/qla3xxx.h index ea94de7fd071..65da2c0bfda6 100644 --- a/trunk/drivers/net/qla3xxx.h +++ b/trunk/drivers/net/qla3xxx.h @@ -1186,8 +1186,8 @@ struct ql3_adapter { u32 numPorts; struct net_device_stats stats; struct workqueue_struct *workqueue; - struct delayed_work reset_work; - struct delayed_work tx_timeout_work; + struct work_struct reset_work; + struct work_struct tx_timeout_work; u32 max_frame_size; }; diff --git a/trunk/drivers/net/r8169.c b/trunk/drivers/net/r8169.c index 85a392fab5cc..45d3ca431957 100644 --- a/trunk/drivers/net/r8169.c +++ b/trunk/drivers/net/r8169.c @@ -424,7 +424,6 @@ struct ring_info { struct rtl8169_private { void __iomem *mmio_addr; /* memory map physical address */ struct pci_dev *pci_dev; /* Index of PCI device */ - struct net_device *dev; struct net_device_stats stats; /* statistics of net device */ spinlock_t lock; /* spin lock flag */ u32 msg_enable; @@ -456,7 +455,7 @@ struct rtl8169_private { void (*phy_reset_enable)(void __iomem *); unsigned int (*phy_reset_pending)(void __iomem *); unsigned int (*link_ok)(void __iomem *); - struct delayed_work task; + struct work_struct task; unsigned wol_enabled : 1; }; @@ -1511,7 +1510,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); tp = netdev_priv(dev); - tp->dev = dev; tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ @@ -1784,7 +1782,7 @@ static int rtl8169_open(struct net_device *dev) if (retval < 0) goto err_free_rx; - INIT_DELAYED_WORK(&tp->task, NULL); + INIT_WORK(&tp->task, NULL, dev); rtl8169_hw_start(dev); @@ -2107,11 +2105,11 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp) tp->cur_tx = tp->dirty_tx = 0; } -static void rtl8169_schedule_work(struct net_device *dev, work_func_t task) +static void rtl8169_schedule_work(struct net_device *dev, void (*task)(void *)) { struct rtl8169_private *tp = netdev_priv(dev); - PREPARE_DELAYED_WORK(&tp->task, task); + PREPARE_WORK(&tp->task, task, dev); schedule_delayed_work(&tp->task, 4); } @@ -2130,11 +2128,9 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev) netif_poll_enable(dev); } -static void rtl8169_reinit_task(struct work_struct *work) +static void rtl8169_reinit_task(void *_data) { - struct rtl8169_private *tp = - container_of(work, struct rtl8169_private, task.work); - struct net_device *dev = tp->dev; + struct net_device *dev = _data; int ret; if (netif_running(dev)) { @@ -2157,11 +2153,10 @@ static void rtl8169_reinit_task(struct work_struct *work) } } -static void rtl8169_reset_task(struct work_struct *work) +static void rtl8169_reset_task(void *_data) { - struct rtl8169_private *tp = - container_of(work, struct rtl8169_private, task.work); - struct net_device *dev = tp->dev; + struct net_device *dev = _data; + struct rtl8169_private *tp = netdev_priv(dev); if (!netif_running(dev)) return; diff --git a/trunk/drivers/net/s2io.c b/trunk/drivers/net/s2io.c index 250cdbeefdfd..33569ec9dbfc 100644 --- a/trunk/drivers/net/s2io.c +++ b/trunk/drivers/net/s2io.c @@ -5872,9 +5872,9 @@ static void s2io_tasklet(unsigned long dev_addr) * Description: Sets the link status for the adapter */ -static void s2io_set_link(struct work_struct *work) +static void s2io_set_link(unsigned long data) { - nic_t *nic = container_of(work, nic_t, set_link_task); + nic_t *nic = (nic_t *) data; struct net_device *dev = nic->dev; XENA_dev_config_t __iomem *bar0 = nic->bar0; register u64 val64; @@ -6379,10 +6379,10 @@ static int s2io_card_up(nic_t * sp) * spin lock. 
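The qla3xxx, r8169 and skge hunks also undo the split between immediate and delayed work: in the newer API a delayed submission uses a dedicated struct delayed_work, so container_of() has to go through its embedded .work member, whereas the older API queues the same plain work_struct either immediately or with a delay. Sketched with an invented "bar" structure, one per API generation (again, the two halves assume different kernel versions):

        #include <linux/workqueue.h>

        /* newer style: a distinct type wraps the work_struct */
        struct bar_new {
                struct delayed_work link_check;
        };

        static void bar_link_check(struct work_struct *work)
        {
                /* note the ".work": container_of() goes through the embedded member */
                struct bar_new *bp = container_of(work, struct bar_new, link_check.work);

                /* ... poll the link for bp ... */
        }

        static void bar_arm_new(struct bar_new *bp, struct workqueue_struct *wq)
        {
                INIT_DELAYED_WORK(&bp->link_check, bar_link_check);
                queue_delayed_work(wq, &bp->link_check, 0);    /* delay of 0 = run soon */
        }

        /* older style: one work_struct serves immediate and delayed queueing */
        struct bar_old {
                struct work_struct link_check;
        };

        static void bar_link_check_old(void *data)
        {
                struct bar_old *bp = data;

                /* ... poll the link for bp ... */
        }

        static void bar_arm_old(struct bar_old *bp, struct workqueue_struct *wq)
        {
                INIT_WORK(&bp->link_check, bar_link_check_old, bp);
                queue_work(wq, &bp->link_check);               /* immediate */
                schedule_delayed_work(&bp->link_check, 1);     /* or after one jiffy */
        }

This is why a queue_delayed_work(wq, &w, 0) on one side of these hunks maps to a plain queue_work(wq, &w) on the other.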
*/ -static void s2io_restart_nic(struct work_struct *work) +static void s2io_restart_nic(unsigned long data) { - nic_t *sp = container_of(work, nic_t, rst_timer_task); - struct net_device *dev = sp->dev; + struct net_device *dev = (struct net_device *) data; + nic_t *sp = dev->priv; s2io_card_down(sp); if (s2io_card_up(sp)) { @@ -6992,8 +6992,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) dev->tx_timeout = &s2io_tx_watchdog; dev->watchdog_timeo = WATCH_DOG_TIMEOUT; - INIT_WORK(&sp->rst_timer_task, s2io_restart_nic); - INIT_WORK(&sp->set_link_task, s2io_set_link); + INIT_WORK(&sp->rst_timer_task, + (void (*)(void *)) s2io_restart_nic, dev); + INIT_WORK(&sp->set_link_task, + (void (*)(void *)) s2io_set_link, sp); pci_save_state(sp->pdev); diff --git a/trunk/drivers/net/s2io.h b/trunk/drivers/net/s2io.h index 3b0bafd273c8..12b719f4d00f 100644 --- a/trunk/drivers/net/s2io.h +++ b/trunk/drivers/net/s2io.h @@ -1000,7 +1000,7 @@ s2io_msix_fifo_handle(int irq, void *dev_id); static irqreturn_t s2io_isr(int irq, void *dev_id); static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag); static const struct ethtool_ops netdev_ethtool_ops; -static void s2io_set_link(struct work_struct *work); +static void s2io_set_link(unsigned long data); static int s2io_set_swapper(nic_t * sp); static void s2io_card_down(nic_t *nic); static int s2io_card_up(nic_t *nic); diff --git a/trunk/drivers/net/sis190.c b/trunk/drivers/net/sis190.c index b70ed79d4121..aaba458584fb 100644 --- a/trunk/drivers/net/sis190.c +++ b/trunk/drivers/net/sis190.c @@ -280,7 +280,6 @@ enum sis190_feature { struct sis190_private { void __iomem *mmio_addr; struct pci_dev *pci_dev; - struct net_device *dev; struct net_device_stats stats; spinlock_t lock; u32 rx_buf_sz; @@ -898,11 +897,10 @@ static void sis190_hw_start(struct net_device *dev) netif_start_queue(dev); } -static void sis190_phy_task(struct work_struct *work) +static void sis190_phy_task(void * data) { - struct sis190_private *tp = - container_of(work, struct sis190_private, phy_task); - struct net_device *dev = tp->dev; + struct net_device *dev = data; + struct sis190_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; int phy_id = tp->mii_if.phy_id; u16 val; @@ -1049,7 +1047,7 @@ static int sis190_open(struct net_device *dev) if (rc < 0) goto err_free_rx_1; - INIT_WORK(&tp->phy_task, sis190_phy_task); + INIT_WORK(&tp->phy_task, sis190_phy_task, dev); sis190_request_timer(dev); @@ -1438,7 +1436,6 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev) SET_NETDEV_DEV(dev, &pdev->dev); tp = netdev_priv(dev); - tp->dev = dev; tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT); rc = pci_enable_device(pdev); @@ -1801,7 +1798,7 @@ static int __devinit sis190_init_one(struct pci_dev *pdev, sis190_init_rxfilter(dev); - INIT_WORK(&tp->phy_task, sis190_phy_task); + INIT_WORK(&tp->phy_task, sis190_phy_task, dev); dev->open = sis190_open; dev->stop = sis190_close; diff --git a/trunk/drivers/net/skge.c b/trunk/drivers/net/skge.c index b60f0451f6cd..5513907e8393 100644 --- a/trunk/drivers/net/skge.c +++ b/trunk/drivers/net/skge.c @@ -1327,11 +1327,10 @@ static void xm_check_link(struct net_device *dev) * Since internal PHY is wired to a level triggered pin, can't * get an interrupt when carrier is detected. 
*/ -static void xm_link_timer(struct work_struct *work) +static void xm_link_timer(void *arg) { - struct skge_port *skge = - container_of(work, struct skge_port, link_thread.work); - struct net_device *dev = skge->netdev; + struct net_device *dev = arg; + struct skge_port *skge = netdev_priv(arg); struct skge_hw *hw = skge->hw; int port = skge->port; @@ -3073,9 +3072,9 @@ static void skge_error_irq(struct skge_hw *hw) * because accessing phy registers requires spin wait which might * cause excess interrupt latency. */ -static void skge_extirq(struct work_struct *work) +static void skge_extirq(void *arg) { - struct skge_hw *hw = container_of(work, struct skge_hw, phy_work); + struct skge_hw *hw = arg; int port; mutex_lock(&hw->phy_mutex); @@ -3457,7 +3456,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, skge->port = port; /* Only used for Genesis XMAC */ - INIT_DELAYED_WORK(&skge->link_thread, xm_link_timer); + INIT_WORK(&skge->link_thread, xm_link_timer, dev); if (hw->chip_id != CHIP_ID_GENESIS) { dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; @@ -3544,7 +3543,7 @@ static int __devinit skge_probe(struct pci_dev *pdev, hw->pdev = pdev; mutex_init(&hw->phy_mutex); - INIT_WORK(&hw->phy_work, skge_extirq); + INIT_WORK(&hw->phy_work, skge_extirq, hw); spin_lock_init(&hw->hw_lock); hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); diff --git a/trunk/drivers/net/skge.h b/trunk/drivers/net/skge.h index 23e5275d920c..537c0aaa1db8 100644 --- a/trunk/drivers/net/skge.h +++ b/trunk/drivers/net/skge.h @@ -2456,7 +2456,7 @@ struct skge_port { struct net_device_stats net_stats; - struct delayed_work link_thread; + struct work_struct link_thread; enum pause_control flow_control; enum pause_status flow_status; u8 rx_csum; diff --git a/trunk/drivers/net/smc91x.c b/trunk/drivers/net/smc91x.c index e62a9586fb95..95b6478f55c6 100644 --- a/trunk/drivers/net/smc91x.c +++ b/trunk/drivers/net/smc91x.c @@ -210,7 +210,6 @@ struct smc_local { /* work queue */ struct work_struct phy_configure; - struct net_device *dev; int work_pending; spinlock_t lock; @@ -1115,11 +1114,10 @@ static void smc_phy_check_media(struct net_device *dev, int init) * of autonegotiation.) If the RPC ANEG bit is cleared, the selection * is controlled by the RPC SPEED and RPC DPLX bits. 
*/ -static void smc_phy_configure(struct work_struct *work) +static void smc_phy_configure(void *data) { - struct smc_local *lp = - container_of(work, struct smc_local, phy_configure); - struct net_device *dev = lp->dev; + struct net_device *dev = data; + struct smc_local *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; int phyaddr = lp->mii.phy_id; int my_phy_caps; /* My PHY capabilities */ @@ -1594,7 +1592,7 @@ smc_open(struct net_device *dev) /* Configure the PHY, initialize the link state */ if (lp->phy_type != 0) - smc_phy_configure(&lp->phy_configure); + smc_phy_configure(dev); else { spin_lock_irq(&lp->lock); smc_10bt_check_media(dev, 1); @@ -1974,8 +1972,7 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr) #endif tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev); - INIT_WORK(&lp->phy_configure, smc_phy_configure); - lp->dev = dev; + INIT_WORK(&lp->phy_configure, smc_phy_configure, dev); lp->mii.phy_id_mask = 0x1f; lp->mii.reg_num_mask = 0x1f; lp->mii.force_media = 0; @@ -2325,7 +2322,7 @@ static int smc_drv_resume(struct platform_device *dev) smc_reset(ndev); smc_enable(ndev); if (lp->phy_type != 0) - smc_phy_configure(&lp->phy_configure); + smc_phy_configure(ndev); netif_device_attach(ndev); } } diff --git a/trunk/drivers/net/spider_net.c b/trunk/drivers/net/spider_net.c index ebb6aa39f9c7..13e0a43e423b 100644 --- a/trunk/drivers/net/spider_net.c +++ b/trunk/drivers/net/spider_net.c @@ -1939,11 +1939,10 @@ spider_net_stop(struct net_device *netdev) * called as task when tx hangs, resets interface (if interface is up) */ static void -spider_net_tx_timeout_task(struct work_struct *work) +spider_net_tx_timeout_task(void *data) { - struct spider_net_card *card = - container_of(work, struct spider_net_card, tx_timeout_task); - struct net_device *netdev = card->netdev; + struct net_device *netdev = data; + struct spider_net_card *card = netdev_priv(netdev); if (!(netdev->flags & IFF_UP)) goto out; @@ -2117,7 +2116,7 @@ spider_net_alloc_card(void) card = netdev_priv(netdev); card->netdev = netdev; card->msg_enable = SPIDER_NET_DEFAULT_MSG; - INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task); + INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task, netdev); init_waitqueue_head(&card->waitq); atomic_set(&card->tx_timeout_task_counter, 0); diff --git a/trunk/drivers/net/sungem.c b/trunk/drivers/net/sungem.c index 785e4a535f9e..cf44e72399b9 100644 --- a/trunk/drivers/net/sungem.c +++ b/trunk/drivers/net/sungem.c @@ -2282,9 +2282,9 @@ static void gem_do_stop(struct net_device *dev, int wol) } } -static void gem_reset_task(struct work_struct *work) +static void gem_reset_task(void *data) { - struct gem *gp = container_of(work, struct gem, reset_task); + struct gem *gp = (struct gem *) data; mutex_lock(&gp->pm_mutex); @@ -3044,7 +3044,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev, gp->link_timer.function = gem_link_timer; gp->link_timer.data = (unsigned long) gp; - INIT_WORK(&gp->reset_task, gem_reset_task); + INIT_WORK(&gp->reset_task, gem_reset_task, gp); gp->lstate = link_down; gp->timer_ticks = 0; diff --git a/trunk/drivers/net/tg3.c b/trunk/drivers/net/tg3.c index d9123c9adc1e..c20bb998e0e5 100644 --- a/trunk/drivers/net/tg3.c +++ b/trunk/drivers/net/tg3.c @@ -3654,9 +3654,9 @@ static void tg3_poll_controller(struct net_device *dev) } #endif -static void tg3_reset_task(struct work_struct *work) +static void tg3_reset_task(void *_data) { - struct tg3 *tp = container_of(work, struct tg3, reset_task); + 
struct tg3 *tp = _data; unsigned int restart_timer; tg3_full_lock(tp, 0); @@ -11734,7 +11734,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, #endif spin_lock_init(&tp->lock); spin_lock_init(&tp->indirect_lock); - INIT_WORK(&tp->reset_task, tg3_reset_task); + INIT_WORK(&tp->reset_task, tg3_reset_task, tp); tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len); if (tp->regs == 0UL) { diff --git a/trunk/drivers/net/tlan.c b/trunk/drivers/net/tlan.c index f85f00251123..e14f5a00f65a 100644 --- a/trunk/drivers/net/tlan.c +++ b/trunk/drivers/net/tlan.c @@ -296,7 +296,6 @@ static void TLan_SetMulticastList( struct net_device *); static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd); static int TLan_probe1( struct pci_dev *pdev, long ioaddr, int irq, int rev, const struct pci_device_id *ent); static void TLan_tx_timeout( struct net_device *dev); -static void TLan_tx_timeout_work(struct work_struct *work); static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent); static u32 TLan_HandleInvalid( struct net_device *, u16 ); @@ -563,7 +562,6 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, priv = netdev_priv(dev); priv->pciDev = pdev; - priv->dev = dev; /* Is this a PCI device? */ if (pdev) { @@ -636,7 +634,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, /* This will be used when we get an adapter error from * within our irq handler */ - INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work); + INIT_WORK(&priv->tlan_tqueue, (void *)(void*)TLan_tx_timeout, dev); spin_lock_init(&priv->lock); @@ -1042,25 +1040,6 @@ static void TLan_tx_timeout(struct net_device *dev) } - /*************************************************************** - * TLan_tx_timeout_work - * - * Returns: nothing - * - * Params: - * work work item of device which timed out - * - **************************************************************/ - -static void TLan_tx_timeout_work(struct work_struct *work) -{ - TLanPrivateInfo *priv = - container_of(work, TLanPrivateInfo, tlan_tqueue); - - TLan_tx_timeout(priv->dev); -} - - /*************************************************************** * TLan_StartTx diff --git a/trunk/drivers/net/tlan.h b/trunk/drivers/net/tlan.h index 41ce0b665937..a44e2f2ef62a 100644 --- a/trunk/drivers/net/tlan.h +++ b/trunk/drivers/net/tlan.h @@ -170,7 +170,6 @@ typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE]; typedef struct tlan_private_tag { struct net_device *nextDevice; struct pci_dev *pciDev; - struct net_device *dev; void *dmaStorage; dma_addr_t dmaStorageDMA; unsigned int dmaSize; diff --git a/trunk/drivers/net/tulip/21142.c b/trunk/drivers/net/tulip/21142.c index 942b839ccc5b..fa3a2bb105ad 100644 --- a/trunk/drivers/net/tulip/21142.c +++ b/trunk/drivers/net/tulip/21142.c @@ -26,11 +26,10 @@ static u16 t21142_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, }; /* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list of available transceivers. 
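Where a handler keeps a more specific prototype than void (*)(void *), taking a struct net_device * or the driver's private structure directly, the older-style registrations above squeeze it in with an explicit function-pointer cast, as in the mv643xx_eth, netxen, s2io and tlan hunks. A small sketch of that idiom with invented names (three-argument INIT_WORK assumed):

        #include <linux/workqueue.h>
        #include <linux/netdevice.h>

        struct baz_priv {
                struct work_struct tx_timeout_task;
        };

        /* handler written against its natural argument type */
        static void baz_tx_timeout_task(struct net_device *dev)
        {
                /* ... close and reopen dev ... */
        }

        static void baz_init(struct baz_priv *mp, struct net_device *dev)
        {
                /* the cast makes the prototype fit the void (*)(void *) slot;
                 * "dev" is handed back to the handler as its argument */
                INIT_WORK(&mp->tx_timeout_task,
                          (void (*)(void *))baz_tx_timeout_task, dev);
        }

The cast works because the data pointer is passed through unchanged, but it hides any argument mismatch from the compiler, which is one reason the container_of() style later replaced it.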
*/ -void t21142_media_task(struct work_struct *work) +void t21142_media_task(void *data) { - struct tulip_private *tp = - container_of(work, struct tulip_private, media_work); - struct net_device *dev = tp->dev; + struct net_device *dev = data; + struct tulip_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->base_addr; int csr12 = ioread32(ioaddr + CSR12); int next_tick = 60*HZ; diff --git a/trunk/drivers/net/tulip/timer.c b/trunk/drivers/net/tulip/timer.c index df326fe1cc8f..066e5d6bcbd8 100644 --- a/trunk/drivers/net/tulip/timer.c +++ b/trunk/drivers/net/tulip/timer.c @@ -18,11 +18,10 @@ #include "tulip.h" -void tulip_media_task(struct work_struct *work) +void tulip_media_task(void *data) { - struct tulip_private *tp = - container_of(work, struct tulip_private, media_work); - struct net_device *dev = tp->dev; + struct net_device *dev = data; + struct tulip_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->base_addr; u32 csr12 = ioread32(ioaddr + CSR12); int next_tick = 2*HZ; diff --git a/trunk/drivers/net/tulip/tulip.h b/trunk/drivers/net/tulip/tulip.h index 25f25da76917..ad107f45c7b1 100644 --- a/trunk/drivers/net/tulip/tulip.h +++ b/trunk/drivers/net/tulip/tulip.h @@ -44,7 +44,7 @@ struct tulip_chip_table { int valid_intrs; /* CSR7 interrupt enable settings */ int flags; void (*media_timer) (unsigned long); - work_func_t media_task; + void (*media_task) (void *); }; @@ -392,7 +392,6 @@ struct tulip_private { int csr12_shadow; int pad0; /* Used for 8-byte alignment */ struct work_struct media_work; - struct net_device *dev; }; @@ -407,7 +406,7 @@ struct eeprom_fixup { /* 21142.c */ extern u16 t21142_csr14[]; -void t21142_media_task(struct work_struct *work); +void t21142_media_task(void *data); void t21142_start_nway(struct net_device *dev); void t21142_lnk_change(struct net_device *dev, int csr5); @@ -445,7 +444,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5); void pnic_timer(unsigned long data); /* timer.c */ -void tulip_media_task(struct work_struct *work); +void tulip_media_task(void *data); void mxic_timer(unsigned long data); void comet_timer(unsigned long data); diff --git a/trunk/drivers/net/tulip/tulip_core.c b/trunk/drivers/net/tulip/tulip_core.c index 5a35354aa523..0aee618f883c 100644 --- a/trunk/drivers/net/tulip/tulip_core.c +++ b/trunk/drivers/net/tulip/tulip_core.c @@ -1367,7 +1367,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, * it is zeroed and aligned in alloc_etherdev */ tp = netdev_priv(dev); - tp->dev = dev; tp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct tulip_rx_desc) * RX_RING_SIZE + @@ -1390,7 +1389,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, tp->timer.data = (unsigned long)dev; tp->timer.function = tulip_tbl[tp->chip_id].media_timer; - INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task); + INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task, dev); dev->base_addr = (unsigned long)ioaddr; diff --git a/trunk/drivers/net/wan/pc300_tty.c b/trunk/drivers/net/wan/pc300_tty.c index b2a23aed4428..931cbdf6d791 100644 --- a/trunk/drivers/net/wan/pc300_tty.c +++ b/trunk/drivers/net/wan/pc300_tty.c @@ -125,8 +125,8 @@ static int cpc_tty_write_room(struct tty_struct *tty); static int cpc_tty_chars_in_buffer(struct tty_struct *tty); static void cpc_tty_flush_buffer(struct tty_struct *tty); static void cpc_tty_hangup(struct tty_struct *tty); -static void cpc_tty_rx_work(struct work_struct *work); -static void cpc_tty_tx_work(struct work_struct *work); +static void cpc_tty_rx_work(void 
*data); +static void cpc_tty_tx_work(void *data); static int cpc_tty_send_to_card(pc300dev_t *dev,void *buf, int len); static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx); static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char); @@ -261,8 +261,8 @@ void cpc_tty_init(pc300dev_t *pc300dev) cpc_tty->tty_minor = port + CPC_TTY_MINOR_START; cpc_tty->pc300dev = pc300dev; - INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work); - INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work); + INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work, (void *)cpc_tty); + INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work, (void *)port); cpc_tty->buf_rx.first = cpc_tty->buf_rx.last = NULL; @@ -659,23 +659,21 @@ static void cpc_tty_hangup(struct tty_struct *tty) * o call the line disc. read * o free memory */ -static void cpc_tty_rx_work(struct work_struct *work) +static void cpc_tty_rx_work(void * data) { - st_cpc_tty_area *cpc_tty; unsigned long port; int i, j; + st_cpc_tty_area *cpc_tty; volatile st_cpc_rx_buf *buf; char flags=0,flg_rx=1; struct tty_ldisc *ld; if (cpc_tty_cnt == 0) return; + for (i=0; (i < 4) && flg_rx ; i++) { flg_rx = 0; - - cpc_tty = container_of(work, st_cpc_tty_area, tty_rx_work); - port = cpc_tty - cpc_tty_area; - + port = (unsigned long)data; for (j=0; j < CPC_TTY_NPORTS; j++) { cpc_tty = &cpc_tty_area[port]; @@ -884,10 +882,9 @@ void cpc_tty_receive(pc300dev_t *pc300dev) * o if need call line discipline wakeup * o call wake_up_interruptible */ -static void cpc_tty_tx_work(struct work_struct *work) +static void cpc_tty_tx_work(void *data) { - st_cpc_tty_area *cpc_tty = - container_of(work, st_cpc_tty_area, tty_tx_work); + st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *) data; struct tty_struct *tty; CPC_TTY_DBG("%s: cpc_tty_tx_work init\n",cpc_tty->name); diff --git a/trunk/drivers/net/wireless/airo.c b/trunk/drivers/net/wireless/airo.c index 44a22701da97..efcdaf1c5f73 100644 --- a/trunk/drivers/net/wireless/airo.c +++ b/trunk/drivers/net/wireless/airo.c @@ -49,7 +49,6 @@ #include #include #include -#include #include "airo.h" diff --git a/trunk/drivers/net/wireless/bcm43xx/bcm43xx.h b/trunk/drivers/net/wireless/bcm43xx/bcm43xx.h index 8286678513b9..94dfb92fab5c 100644 --- a/trunk/drivers/net/wireless/bcm43xx/bcm43xx.h +++ b/trunk/drivers/net/wireless/bcm43xx/bcm43xx.h @@ -819,7 +819,7 @@ struct bcm43xx_private { struct tasklet_struct isr_tasklet; /* Periodic tasks */ - struct delayed_work periodic_work; + struct work_struct periodic_work; unsigned int periodic_state; struct work_struct restart_work; diff --git a/trunk/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/trunk/drivers/net/wireless/bcm43xx/bcm43xx_main.c index 2ec2e5afce67..5b3c27359a18 100644 --- a/trunk/drivers/net/wireless/bcm43xx/bcm43xx_main.c +++ b/trunk/drivers/net/wireless/bcm43xx/bcm43xx_main.c @@ -3215,10 +3215,9 @@ static void do_periodic_work(struct bcm43xx_private *bcm) schedule_delayed_work(&bcm->periodic_work, HZ * 15); } -static void bcm43xx_periodic_work_handler(struct work_struct *work) +static void bcm43xx_periodic_work_handler(void *d) { - struct bcm43xx_private *bcm = - container_of(work, struct bcm43xx_private, periodic_work.work); + struct bcm43xx_private *bcm = d; struct net_device *net_dev = bcm->net_dev; unsigned long flags; u32 savedirqs = 0; @@ -3280,11 +3279,11 @@ void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm) void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm) { - struct delayed_work *work = &bcm->periodic_work; + struct work_struct *work = 
&(bcm->periodic_work); assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED); - INIT_DELAYED_WORK(work, bcm43xx_periodic_work_handler); - schedule_delayed_work(work, 0); + INIT_WORK(work, bcm43xx_periodic_work_handler, bcm); + schedule_work(work); } static void bcm43xx_security_init(struct bcm43xx_private *bcm) @@ -3636,7 +3635,7 @@ static int bcm43xx_init_board(struct bcm43xx_private *bcm) bcm43xx_periodic_tasks_setup(bcm); /*FIXME: This should be handled by softmac instead. */ - schedule_delayed_work(&bcm->softmac->associnfo.work, 0); + schedule_work(&bcm->softmac->associnfo.work); out: mutex_unlock(&(bcm)->mutex); @@ -4183,10 +4182,9 @@ static void __devexit bcm43xx_remove_one(struct pci_dev *pdev) /* Hard-reset the chip. Do not call this directly. * Use bcm43xx_controller_restart() */ -static void bcm43xx_chip_reset(struct work_struct *work) +static void bcm43xx_chip_reset(void *_bcm) { - struct bcm43xx_private *bcm = - container_of(work, struct bcm43xx_private, restart_work); + struct bcm43xx_private *bcm = _bcm; struct bcm43xx_phyinfo *phy; int err = -ENODEV; @@ -4213,7 +4211,7 @@ void bcm43xx_controller_restart(struct bcm43xx_private *bcm, const char *reason) if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) return; printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason); - INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset); + INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset, bcm); schedule_work(&bcm->restart_work); } diff --git a/trunk/drivers/net/wireless/hostap/hostap.h b/trunk/drivers/net/wireless/hostap/hostap.h index e89c890d16fd..e663518bd570 100644 --- a/trunk/drivers/net/wireless/hostap/hostap.h +++ b/trunk/drivers/net/wireless/hostap/hostap.h @@ -35,7 +35,7 @@ int hostap_80211_get_hdrlen(u16 fc); struct net_device_stats *hostap_get_stats(struct net_device *dev); void hostap_setup_dev(struct net_device *dev, local_info_t *local, int main_dev); -void hostap_set_multicast_list_queue(struct work_struct *work); +void hostap_set_multicast_list_queue(void *data); int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked); int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked); void hostap_cleanup(local_info_t *local); diff --git a/trunk/drivers/net/wireless/hostap/hostap_ap.c b/trunk/drivers/net/wireless/hostap/hostap_ap.c index 08bc57a4b895..ba13125024cb 100644 --- a/trunk/drivers/net/wireless/hostap/hostap_ap.c +++ b/trunk/drivers/net/wireless/hostap/hostap_ap.c @@ -49,10 +49,10 @@ MODULE_PARM_DESC(autom_ap_wds, "Add WDS connections to other APs " static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta); static void hostap_event_expired_sta(struct net_device *dev, struct sta_info *sta); -static void handle_add_proc_queue(struct work_struct *work); +static void handle_add_proc_queue(void *data); #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT -static void handle_wds_oper_queue(struct work_struct *work); +static void handle_wds_oper_queue(void *data); static void prism2_send_mgmt(struct net_device *dev, u16 type_subtype, char *body, int body_len, u8 *addr, u16 tx_cb_idx); @@ -807,7 +807,7 @@ void hostap_init_data(local_info_t *local) INIT_LIST_HEAD(&ap->sta_list); /* Initialize task queue structure for AP management */ - INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue); + INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue, ap); ap->tx_callback_idx = hostap_tx_callback_register(local, hostap_ap_tx_cb, ap); @@ -815,7 +815,7 @@ void hostap_init_data(local_info_t *local) printk(KERN_WARNING "%s: failed to register TX 
callback for " "AP\n", local->dev->name); #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT - INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue); + INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue, local); ap->tx_callback_auth = hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap); @@ -1062,10 +1062,9 @@ static int prism2_sta_proc_read(char *page, char **start, off_t off, } -static void handle_add_proc_queue(struct work_struct *work) +static void handle_add_proc_queue(void *data) { - struct ap_data *ap = container_of(work, struct ap_data, - add_sta_proc_queue); + struct ap_data *ap = (struct ap_data *) data; struct sta_info *sta; char name[20]; struct add_sta_proc_data *entry, *prev; @@ -1953,11 +1952,9 @@ static void handle_pspoll(local_info_t *local, #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT -static void handle_wds_oper_queue(struct work_struct *work) +static void handle_wds_oper_queue(void *data) { - struct ap_data *ap = container_of(work, struct ap_data, - wds_oper_queue); - local_info_t *local = ap->local; + local_info_t *local = data; struct wds_oper_data *entry, *prev; spin_lock_bh(&local->lock); diff --git a/trunk/drivers/net/wireless/hostap/hostap_hw.c b/trunk/drivers/net/wireless/hostap/hostap_hw.c index c19e68636a1c..ed00ebb6e7f4 100644 --- a/trunk/drivers/net/wireless/hostap/hostap_hw.c +++ b/trunk/drivers/net/wireless/hostap/hostap_hw.c @@ -1645,9 +1645,9 @@ static void prism2_schedule_reset(local_info_t *local) /* Called only as scheduled task after noticing card timeout in interrupt * context */ -static void handle_reset_queue(struct work_struct *work) +static void handle_reset_queue(void *data) { - local_info_t *local = container_of(work, local_info_t, reset_queue); + local_info_t *local = (local_info_t *) data; printk(KERN_DEBUG "%s: scheduled card reset\n", local->dev->name); prism2_hw_reset(local->dev); @@ -2896,10 +2896,9 @@ static void hostap_passive_scan(unsigned long data) /* Called only as a scheduled task when communications quality values should * be updated. 
*/ -static void handle_comms_qual_update(struct work_struct *work) +static void handle_comms_qual_update(void *data) { - local_info_t *local = - container_of(work, local_info_t, comms_qual_update); + local_info_t *local = data; prism2_update_comms_qual(local->dev); } @@ -3051,9 +3050,9 @@ static int prism2_set_tim(struct net_device *dev, int aid, int set) } -static void handle_set_tim_queue(struct work_struct *work) +static void handle_set_tim_queue(void *data) { - local_info_t *local = container_of(work, local_info_t, set_tim_queue); + local_info_t *local = (local_info_t *) data; struct set_tim_data *entry; u16 val; @@ -3210,15 +3209,15 @@ prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx, local->scan_channel_mask = 0xffff; /* Initialize task queue structures */ - INIT_WORK(&local->reset_queue, handle_reset_queue); + INIT_WORK(&local->reset_queue, handle_reset_queue, local); INIT_WORK(&local->set_multicast_list_queue, - hostap_set_multicast_list_queue); + hostap_set_multicast_list_queue, local->dev); - INIT_WORK(&local->set_tim_queue, handle_set_tim_queue); + INIT_WORK(&local->set_tim_queue, handle_set_tim_queue, local); INIT_LIST_HEAD(&local->set_tim_list); spin_lock_init(&local->set_tim_lock); - INIT_WORK(&local->comms_qual_update, handle_comms_qual_update); + INIT_WORK(&local->comms_qual_update, handle_comms_qual_update, local); /* Initialize tasklets for handling hardware IRQ related operations * outside hw IRQ handler */ diff --git a/trunk/drivers/net/wireless/hostap/hostap_info.c b/trunk/drivers/net/wireless/hostap/hostap_info.c index 5fd2b1ad7f5e..50f72d831cf4 100644 --- a/trunk/drivers/net/wireless/hostap/hostap_info.c +++ b/trunk/drivers/net/wireless/hostap/hostap_info.c @@ -474,9 +474,9 @@ static void handle_info_queue_scanresults(local_info_t *local) /* Called only as scheduled task after receiving info frames (used to avoid * pending too much time in HW IRQ handler). 
*/ -static void handle_info_queue(struct work_struct *work) +static void handle_info_queue(void *data) { - local_info_t *local = container_of(work, local_info_t, info_queue); + local_info_t *local = (local_info_t *) data; if (test_and_clear_bit(PRISM2_INFO_PENDING_LINKSTATUS, &local->pending_info)) @@ -493,7 +493,7 @@ void hostap_info_init(local_info_t *local) { skb_queue_head_init(&local->info_list); #ifndef PRISM2_NO_STATION_MODES - INIT_WORK(&local->info_queue, handle_info_queue); + INIT_WORK(&local->info_queue, handle_info_queue, local); #endif /* PRISM2_NO_STATION_MODES */ } diff --git a/trunk/drivers/net/wireless/hostap/hostap_main.c b/trunk/drivers/net/wireless/hostap/hostap_main.c index 0796be9d9e77..53374fcba77e 100644 --- a/trunk/drivers/net/wireless/hostap/hostap_main.c +++ b/trunk/drivers/net/wireless/hostap/hostap_main.c @@ -767,14 +767,14 @@ static int prism2_set_mac_address(struct net_device *dev, void *p) /* TODO: to be further implemented as soon as Prism2 fully supports * GroupAddresses and correct documentation is available */ -void hostap_set_multicast_list_queue(struct work_struct *work) +void hostap_set_multicast_list_queue(void *data) { - local_info_t *local = - container_of(work, local_info_t, set_multicast_list_queue); - struct net_device *dev = local->dev; + struct net_device *dev = (struct net_device *) data; struct hostap_interface *iface; + local_info_t *local; iface = netdev_priv(dev); + local = iface->local; if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE, local->is_promisc)) { printk(KERN_INFO "%s: %sabling promiscuous mode failed\n", diff --git a/trunk/drivers/net/wireless/ipw2100.c b/trunk/drivers/net/wireless/ipw2100.c index 1bcd352a813b..79607b8b877c 100644 --- a/trunk/drivers/net/wireless/ipw2100.c +++ b/trunk/drivers/net/wireless/ipw2100.c @@ -316,7 +316,7 @@ static void ipw2100_release_firmware(struct ipw2100_priv *priv, struct ipw2100_fw *fw); static int ipw2100_ucode_download(struct ipw2100_priv *priv, struct ipw2100_fw *fw); -static void ipw2100_wx_event_work(struct work_struct *work); +static void ipw2100_wx_event_work(struct ipw2100_priv *priv); static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev); static struct iw_handler_def ipw2100_wx_handler_def; @@ -679,8 +679,7 @@ static void schedule_reset(struct ipw2100_priv *priv) queue_delayed_work(priv->workqueue, &priv->reset_work, priv->reset_backoff * HZ); else - queue_delayed_work(priv->workqueue, &priv->reset_work, - 0); + queue_work(priv->workqueue, &priv->reset_work); if (priv->reset_backoff < MAX_RESET_BACKOFF) priv->reset_backoff++; @@ -1874,10 +1873,8 @@ static void ipw2100_down(struct ipw2100_priv *priv) netif_stop_queue(priv->net_dev); } -static void ipw2100_reset_adapter(struct work_struct *work) +static void ipw2100_reset_adapter(struct ipw2100_priv *priv) { - struct ipw2100_priv *priv = - container_of(work, struct ipw2100_priv, reset_work.work); unsigned long flags; union iwreq_data wrqu = { .ap_addr = { @@ -2074,9 +2071,9 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status) return; if (priv->status & STATUS_SECURITY_UPDATED) - queue_delayed_work(priv->workqueue, &priv->security_work, 0); + queue_work(priv->workqueue, &priv->security_work); - queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0); + queue_work(priv->workqueue, &priv->wx_event_work); } static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status) @@ -5527,11 +5524,8 @@ static int ipw2100_configure_security(struct ipw2100_priv *priv, int 
batch_mode) return err; } -static void ipw2100_security_work(struct work_struct *work) +static void ipw2100_security_work(struct ipw2100_priv *priv) { - struct ipw2100_priv *priv = - container_of(work, struct ipw2100_priv, security_work.work); - /* If we happen to have reconnected before we get a chance to * process this, then update the security settings--which causes * a disassociation to occur */ @@ -5754,7 +5748,7 @@ static int ipw2100_set_address(struct net_device *dev, void *p) priv->reset_backoff = 0; mutex_unlock(&priv->action_mutex); - ipw2100_reset_adapter(&priv->reset_work.work); + ipw2100_reset_adapter(priv); return 0; done: @@ -5916,10 +5910,9 @@ static const struct ethtool_ops ipw2100_ethtool_ops = { .get_drvinfo = ipw_ethtool_get_drvinfo, }; -static void ipw2100_hang_check(struct work_struct *work) +static void ipw2100_hang_check(void *adapter) { - struct ipw2100_priv *priv = - container_of(work, struct ipw2100_priv, hang_check.work); + struct ipw2100_priv *priv = adapter; unsigned long flags; u32 rtc = 0xa5a5a5a5; u32 len = sizeof(rtc); @@ -5959,10 +5952,9 @@ static void ipw2100_hang_check(struct work_struct *work) spin_unlock_irqrestore(&priv->low_lock, flags); } -static void ipw2100_rf_kill(struct work_struct *work) +static void ipw2100_rf_kill(void *adapter) { - struct ipw2100_priv *priv = - container_of(work, struct ipw2100_priv, rf_kill.work); + struct ipw2100_priv *priv = adapter; unsigned long flags; spin_lock_irqsave(&priv->low_lock, flags); @@ -6111,11 +6103,14 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, priv->workqueue = create_workqueue(DRV_NAME); - INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter); - INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work); - INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work); - INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check); - INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill); + INIT_WORK(&priv->reset_work, + (void (*)(void *))ipw2100_reset_adapter, priv); + INIT_WORK(&priv->security_work, + (void (*)(void *))ipw2100_security_work, priv); + INIT_WORK(&priv->wx_event_work, + (void (*)(void *))ipw2100_wx_event_work, priv); + INIT_WORK(&priv->hang_check, ipw2100_hang_check, priv); + INIT_WORK(&priv->rf_kill, ipw2100_rf_kill, priv); tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) ipw2100_irq_tasklet, (unsigned long)priv); @@ -8286,10 +8281,8 @@ static struct iw_handler_def ipw2100_wx_handler_def = { .get_wireless_stats = ipw2100_wx_wireless_stats, }; -static void ipw2100_wx_event_work(struct work_struct *work) +static void ipw2100_wx_event_work(struct ipw2100_priv *priv) { - struct ipw2100_priv *priv = - container_of(work, struct ipw2100_priv, wx_event_work.work); union iwreq_data wrqu; int len = ETH_ALEN; diff --git a/trunk/drivers/net/wireless/ipw2100.h b/trunk/drivers/net/wireless/ipw2100.h index de7d384d38af..55b7227198df 100644 --- a/trunk/drivers/net/wireless/ipw2100.h +++ b/trunk/drivers/net/wireless/ipw2100.h @@ -583,11 +583,11 @@ struct ipw2100_priv { struct tasklet_struct irq_tasklet; struct workqueue_struct *workqueue; - struct delayed_work reset_work; - struct delayed_work security_work; - struct delayed_work wx_event_work; - struct delayed_work hang_check; - struct delayed_work rf_kill; + struct work_struct reset_work; + struct work_struct security_work; + struct work_struct wx_event_work; + struct work_struct hang_check; + struct work_struct rf_kill; u32 interrupts; int tx_interrupts; diff --git a/trunk/drivers/net/wireless/ipw2200.c 
b/trunk/drivers/net/wireless/ipw2200.c index e82e56bb85e1..c692d01a76ca 100644 --- a/trunk/drivers/net/wireless/ipw2200.c +++ b/trunk/drivers/net/wireless/ipw2200.c @@ -187,9 +187,9 @@ static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *); static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *); static void ipw_rx_queue_replenish(void *); static int ipw_up(struct ipw_priv *); -static void ipw_bg_up(struct work_struct *work); +static void ipw_bg_up(void *); static void ipw_down(struct ipw_priv *); -static void ipw_bg_down(struct work_struct *work); +static void ipw_bg_down(void *); static int ipw_config(struct ipw_priv *); static int init_supported_rates(struct ipw_priv *priv, struct ipw_supported_rates *prates); @@ -862,12 +862,11 @@ static void ipw_led_link_on(struct ipw_priv *priv) spin_unlock_irqrestore(&priv->lock, flags); } -static void ipw_bg_led_link_on(struct work_struct *work) +static void ipw_bg_led_link_on(void *data) { - struct ipw_priv *priv = - container_of(work, struct ipw_priv, led_link_on.work); + struct ipw_priv *priv = data; mutex_lock(&priv->mutex); - ipw_led_link_on(priv); + ipw_led_link_on(data); mutex_unlock(&priv->mutex); } @@ -907,12 +906,11 @@ static void ipw_led_link_off(struct ipw_priv *priv) spin_unlock_irqrestore(&priv->lock, flags); } -static void ipw_bg_led_link_off(struct work_struct *work) +static void ipw_bg_led_link_off(void *data) { - struct ipw_priv *priv = - container_of(work, struct ipw_priv, led_link_off.work); + struct ipw_priv *priv = data; mutex_lock(&priv->mutex); - ipw_led_link_off(priv); + ipw_led_link_off(data); mutex_unlock(&priv->mutex); } @@ -987,12 +985,11 @@ static void ipw_led_activity_off(struct ipw_priv *priv) spin_unlock_irqrestore(&priv->lock, flags); } -static void ipw_bg_led_activity_off(struct work_struct *work) +static void ipw_bg_led_activity_off(void *data) { - struct ipw_priv *priv = - container_of(work, struct ipw_priv, led_act_off.work); + struct ipw_priv *priv = data; mutex_lock(&priv->mutex); - ipw_led_activity_off(priv); + ipw_led_activity_off(data); mutex_unlock(&priv->mutex); } @@ -2231,12 +2228,11 @@ static void ipw_adapter_restart(void *adapter) } } -static void ipw_bg_adapter_restart(struct work_struct *work) +static void ipw_bg_adapter_restart(void *data) { - struct ipw_priv *priv = - container_of(work, struct ipw_priv, adapter_restart); + struct ipw_priv *priv = data; mutex_lock(&priv->mutex); - ipw_adapter_restart(priv); + ipw_adapter_restart(data); mutex_unlock(&priv->mutex); } @@ -2253,12 +2249,11 @@ static void ipw_scan_check(void *data) } } -static void ipw_bg_scan_check(struct work_struct *work) +static void ipw_bg_scan_check(void *data) { - struct ipw_priv *priv = - container_of(work, struct ipw_priv, scan_check.work); + struct ipw_priv *priv = data; mutex_lock(&priv->mutex); - ipw_scan_check(priv); + ipw_scan_check(data); mutex_unlock(&priv->mutex); } @@ -3836,19 +3831,17 @@ static int ipw_disassociate(void *data) return 1; } -static void ipw_bg_disassociate(struct work_struct *work) +static void ipw_bg_disassociate(void *data) { - struct ipw_priv *priv = - container_of(work, struct ipw_priv, disassociate); + struct ipw_priv *priv = data; mutex_lock(&priv->mutex); - ipw_disassociate(priv); + ipw_disassociate(data); mutex_unlock(&priv->mutex); } -static void ipw_system_config(struct work_struct *work) +static void ipw_system_config(void *data) { - struct ipw_priv *priv = - container_of(work, struct ipw_priv, system_config); + struct ipw_priv *priv = data; #ifdef 
CONFIG_IPW2200_PROMISCUOUS if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) { @@ -4215,12 +4208,11 @@ static void ipw_gather_stats(struct ipw_priv *priv) IPW_STATS_INTERVAL); } -static void ipw_bg_gather_stats(struct work_struct *work) +static void ipw_bg_gather_stats(void *data) { - struct ipw_priv *priv = - container_of(work, struct ipw_priv, gather_stats.work); + struct ipw_priv *priv = data; mutex_lock(&priv->mutex); - ipw_gather_stats(priv); + ipw_gather_stats(data); mutex_unlock(&priv->mutex); } @@ -4276,8 +4268,8 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv, if (!(priv->status & STATUS_ROAMING)) { priv->status |= STATUS_ROAMING; if (!(priv->status & STATUS_SCANNING)) - queue_delayed_work(priv->workqueue, - &priv->request_scan, 0); + queue_work(priv->workqueue, + &priv->request_scan); } return; } @@ -4615,8 +4607,8 @@ static void ipw_rx_notification(struct ipw_priv *priv, #ifdef CONFIG_IPW2200_MONITOR if (priv->ieee->iw_mode == IW_MODE_MONITOR) { priv->status |= STATUS_SCAN_FORCED; - queue_delayed_work(priv->workqueue, - &priv->request_scan, 0); + queue_work(priv->workqueue, + &priv->request_scan); break; } priv->status &= ~STATUS_SCAN_FORCED; @@ -4639,8 +4631,8 @@ static void ipw_rx_notification(struct ipw_priv *priv, /* Don't schedule if we aborted the scan */ priv->status &= ~STATUS_ROAMING; } else if (priv->status & STATUS_SCAN_PENDING) - queue_delayed_work(priv->workqueue, - &priv->request_scan, 0); + queue_work(priv->workqueue, + &priv->request_scan); else if (priv->config & CFG_BACKGROUND_SCAN && priv->status & STATUS_ASSOCIATED) queue_delayed_work(priv->workqueue, @@ -5063,12 +5055,11 @@ static void ipw_rx_queue_replenish(void *data) ipw_rx_queue_restock(priv); } -static void ipw_bg_rx_queue_replenish(struct work_struct *work) +static void ipw_bg_rx_queue_replenish(void *data) { - struct ipw_priv *priv = - container_of(work, struct ipw_priv, rx_replenish); + struct ipw_priv *priv = data; mutex_lock(&priv->mutex); - ipw_rx_queue_replenish(priv); + ipw_rx_queue_replenish(data); mutex_unlock(&priv->mutex); } @@ -5498,10 +5489,9 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, return 1; } -static void ipw_merge_adhoc_network(struct work_struct *work) +static void ipw_merge_adhoc_network(void *data) { - struct ipw_priv *priv = - container_of(work, struct ipw_priv, merge_networks); + struct ipw_priv *priv = data; struct ieee80211_network *network = NULL; struct ipw_network_match match = { .network = priv->assoc_network @@ -5958,12 +5948,11 @@ static void ipw_adhoc_check(void *data) priv->assoc_request.beacon_interval); } -static void ipw_bg_adhoc_check(struct work_struct *work) +static void ipw_bg_adhoc_check(void *data) { - struct ipw_priv *priv = - container_of(work, struct ipw_priv, adhoc_check.work); + struct ipw_priv *priv = data; mutex_lock(&priv->mutex); - ipw_adhoc_check(priv); + ipw_adhoc_check(data); mutex_unlock(&priv->mutex); } @@ -6310,26 +6299,19 @@ static int ipw_request_scan_helper(struct ipw_priv *priv, int type) return err; } -static void ipw_request_passive_scan(struct work_struct *work) -{ - struct ipw_priv *priv = - container_of(work, struct ipw_priv, request_passive_scan); - ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE); +static int ipw_request_passive_scan(struct ipw_priv *priv) { + return ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE); } -static void ipw_request_scan(struct work_struct *work) -{ - struct ipw_priv *priv = - container_of(work, struct ipw_priv, request_scan.work); - 
ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE); +static int ipw_request_scan(struct ipw_priv *priv) { + return ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE); } -static void ipw_bg_abort_scan(struct work_struct *work) +static void ipw_bg_abort_scan(void *data) { - struct ipw_priv *priv = - container_of(work, struct ipw_priv, abort_scan); + struct ipw_priv *priv = data; mutex_lock(&priv->mutex); - ipw_abort_scan(priv); + ipw_abort_scan(data); mutex_unlock(&priv->mutex); } @@ -7102,10 +7084,9 @@ static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv, /* * background support to run QoS activate functionality */ -static void ipw_bg_qos_activate(struct work_struct *work) +static void ipw_bg_qos_activate(void *data) { - struct ipw_priv *priv = - container_of(work, struct ipw_priv, qos_activate); + struct ipw_priv *priv = data; if (priv == NULL) return; @@ -7413,12 +7394,11 @@ static void ipw_roam(void *data) priv->status &= ~STATUS_ROAMING; } -static void ipw_bg_roam(struct work_struct *work) +static void ipw_bg_roam(void *data) { - struct ipw_priv *priv = - container_of(work, struct ipw_priv, roam); + struct ipw_priv *priv = data; mutex_lock(&priv->mutex); - ipw_roam(priv); + ipw_roam(data); mutex_unlock(&priv->mutex); } @@ -7499,8 +7479,8 @@ static int ipw_associate(void *data) &priv->request_scan, SCAN_INTERVAL); else - queue_delayed_work(priv->workqueue, - &priv->request_scan, 0); + queue_work(priv->workqueue, + &priv->request_scan); } return 0; @@ -7511,12 +7491,11 @@ static int ipw_associate(void *data) return 1; } -static void ipw_bg_associate(struct work_struct *work) +static void ipw_bg_associate(void *data) { - struct ipw_priv *priv = - container_of(work, struct ipw_priv, associate); + struct ipw_priv *priv = data; mutex_lock(&priv->mutex); - ipw_associate(priv); + ipw_associate(data); mutex_unlock(&priv->mutex); } @@ -9431,7 +9410,7 @@ static int ipw_wx_set_scan(struct net_device *dev, IPW_DEBUG_WX("Start scan\n"); - queue_delayed_work(priv->workqueue, &priv->request_scan, 0); + queue_work(priv->workqueue, &priv->request_scan); return 0; } @@ -10568,12 +10547,11 @@ static void ipw_rf_kill(void *adapter) spin_unlock_irqrestore(&priv->lock, flags); } -static void ipw_bg_rf_kill(struct work_struct *work) +static void ipw_bg_rf_kill(void *data) { - struct ipw_priv *priv = - container_of(work, struct ipw_priv, rf_kill.work); + struct ipw_priv *priv = data; mutex_lock(&priv->mutex); - ipw_rf_kill(priv); + ipw_rf_kill(data); mutex_unlock(&priv->mutex); } @@ -10604,12 +10582,11 @@ static void ipw_link_up(struct ipw_priv *priv) queue_delayed_work(priv->workqueue, &priv->request_scan, HZ); } -static void ipw_bg_link_up(struct work_struct *work) +static void ipw_bg_link_up(void *data) { - struct ipw_priv *priv = - container_of(work, struct ipw_priv, link_up); + struct ipw_priv *priv = data; mutex_lock(&priv->mutex); - ipw_link_up(priv); + ipw_link_up(data); mutex_unlock(&priv->mutex); } @@ -10629,16 +10606,15 @@ static void ipw_link_down(struct ipw_priv *priv) if (!(priv->status & STATUS_EXIT_PENDING)) { /* Queue up another scan... 
*/ - queue_delayed_work(priv->workqueue, &priv->request_scan, 0); + queue_work(priv->workqueue, &priv->request_scan); } } -static void ipw_bg_link_down(struct work_struct *work) +static void ipw_bg_link_down(void *data) { - struct ipw_priv *priv = - container_of(work, struct ipw_priv, link_down); + struct ipw_priv *priv = data; mutex_lock(&priv->mutex); - ipw_link_down(priv); + ipw_link_down(data); mutex_unlock(&priv->mutex); } @@ -10650,30 +10626,38 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv) init_waitqueue_head(&priv->wait_command_queue); init_waitqueue_head(&priv->wait_state); - INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check); - INIT_WORK(&priv->associate, ipw_bg_associate); - INIT_WORK(&priv->disassociate, ipw_bg_disassociate); - INIT_WORK(&priv->system_config, ipw_system_config); - INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish); - INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart); - INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill); - INIT_WORK(&priv->up, ipw_bg_up); - INIT_WORK(&priv->down, ipw_bg_down); - INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan); - INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan); - INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats); - INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan); - INIT_WORK(&priv->roam, ipw_bg_roam); - INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check); - INIT_WORK(&priv->link_up, ipw_bg_link_up); - INIT_WORK(&priv->link_down, ipw_bg_link_down); - INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on); - INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off); - INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off); - INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network); + INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv); + INIT_WORK(&priv->associate, ipw_bg_associate, priv); + INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv); + INIT_WORK(&priv->system_config, ipw_system_config, priv); + INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv); + INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv); + INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv); + INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv); + INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv); + INIT_WORK(&priv->request_scan, + (void (*)(void *))ipw_request_scan, priv); + INIT_WORK(&priv->request_passive_scan, + (void (*)(void *))ipw_request_passive_scan, priv); + INIT_WORK(&priv->gather_stats, + (void (*)(void *))ipw_bg_gather_stats, priv); + INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv); + INIT_WORK(&priv->roam, ipw_bg_roam, priv); + INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv); + INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv); + INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv); + INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on, + priv); + INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off, + priv); + INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off, + priv); + INIT_WORK(&priv->merge_networks, + (void (*)(void *))ipw_merge_adhoc_network, priv); #ifdef CONFIG_IPW2200_QOS - INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate); + INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate, + priv); #endif /* CONFIG_IPW2200_QOS */ tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) @@ -11206,8 +11190,7 @@ static int ipw_up(struct ipw_priv *priv) /* If configure to try and 
auto-associate, kick * off a scan. */ - queue_delayed_work(priv->workqueue, - &priv->request_scan, 0); + queue_work(priv->workqueue, &priv->request_scan); return 0; } @@ -11228,12 +11211,11 @@ static int ipw_up(struct ipw_priv *priv) return -EIO; } -static void ipw_bg_up(struct work_struct *work) +static void ipw_bg_up(void *data) { - struct ipw_priv *priv = - container_of(work, struct ipw_priv, up); + struct ipw_priv *priv = data; mutex_lock(&priv->mutex); - ipw_up(priv); + ipw_up(data); mutex_unlock(&priv->mutex); } @@ -11300,12 +11282,11 @@ static void ipw_down(struct ipw_priv *priv) ipw_led_radio_off(priv); } -static void ipw_bg_down(struct work_struct *work) +static void ipw_bg_down(void *data) { - struct ipw_priv *priv = - container_of(work, struct ipw_priv, down); + struct ipw_priv *priv = data; mutex_lock(&priv->mutex); - ipw_down(priv); + ipw_down(data); mutex_unlock(&priv->mutex); } diff --git a/trunk/drivers/net/wireless/ipw2200.h b/trunk/drivers/net/wireless/ipw2200.h index 626a240a87d8..dad5eedefbf1 100644 --- a/trunk/drivers/net/wireless/ipw2200.h +++ b/trunk/drivers/net/wireless/ipw2200.h @@ -1290,21 +1290,21 @@ struct ipw_priv { struct workqueue_struct *workqueue; - struct delayed_work adhoc_check; + struct work_struct adhoc_check; struct work_struct associate; struct work_struct disassociate; struct work_struct system_config; struct work_struct rx_replenish; - struct delayed_work request_scan; + struct work_struct request_scan; struct work_struct request_passive_scan; struct work_struct adapter_restart; - struct delayed_work rf_kill; + struct work_struct rf_kill; struct work_struct up; struct work_struct down; - struct delayed_work gather_stats; + struct work_struct gather_stats; struct work_struct abort_scan; struct work_struct roam; - struct delayed_work scan_check; + struct work_struct scan_check; struct work_struct link_up; struct work_struct link_down; @@ -1319,9 +1319,9 @@ struct ipw_priv { u32 led_ofdm_on; u32 led_ofdm_off; - struct delayed_work led_link_on; - struct delayed_work led_link_off; - struct delayed_work led_act_off; + struct work_struct led_link_on; + struct work_struct led_link_off; + struct work_struct led_act_off; struct work_struct merge_networks; struct ipw_cmd_log *cmdlog; diff --git a/trunk/drivers/net/wireless/orinoco.c b/trunk/drivers/net/wireless/orinoco.c index 936c888e03e1..336cabac13b3 100644 --- a/trunk/drivers/net/wireless/orinoco.c +++ b/trunk/drivers/net/wireless/orinoco.c @@ -980,11 +980,9 @@ static void print_linkstatus(struct net_device *dev, u16 status) } /* Search scan results for requested BSSID, join it if found */ -static void orinoco_join_ap(struct work_struct *work) +static void orinoco_join_ap(struct net_device *dev) { - struct orinoco_private *priv = - container_of(work, struct orinoco_private, join_work); - struct net_device *dev = priv->ndev; + struct orinoco_private *priv = netdev_priv(dev); struct hermes *hw = &priv->hw; int err; unsigned long flags; @@ -1057,11 +1055,9 @@ static void orinoco_join_ap(struct work_struct *work) } /* Send new BSSID to userspace */ -static void orinoco_send_wevents(struct work_struct *work) +static void orinoco_send_wevents(struct net_device *dev) { - struct orinoco_private *priv = - container_of(work, struct orinoco_private, wevent_work); - struct net_device *dev = priv->ndev; + struct orinoco_private *priv = netdev_priv(dev); struct hermes *hw = &priv->hw; union iwreq_data wrqu; int err; @@ -1868,11 +1864,9 @@ __orinoco_set_multicast_list(struct net_device *dev) /* This must be called from 
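The ipw2200 hunks above (and the orinoco, prism54, shpchp, pcmcia, oprofile and dasd hunks later in this diff) all flip between the same two workqueue idioms: on one side the handler takes a struct work_struct * and recovers its private data with container_of(), with re-queued items typed as struct delayed_work; on the other side the handler takes a void *data pointer stored by a three-argument INIT_WORK(), and every item is a plain struct work_struct. A minimal sketch of both spellings, using a made-up example_priv structure rather than code from any driver in this patch:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Hypothetical private structure, for illustration only. */
struct example_priv {
	struct workqueue_struct *wq;
	struct work_struct	plain_work;	/* run as soon as possible */
	struct delayed_work	timed_work;	/* run after a delay */
};

/*
 * container_of() idiom: the handler is passed the work item itself and
 * walks back to the enclosing structure.  For a delayed_work the
 * work_struct is embedded one level deeper, hence the ".work".
 */
static void example_plain_handler(struct work_struct *work)
{
	struct example_priv *priv =
		container_of(work, struct example_priv, plain_work);
	/* ... use priv ... */
}

static void example_timed_handler(struct work_struct *work)
{
	struct example_priv *priv =
		container_of(work, struct example_priv, timed_work.work);
	/* ... use priv ... */
}

static void example_setup(struct example_priv *priv)
{
	/* Two-argument initializers: no context pointer is stored. */
	INIT_WORK(&priv->plain_work, example_plain_handler);
	INIT_DELAYED_WORK(&priv->timed_work, example_timed_handler);

	queue_work(priv->wq, &priv->plain_work);
	/* Delay in jiffies; the hunks above use 0 (immediate) and HZ. */
	queue_delayed_work(priv->wq, &priv->timed_work, HZ);

	/*
	 * The other idiom seen in these hunks: handlers are declared as
	 * "void handler(void *data)", the context pointer goes into a
	 * three-argument INIT_WORK(), and items that need no delay are
	 * plain work_structs queued with queue_work():
	 *
	 *	INIT_WORK(&priv->plain_work, handler, priv);
	 *	queue_work(priv->wq, &priv->plain_work);
	 */
}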
user context, without locks held - use * schedule_work() */ -static void orinoco_reset(struct work_struct *work) +static void orinoco_reset(struct net_device *dev) { - struct orinoco_private *priv = - container_of(work, struct orinoco_private, reset_work); - struct net_device *dev = priv->ndev; + struct orinoco_private *priv = netdev_priv(dev); struct hermes *hw = &priv->hw; int err; unsigned long flags; @@ -2440,9 +2434,9 @@ struct net_device *alloc_orinocodev(int sizeof_card, priv->hw_unavailable = 1; /* orinoco_init() must clear this * before anything else touches the * hardware */ - INIT_WORK(&priv->reset_work, orinoco_reset); - INIT_WORK(&priv->join_work, orinoco_join_ap); - INIT_WORK(&priv->wevent_work, orinoco_send_wevents); + INIT_WORK(&priv->reset_work, (void (*)(void *))orinoco_reset, dev); + INIT_WORK(&priv->join_work, (void (*)(void *))orinoco_join_ap, dev); + INIT_WORK(&priv->wevent_work, (void (*)(void *))orinoco_send_wevents, dev); netif_carrier_off(dev); priv->last_linkstatus = 0xffff; @@ -3614,7 +3608,7 @@ static int orinoco_ioctl_reset(struct net_device *dev, printk(KERN_DEBUG "%s: Forcing reset!\n", dev->name); /* Firmware reset */ - orinoco_reset(&priv->reset_work); + orinoco_reset(dev); } else { printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name); @@ -4160,7 +4154,7 @@ static int orinoco_ioctl_commit(struct net_device *dev, return 0; if (priv->broken_disableport) { - orinoco_reset(&priv->reset_work); + orinoco_reset(dev); return 0; } diff --git a/trunk/drivers/net/wireless/prism54/isl_ioctl.c b/trunk/drivers/net/wireless/prism54/isl_ioctl.c index a87eb51886c8..4a20e45de3ca 100644 --- a/trunk/drivers/net/wireless/prism54/isl_ioctl.c +++ b/trunk/drivers/net/wireless/prism54/isl_ioctl.c @@ -157,9 +157,8 @@ prism54_mib_init(islpci_private *priv) * schedule_work(), thus we can as well use sleeping semaphore * locking */ void -prism54_update_stats(struct work_struct *work) +prism54_update_stats(islpci_private *priv) { - islpci_private *priv = container_of(work, islpci_private, stats_work); char *data; int j; struct obj_bss bss, *bss2; @@ -2494,10 +2493,9 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, * interrupt context, no locks held. 
*/ void -prism54_process_trap(struct work_struct *work) +prism54_process_trap(void *data) { - struct islpci_mgmtframe *frame = - container_of(work, struct islpci_mgmtframe, ws); + struct islpci_mgmtframe *frame = data; struct net_device *ndev = frame->ndev; enum oid_num_t n = mgt_oidtonum(frame->header->oid); diff --git a/trunk/drivers/net/wireless/prism54/isl_ioctl.h b/trunk/drivers/net/wireless/prism54/isl_ioctl.h index bcfbfb9281d2..e8183d30c52e 100644 --- a/trunk/drivers/net/wireless/prism54/isl_ioctl.h +++ b/trunk/drivers/net/wireless/prism54/isl_ioctl.h @@ -31,12 +31,12 @@ void prism54_mib_init(islpci_private *); struct iw_statistics *prism54_get_wireless_stats(struct net_device *); -void prism54_update_stats(struct work_struct *); +void prism54_update_stats(islpci_private *); void prism54_acl_init(struct islpci_acl *); void prism54_acl_clean(struct islpci_acl *); -void prism54_process_trap(struct work_struct *); +void prism54_process_trap(void *); void prism54_wpa_bss_ie_init(islpci_private *priv); void prism54_wpa_bss_ie_clean(islpci_private *priv); diff --git a/trunk/drivers/net/wireless/prism54/islpci_dev.c b/trunk/drivers/net/wireless/prism54/islpci_dev.c index f057fd9fcd79..1e0603ca436c 100644 --- a/trunk/drivers/net/wireless/prism54/islpci_dev.c +++ b/trunk/drivers/net/wireless/prism54/islpci_dev.c @@ -860,10 +860,11 @@ islpci_setup(struct pci_dev *pdev) priv->state_off = 1; /* initialize workqueue's */ - INIT_WORK(&priv->stats_work, prism54_update_stats); + INIT_WORK(&priv->stats_work, + (void (*)(void *)) prism54_update_stats, priv); priv->stats_timestamp = 0; - INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake); + INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake, priv); priv->reset_task_pending = 0; /* allocate various memory areas */ diff --git a/trunk/drivers/net/wireless/prism54/islpci_eth.c b/trunk/drivers/net/wireless/prism54/islpci_eth.c index b1122912ee2d..676d83813dc8 100644 --- a/trunk/drivers/net/wireless/prism54/islpci_eth.c +++ b/trunk/drivers/net/wireless/prism54/islpci_eth.c @@ -480,9 +480,9 @@ islpci_eth_receive(islpci_private *priv) } void -islpci_do_reset_and_wake(struct work_struct *work) +islpci_do_reset_and_wake(void *data) { - islpci_private *priv = container_of(work, islpci_private, reset_task); + islpci_private *priv = data; islpci_reset(priv, 1); priv->reset_task_pending = 0; diff --git a/trunk/drivers/net/wireless/prism54/islpci_eth.h b/trunk/drivers/net/wireless/prism54/islpci_eth.h index 5bf820defbd0..26789454067c 100644 --- a/trunk/drivers/net/wireless/prism54/islpci_eth.h +++ b/trunk/drivers/net/wireless/prism54/islpci_eth.h @@ -67,6 +67,6 @@ void islpci_eth_cleanup_transmit(islpci_private *, isl38xx_control_block *); int islpci_eth_transmit(struct sk_buff *, struct net_device *); int islpci_eth_receive(islpci_private *); void islpci_eth_tx_timeout(struct net_device *); -void islpci_do_reset_and_wake(struct work_struct *); +void islpci_do_reset_and_wake(void *data); #endif /* _ISL_GEN_H */ diff --git a/trunk/drivers/net/wireless/prism54/islpci_mgt.c b/trunk/drivers/net/wireless/prism54/islpci_mgt.c index 2246f7930b4e..036a875054c9 100644 --- a/trunk/drivers/net/wireless/prism54/islpci_mgt.c +++ b/trunk/drivers/net/wireless/prism54/islpci_mgt.c @@ -386,7 +386,7 @@ islpci_mgt_receive(struct net_device *ndev) /* Create work to handle trap out of interrupt * context. 
*/ - INIT_WORK(&frame->ws, prism54_process_trap); + INIT_WORK(&frame->ws, prism54_process_trap, frame); schedule_work(&frame->ws); } else { diff --git a/trunk/drivers/net/wireless/zd1211rw/zd_mac.c b/trunk/drivers/net/wireless/zd1211rw/zd_mac.c index f1573a9c2336..2696f95b9278 100644 --- a/trunk/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/trunk/drivers/net/wireless/zd1211rw/zd_mac.c @@ -32,8 +32,8 @@ static void ieee_init(struct ieee80211_device *ieee); static void softmac_init(struct ieee80211softmac_device *sm); -static void set_rts_cts_work(struct work_struct *work); -static void set_basic_rates_work(struct work_struct *work); +static void set_rts_cts_work(void *d); +static void set_basic_rates_work(void *d); static void housekeeping_init(struct zd_mac *mac); static void housekeeping_enable(struct zd_mac *mac); @@ -48,8 +48,8 @@ int zd_mac_init(struct zd_mac *mac, memset(mac, 0, sizeof(*mac)); spin_lock_init(&mac->lock); mac->netdev = netdev; - INIT_DELAYED_WORK(&mac->set_rts_cts_work, set_rts_cts_work); - INIT_DELAYED_WORK(&mac->set_basic_rates_work, set_basic_rates_work); + INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work, mac); + INIT_WORK(&mac->set_basic_rates_work, set_basic_rates_work, mac); ieee_init(ieee); softmac_init(ieee80211_priv(netdev)); @@ -366,10 +366,9 @@ static void try_enable_tx(struct zd_mac *mac) spin_unlock_irqrestore(&mac->lock, flags); } -static void set_rts_cts_work(struct work_struct *work) +static void set_rts_cts_work(void *d) { - struct zd_mac *mac = - container_of(work, struct zd_mac, set_rts_cts_work.work); + struct zd_mac *mac = d; unsigned long flags; u8 rts_rate; unsigned int short_preamble; @@ -388,10 +387,9 @@ static void set_rts_cts_work(struct work_struct *work) try_enable_tx(mac); } -static void set_basic_rates_work(struct work_struct *work) +static void set_basic_rates_work(void *d) { - struct zd_mac *mac = - container_of(work, struct zd_mac, set_basic_rates_work.work); + struct zd_mac *mac = d; unsigned long flags; u16 basic_rates; @@ -469,13 +467,12 @@ static void bssinfo_change(struct net_device *netdev, u32 changes) if (need_set_rts_cts && !mac->updating_rts_rate) { mac->updating_rts_rate = 1; netif_stop_queue(mac->netdev); - queue_delayed_work(zd_workqueue, &mac->set_rts_cts_work, 0); + queue_work(zd_workqueue, &mac->set_rts_cts_work); } if (need_set_rates && !mac->updating_basic_rates) { mac->updating_basic_rates = 1; netif_stop_queue(mac->netdev); - queue_delayed_work(zd_workqueue, &mac->set_basic_rates_work, - 0); + queue_work(zd_workqueue, &mac->set_basic_rates_work); } spin_unlock_irqrestore(&mac->lock, flags); } @@ -1185,10 +1182,9 @@ struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev) #define LINK_LED_WORK_DELAY HZ -static void link_led_handler(struct work_struct *work) +static void link_led_handler(void *p) { - struct zd_mac *mac = - container_of(work, struct zd_mac, housekeeping.link_led_work.work); + struct zd_mac *mac = p; struct zd_chip *chip = &mac->chip; struct ieee80211softmac_device *sm = ieee80211_priv(mac->netdev); int is_associated; @@ -1209,7 +1205,7 @@ static void link_led_handler(struct work_struct *work) static void housekeeping_init(struct zd_mac *mac) { - INIT_DELAYED_WORK(&mac->housekeeping.link_led_work, link_led_handler); + INIT_WORK(&mac->housekeeping.link_led_work, link_led_handler, mac); } static void housekeeping_enable(struct zd_mac *mac) diff --git a/trunk/drivers/net/wireless/zd1211rw/zd_mac.h b/trunk/drivers/net/wireless/zd1211rw/zd_mac.h index d4e8b870409d..5dcfb251f02e 100644 --- 
a/trunk/drivers/net/wireless/zd1211rw/zd_mac.h +++ b/trunk/drivers/net/wireless/zd1211rw/zd_mac.h @@ -119,7 +119,7 @@ struct rx_status { #define ZD_RX_ERROR 0x80 struct housekeeping { - struct delayed_work link_led_work; + struct work_struct link_led_work; }; #define ZD_MAC_STATS_BUFFER_SIZE 16 @@ -133,8 +133,8 @@ struct zd_mac { struct iw_statistics iw_stats; struct housekeeping housekeeping; - struct delayed_work set_rts_cts_work; - struct delayed_work set_basic_rates_work; + struct work_struct set_rts_cts_work; + struct work_struct set_basic_rates_work; unsigned int stats_count; u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE]; diff --git a/trunk/drivers/oprofile/cpu_buffer.c b/trunk/drivers/oprofile/cpu_buffer.c index a83c3db7d18f..fc4bc9b94c74 100644 --- a/trunk/drivers/oprofile/cpu_buffer.c +++ b/trunk/drivers/oprofile/cpu_buffer.c @@ -29,7 +29,7 @@ struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned; -static void wq_sync_buffer(struct work_struct *work); +static void wq_sync_buffer(void *); #define DEFAULT_TIMER_EXPIRE (HZ / 10) static int work_enabled; @@ -65,7 +65,7 @@ int alloc_cpu_buffers(void) b->sample_received = 0; b->sample_lost_overflow = 0; b->cpu = i; - INIT_DELAYED_WORK(&b->work, wq_sync_buffer); + INIT_WORK(&b->work, wq_sync_buffer, b); } return 0; @@ -282,10 +282,9 @@ void oprofile_add_trace(unsigned long pc) * By using schedule_delayed_work_on and then schedule_delayed_work * we guarantee this will stay on the correct cpu */ -static void wq_sync_buffer(struct work_struct *work) +static void wq_sync_buffer(void * data) { - struct oprofile_cpu_buffer * b = - container_of(work, struct oprofile_cpu_buffer, work.work); + struct oprofile_cpu_buffer * b = data; if (b->cpu != smp_processor_id()) { printk("WQ on CPU%d, prefer CPU%d\n", smp_processor_id(), b->cpu); diff --git a/trunk/drivers/oprofile/cpu_buffer.h b/trunk/drivers/oprofile/cpu_buffer.h index 49900d9e3235..09abb80e0570 100644 --- a/trunk/drivers/oprofile/cpu_buffer.h +++ b/trunk/drivers/oprofile/cpu_buffer.h @@ -43,7 +43,7 @@ struct oprofile_cpu_buffer { unsigned long sample_lost_overflow; unsigned long backtrace_aborted; int cpu; - struct delayed_work work; + struct work_struct work; } ____cacheline_aligned; extern struct oprofile_cpu_buffer cpu_buffer[]; diff --git a/trunk/drivers/parport/parport_pc.c b/trunk/drivers/parport/parport_pc.c index b61c17b3e298..39c96641bc72 100644 --- a/trunk/drivers/parport/parport_pc.c +++ b/trunk/drivers/parport/parport_pc.c @@ -1975,7 +1975,7 @@ static int __devinit parport_ECPPS2_supported(struct parport *pb){return 0;} /* --- IRQ detection -------------------------------------- */ /* Only if supports ECP mode */ -static int programmable_irq_support(struct parport *pb) +static int __devinit programmable_irq_support(struct parport *pb) { int irq, intrLine; unsigned char oecr = inb (ECONTROL (pb)); @@ -1992,7 +1992,7 @@ static int programmable_irq_support(struct parport *pb) return irq; } -static int irq_probe_ECP(struct parport *pb) +static int __devinit irq_probe_ECP(struct parport *pb) { int i; unsigned long irqs; @@ -2020,7 +2020,7 @@ static int irq_probe_ECP(struct parport *pb) * This detection seems that only works in National Semiconductors * This doesn't work in SMC, LGS, and Winbond */ -static int irq_probe_EPP(struct parport *pb) +static int __devinit irq_probe_EPP(struct parport *pb) { #ifndef ADVANCED_DETECT return PARPORT_IRQ_NONE; @@ -2059,7 +2059,7 @@ static int irq_probe_EPP(struct parport *pb) #endif /* Advanced detection */ } -static int 
irq_probe_SPP(struct parport *pb) +static int __devinit irq_probe_SPP(struct parport *pb) { /* Don't even try to do this. */ return PARPORT_IRQ_NONE; @@ -2747,7 +2747,6 @@ enum parport_pc_pci_cards { titan_1284p2, avlab_1p, avlab_2p, - oxsemi_952, oxsemi_954, oxsemi_840, aks_0100, @@ -2823,7 +2822,6 @@ static struct parport_pc_pci { /* avlab_2p */ { 2, { { 0, 1}, { 2, 3 },} }, /* The Oxford Semi cards are unusual: 954 doesn't support ECP, * and 840 locks up if you write 1 to bit 2! */ - /* oxsemi_952 */ { 1, { { 0, 1 }, } }, /* oxsemi_954 */ { 1, { { 0, -1 }, } }, /* oxsemi_840 */ { 1, { { 0, -1 }, } }, /* aks_0100 */ { 1, { { 0, -1 }, } }, @@ -2897,8 +2895,6 @@ static const struct pci_device_id parport_pc_pci_tbl[] = { /* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/ { 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p}, /* AFAVLAB_TK9902 */ { 0x14db, 0x2121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_2p}, - { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI952PP, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_952 }, { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954PP, PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_954 }, { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_12PCI840, diff --git a/trunk/drivers/pci/hotplug/shpchp.h b/trunk/drivers/pci/hotplug/shpchp.h index 50757695844f..ea2087c34149 100644 --- a/trunk/drivers/pci/hotplug/shpchp.h +++ b/trunk/drivers/pci/hotplug/shpchp.h @@ -70,7 +70,7 @@ struct slot { struct hotplug_slot *hotplug_slot; struct list_head slot_list; char name[SLOT_NAME_SIZE]; - struct delayed_work work; /* work for button event */ + struct work_struct work; /* work for button event */ struct mutex lock; }; @@ -187,7 +187,7 @@ extern int shpchp_configure_device(struct slot *p_slot); extern int shpchp_unconfigure_device(struct slot *p_slot); extern void shpchp_remove_ctrl_files(struct controller *ctrl); extern void cleanup_slots(struct controller *ctrl); -extern void queue_pushbutton_work(struct work_struct *work); +extern void queue_pushbutton_work(void *data); #ifdef CONFIG_ACPI diff --git a/trunk/drivers/pci/hotplug/shpchp_core.c b/trunk/drivers/pci/hotplug/shpchp_core.c index 4eac85b3d90e..235c18a22393 100644 --- a/trunk/drivers/pci/hotplug/shpchp_core.c +++ b/trunk/drivers/pci/hotplug/shpchp_core.c @@ -159,7 +159,7 @@ static int init_slots(struct controller *ctrl) goto error_info; slot->number = sun; - INIT_DELAYED_WORK(&slot->work, queue_pushbutton_work); + INIT_WORK(&slot->work, queue_pushbutton_work, slot); /* register this slot with the hotplug pci core */ hotplug_slot->private = slot; diff --git a/trunk/drivers/pci/hotplug/shpchp_ctrl.c b/trunk/drivers/pci/hotplug/shpchp_ctrl.c index 158ac7836096..c39901dbff20 100644 --- a/trunk/drivers/pci/hotplug/shpchp_ctrl.c +++ b/trunk/drivers/pci/hotplug/shpchp_ctrl.c @@ -36,7 +36,7 @@ #include "../pci.h" #include "shpchp.h" -static void interrupt_event_handler(struct work_struct *work); +static void interrupt_event_handler(void *data); static int shpchp_enable_slot(struct slot *p_slot); static int shpchp_disable_slot(struct slot *p_slot); @@ -50,7 +50,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type) info->event_type = event_type; info->p_slot = p_slot; - INIT_WORK(&info->work, interrupt_event_handler); + INIT_WORK(&info->work, interrupt_event_handler, info); schedule_work(&info->work); @@ -408,10 +408,9 @@ struct pushbutton_work_info { * Handles all pending events and exits. 
* */ -static void shpchp_pushbutton_thread(struct work_struct *work) +static void shpchp_pushbutton_thread(void *data) { - struct pushbutton_work_info *info = - container_of(work, struct pushbutton_work_info, work); + struct pushbutton_work_info *info = data; struct slot *p_slot = info->p_slot; mutex_lock(&p_slot->lock); @@ -437,9 +436,9 @@ static void shpchp_pushbutton_thread(struct work_struct *work) kfree(info); } -void queue_pushbutton_work(struct work_struct *work) +void queue_pushbutton_work(void *data) { - struct slot *p_slot = container_of(work, struct slot, work.work); + struct slot *p_slot = data; struct pushbutton_work_info *info; info = kmalloc(sizeof(*info), GFP_KERNEL); @@ -448,7 +447,7 @@ void queue_pushbutton_work(struct work_struct *work) return; } info->p_slot = p_slot; - INIT_WORK(&info->work, shpchp_pushbutton_thread); + INIT_WORK(&info->work, shpchp_pushbutton_thread, info); mutex_lock(&p_slot->lock); switch (p_slot->state) { @@ -542,9 +541,9 @@ static void handle_button_press_event(struct slot *p_slot) } } -static void interrupt_event_handler(struct work_struct *work) +static void interrupt_event_handler(void *data) { - struct event_info *info = container_of(work, struct event_info, work); + struct event_info *info = data; struct slot *p_slot = info->p_slot; mutex_lock(&p_slot->lock); diff --git a/trunk/drivers/pci/msi.c b/trunk/drivers/pci/msi.c index 9168401401bc..9fc9a34ef24a 100644 --- a/trunk/drivers/pci/msi.c +++ b/trunk/drivers/pci/msi.c @@ -26,7 +26,7 @@ static DEFINE_SPINLOCK(msi_lock); static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL }; -static struct kmem_cache* msi_cachep; +static kmem_cache_t* msi_cachep; static int pci_msi_enable = 1; diff --git a/trunk/drivers/pci/pcie/aer/aerdrv.c b/trunk/drivers/pci/pcie/aer/aerdrv.c index 55866b6b26fa..04c43ef529ac 100644 --- a/trunk/drivers/pci/pcie/aer/aerdrv.c +++ b/trunk/drivers/pci/pcie/aer/aerdrv.c @@ -160,7 +160,7 @@ static struct aer_rpc* aer_alloc_rpc(struct pcie_device *dev) rpc->e_lock = SPIN_LOCK_UNLOCKED; rpc->rpd = dev; - INIT_WORK(&rpc->dpc_handler, aer_isr); + INIT_WORK(&rpc->dpc_handler, aer_isr, (void *)dev); rpc->prod_idx = rpc->cons_idx = 0; mutex_init(&rpc->rpc_mutex); init_waitqueue_head(&rpc->wait_release); diff --git a/trunk/drivers/pci/pcie/aer/aerdrv.h b/trunk/drivers/pci/pcie/aer/aerdrv.h index 3c0a58f64dd8..daf0cad88fc8 100644 --- a/trunk/drivers/pci/pcie/aer/aerdrv.h +++ b/trunk/drivers/pci/pcie/aer/aerdrv.h @@ -118,7 +118,7 @@ extern struct bus_type pcie_port_bus_type; extern void aer_enable_rootport(struct aer_rpc *rpc); extern void aer_delete_rootport(struct aer_rpc *rpc); extern int aer_init(struct pcie_device *dev); -extern void aer_isr(struct work_struct *work); +extern void aer_isr(void *context); extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); extern int aer_osc_setup(struct pci_dev *dev); diff --git a/trunk/drivers/pci/pcie/aer/aerdrv_core.c b/trunk/drivers/pci/pcie/aer/aerdrv_core.c index 08e13033ced8..1c7e660d6535 100644 --- a/trunk/drivers/pci/pcie/aer/aerdrv_core.c +++ b/trunk/drivers/pci/pcie/aer/aerdrv_core.c @@ -690,14 +690,14 @@ static void aer_isr_one_error(struct pcie_device *p_device, /** * aer_isr - consume errors detected by root port - * @work: definition of this work item + * @context: pointer to a private data of pcie device * * Invoked, as DPC, when root port records new detected error **/ -void aer_isr(struct work_struct *work) +void aer_isr(void *context) { - struct aer_rpc *rpc = container_of(work, struct 
aer_rpc, dpc_handler); - struct pcie_device *p_device = rpc->rpd; + struct pcie_device *p_device = (struct pcie_device *) context; + struct aer_rpc *rpc = get_service_data(p_device); struct aer_err_source *e_src; mutex_lock(&rpc->rpc_mutex); diff --git a/trunk/drivers/pci/probe.c b/trunk/drivers/pci/probe.c index 6a3c1e728900..0eeac60042b3 100644 --- a/trunk/drivers/pci/probe.c +++ b/trunk/drivers/pci/probe.c @@ -873,7 +873,6 @@ void __devinit pci_device_add(struct pci_dev *dev, struct pci_bus *bus) dev->dev.release = pci_release_dev; pci_dev_get(dev); - set_dev_node(&dev->dev, pcibus_to_node(bus)); dev->dev.dma_mask = &dev->dma_mask; dev->dev.coherent_dma_mask = 0xffffffffull; diff --git a/trunk/drivers/pcmcia/cs.c b/trunk/drivers/pcmcia/cs.c index 606a46740338..f9cd831a3f31 100644 --- a/trunk/drivers/pcmcia/cs.c +++ b/trunk/drivers/pcmcia/cs.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include diff --git a/trunk/drivers/pcmcia/ds.c b/trunk/drivers/pcmcia/ds.c index 7355eb455a88..45df12eda3c5 100644 --- a/trunk/drivers/pcmcia/ds.c +++ b/trunk/drivers/pcmcia/ds.c @@ -675,10 +675,9 @@ static int pcmcia_card_add(struct pcmcia_socket *s) } -static void pcmcia_delayed_add_device(struct work_struct *work) +static void pcmcia_delayed_add_device(void *data) { - struct pcmcia_socket *s = - container_of(work, struct pcmcia_socket, device_add); + struct pcmcia_socket *s = data; ds_dbg(1, "adding additional device to %d\n", s->sock); pcmcia_device_add(s, s->pcmcia_state.mfc_pfc); s->pcmcia_state.device_add_pending = 0; @@ -1350,7 +1349,7 @@ static int __devinit pcmcia_bus_add_socket(struct class_device *class_dev, init_waitqueue_head(&socket->queue); #endif INIT_LIST_HEAD(&socket->devices_list); - INIT_WORK(&socket->device_add, pcmcia_delayed_add_device); + INIT_WORK(&socket->device_add, pcmcia_delayed_add_device, socket); memset(&socket->pcmcia_state, 0, sizeof(u8)); socket->device_count = 0; diff --git a/trunk/drivers/pnp/card.c b/trunk/drivers/pnp/card.c index 91c047a7e635..227600cd6360 100644 --- a/trunk/drivers/pnp/card.c +++ b/trunk/drivers/pnp/card.c @@ -164,17 +164,9 @@ static DEVICE_ATTR(card_id,S_IRUGO,pnp_show_card_ids,NULL); static int pnp_interface_attach_card(struct pnp_card *card) { - int rc = device_create_file(&card->dev,&dev_attr_name); - if (rc) return rc; - - rc = device_create_file(&card->dev,&dev_attr_card_id); - if (rc) goto err_name; - + device_create_file(&card->dev,&dev_attr_name); + device_create_file(&card->dev,&dev_attr_card_id); return 0; - -err_name: - device_remove_file(&card->dev,&dev_attr_name); - return rc; } /** @@ -314,20 +306,16 @@ struct pnp_dev * pnp_request_card_device(struct pnp_card_link *clink, const char down_write(&dev->dev.bus->subsys.rwsem); dev->card_link = clink; dev->dev.driver = &drv->link.driver; - if (pnp_bus_type.probe(&dev->dev)) - goto err_out; - if (device_bind_driver(&dev->dev)) - goto err_out; - + if (pnp_bus_type.probe(&dev->dev)) { + dev->dev.driver = NULL; + dev->card_link = NULL; + up_write(&dev->dev.bus->subsys.rwsem); + return NULL; + } + device_bind_driver(&dev->dev); up_write(&dev->dev.bus->subsys.rwsem); return dev; - -err_out: - dev->dev.driver = NULL; - dev->card_link = NULL; - up_write(&dev->dev.bus->subsys.rwsem); - return NULL; } /** diff --git a/trunk/drivers/pnp/interface.c b/trunk/drivers/pnp/interface.c index ac9fcd499f3f..9d8b415eca79 100644 --- a/trunk/drivers/pnp/interface.c +++ b/trunk/drivers/pnp/interface.c @@ -461,19 +461,8 @@ static DEVICE_ATTR(id,S_IRUGO,pnp_show_current_ids,NULL); int 
pnp_interface_attach_device(struct pnp_dev *dev) { - int rc = device_create_file(&dev->dev,&dev_attr_options); - if (rc) goto err; - rc = device_create_file(&dev->dev,&dev_attr_resources); - if (rc) goto err_opt; - rc = device_create_file(&dev->dev,&dev_attr_id); - if (rc) goto err_res; - + device_create_file(&dev->dev,&dev_attr_options); + device_create_file(&dev->dev,&dev_attr_resources); + device_create_file(&dev->dev,&dev_attr_id); return 0; - -err_res: - device_remove_file(&dev->dev,&dev_attr_resources); -err_opt: - device_remove_file(&dev->dev,&dev_attr_options); -err: - return rc; } diff --git a/trunk/drivers/pnp/pnpbios/core.c b/trunk/drivers/pnp/pnpbios/core.c index 81186f479a3f..81a6c83d89a6 100644 --- a/trunk/drivers/pnp/pnpbios/core.c +++ b/trunk/drivers/pnp/pnpbios/core.c @@ -61,7 +61,6 @@ #include #include #include -#include #include #include diff --git a/trunk/drivers/rtc/Kconfig b/trunk/drivers/rtc/Kconfig index 2a63ab2b47f4..fc766a7a611e 100644 --- a/trunk/drivers/rtc/Kconfig +++ b/trunk/drivers/rtc/Kconfig @@ -154,23 +154,15 @@ config RTC_DRV_DS1672 will be called rtc-ds1672. config RTC_DRV_DS1742 - tristate "Dallas DS1742/1743" + tristate "Dallas DS1742" depends on RTC_CLASS help If you say yes here you get support for the - Dallas DS1742/1743 timekeeping chip. + Dallas DS1742 timekeeping chip. This driver can also be built as a module. If so, the module will be called rtc-ds1742. -config RTC_DRV_OMAP - tristate "TI OMAP1" - depends on RTC_CLASS && ( \ - ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 ) - help - Say "yes" here to support the real time clock on TI OMAP1 chips. - This driver can also be built as a module called rtc-omap. - config RTC_DRV_PCF8563 tristate "Philips PCF8563/Epson RTC8564" depends on RTC_CLASS && I2C diff --git a/trunk/drivers/rtc/Makefile b/trunk/drivers/rtc/Makefile index bd4c45d333f0..3ba5ff6e6800 100644 --- a/trunk/drivers/rtc/Makefile +++ b/trunk/drivers/rtc/Makefile @@ -21,7 +21,6 @@ obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o obj-$(CONFIG_RTC_DRV_DS1672) += rtc-ds1672.o obj-$(CONFIG_RTC_DRV_DS1742) += rtc-ds1742.o -obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o diff --git a/trunk/drivers/rtc/rtc-dev.c b/trunk/drivers/rtc/rtc-dev.c index 828b329e08e0..814b9e1873f5 100644 --- a/trunk/drivers/rtc/rtc-dev.c +++ b/trunk/drivers/rtc/rtc-dev.c @@ -53,10 +53,9 @@ static int rtc_dev_open(struct inode *inode, struct file *file) * Routine to poll RTC seconds field for change as often as possible, * after first RTC_UIE use timer to reduce polling */ -static void rtc_uie_task(struct work_struct *work) +static void rtc_uie_task(void *data) { - struct rtc_device *rtc = - container_of(work, struct rtc_device, uie_task); + struct rtc_device *rtc = data; struct rtc_time tm; int num = 0; int err; @@ -412,7 +411,7 @@ static int rtc_dev_add_device(struct class_device *class_dev, spin_lock_init(&rtc->irq_lock); init_waitqueue_head(&rtc->irq_queue); #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL - INIT_WORK(&rtc->uie_task, rtc_uie_task); + INIT_WORK(&rtc->uie_task, rtc_uie_task, rtc); setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc); #endif diff --git a/trunk/drivers/rtc/rtc-ds1672.c b/trunk/drivers/rtc/rtc-ds1672.c index dfef1637bfb8..67e816a9a39f 100644 --- a/trunk/drivers/rtc/rtc-ds1672.c +++ b/trunk/drivers/rtc/rtc-ds1672.c @@ -237,22 +237,17 @@ static int ds1672_probe(struct 
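A second recurring difference, in the pnp and rtc hunks above and below (pnp_interface_attach_card, pnp_interface_attach_device, and the ds1672, rs5c372, x1205 and rtc-test probe routines): one side checks the return value of device_create_file() and unwinds already-created attributes on failure, the other ignores it. A sketch of the checking shape, with hypothetical "foo"/"bar" attributes not taken from any of these drivers:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/stat.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "foo\n");
}
static DEVICE_ATTR(foo, S_IRUGO, foo_show, NULL);

static ssize_t bar_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "bar\n");
}
static DEVICE_ATTR(bar, S_IRUGO, bar_show, NULL);

/* Create both attributes or none: undo the first if the second fails. */
static int example_create_files(struct device *dev)
{
	int rc;

	rc = device_create_file(dev, &dev_attr_foo);
	if (rc)
		return rc;

	rc = device_create_file(dev, &dev_attr_bar);
	if (rc)
		goto err_foo;

	return 0;

err_foo:
	device_remove_file(dev, &dev_attr_foo);
	return rc;
}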
i2c_adapter *adapter, int address, int kind) /* read control register */ err = ds1672_get_control(client, &control); if (err) - goto exit_devreg; + goto exit_detach; if (control & DS1672_REG_CONTROL_EOSC) dev_warn(&client->dev, "Oscillator not enabled. " "Set time to enable.\n"); /* Register sysfs hooks */ - err = device_create_file(&client->dev, &dev_attr_control); - if (err) - goto exit_devreg; + device_create_file(&client->dev, &dev_attr_control); return 0; -exit_devreg: - rtc_device_unregister(rtc); - exit_detach: i2c_detach_client(client); diff --git a/trunk/drivers/rtc/rtc-ds1742.c b/trunk/drivers/rtc/rtc-ds1742.c index 17633bfa8480..6273a3d240a2 100644 --- a/trunk/drivers/rtc/rtc-ds1742.c +++ b/trunk/drivers/rtc/rtc-ds1742.c @@ -6,10 +6,6 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. - * - * Copyright (C) 2006 Torsten Ertbjerg Rasmussen - * - nvram size determined from resource - * - this ds1742 driver now supports ds1743. */ #include @@ -21,19 +17,20 @@ #include #include -#define DRV_VERSION "0.3" +#define DRV_VERSION "0.2" -#define RTC_SIZE 8 +#define RTC_REG_SIZE 0x800 +#define RTC_OFFSET 0x7f8 -#define RTC_CONTROL 0 -#define RTC_CENTURY 0 -#define RTC_SECONDS 1 -#define RTC_MINUTES 2 -#define RTC_HOURS 3 -#define RTC_DAY 4 -#define RTC_DATE 5 -#define RTC_MONTH 6 -#define RTC_YEAR 7 +#define RTC_CONTROL (RTC_OFFSET + 0) +#define RTC_CENTURY (RTC_OFFSET + 0) +#define RTC_SECONDS (RTC_OFFSET + 1) +#define RTC_MINUTES (RTC_OFFSET + 2) +#define RTC_HOURS (RTC_OFFSET + 3) +#define RTC_DAY (RTC_OFFSET + 4) +#define RTC_DATE (RTC_OFFSET + 5) +#define RTC_MONTH (RTC_OFFSET + 6) +#define RTC_YEAR (RTC_OFFSET + 7) #define RTC_CENTURY_MASK 0x3f #define RTC_SECONDS_MASK 0x7f @@ -51,10 +48,7 @@ struct rtc_plat_data { struct rtc_device *rtc; - void __iomem *ioaddr_nvram; - void __iomem *ioaddr_rtc; - size_t size_nvram; - size_t size; + void __iomem *ioaddr; unsigned long baseaddr; unsigned long last_jiffies; }; @@ -63,7 +57,7 @@ static int ds1742_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct platform_device *pdev = to_platform_device(dev); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); - void __iomem *ioaddr = pdata->ioaddr_rtc; + void __iomem *ioaddr = pdata->ioaddr; u8 century; century = BIN2BCD((tm->tm_year + 1900) / 100); @@ -88,7 +82,7 @@ static int ds1742_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct platform_device *pdev = to_platform_device(dev); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); - void __iomem *ioaddr = pdata->ioaddr_rtc; + void __iomem *ioaddr = pdata->ioaddr; unsigned int year, month, day, hour, minute, second, week; unsigned int century; @@ -133,10 +127,10 @@ static ssize_t ds1742_nvram_read(struct kobject *kobj, char *buf, struct platform_device *pdev = to_platform_device(container_of(kobj, struct device, kobj)); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); - void __iomem *ioaddr = pdata->ioaddr_nvram; + void __iomem *ioaddr = pdata->ioaddr; ssize_t count; - for (count = 0; size > 0 && pos < pdata->size_nvram; count++, size--) + for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--) *buf++ = readb(ioaddr + pos++); return count; } @@ -147,10 +141,10 @@ static ssize_t ds1742_nvram_write(struct kobject *kobj, char *buf, struct platform_device *pdev = to_platform_device(container_of(kobj, struct device, kobj)); struct rtc_plat_data *pdata = 
platform_get_drvdata(pdev); - void __iomem *ioaddr = pdata->ioaddr_nvram; + void __iomem *ioaddr = pdata->ioaddr; ssize_t count; - for (count = 0; size > 0 && pos < pdata->size_nvram; count++, size--) + for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--) writeb(*buf++, ioaddr + pos++); return count; } @@ -161,6 +155,7 @@ static struct bin_attribute ds1742_nvram_attr = { .mode = S_IRUGO | S_IWUGO, .owner = THIS_MODULE, }, + .size = RTC_OFFSET, .read = ds1742_nvram_read, .write = ds1742_nvram_write, }; @@ -180,23 +175,19 @@ static int __init ds1742_rtc_probe(struct platform_device *pdev) pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; - pdata->size = res->end - res->start + 1; - if (!request_mem_region(res->start, pdata->size, pdev->name)) { + if (!request_mem_region(res->start, RTC_REG_SIZE, pdev->name)) { ret = -EBUSY; goto out; } pdata->baseaddr = res->start; - ioaddr = ioremap(pdata->baseaddr, pdata->size); + ioaddr = ioremap(pdata->baseaddr, RTC_REG_SIZE); if (!ioaddr) { ret = -ENOMEM; goto out; } - pdata->ioaddr_nvram = ioaddr; - pdata->size_nvram = pdata->size - RTC_SIZE; - pdata->ioaddr_rtc = ioaddr + pdata->size_nvram; + pdata->ioaddr = ioaddr; /* turn RTC on if it was not on */ - ioaddr = pdata->ioaddr_rtc; sec = readb(ioaddr + RTC_SECONDS); if (sec & RTC_STOP) { sec &= RTC_SECONDS_MASK; @@ -217,8 +208,6 @@ static int __init ds1742_rtc_probe(struct platform_device *pdev) pdata->rtc = rtc; pdata->last_jiffies = jiffies; platform_set_drvdata(pdev, pdata); - ds1742_nvram_attr.size = max(ds1742_nvram_attr.size, - pdata->size_nvram); ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1742_nvram_attr); if (ret) goto out; @@ -226,10 +215,10 @@ static int __init ds1742_rtc_probe(struct platform_device *pdev) out: if (pdata->rtc) rtc_device_unregister(pdata->rtc); - if (pdata->ioaddr_nvram) - iounmap(pdata->ioaddr_nvram); + if (ioaddr) + iounmap(ioaddr); if (pdata->baseaddr) - release_mem_region(pdata->baseaddr, pdata->size); + release_mem_region(pdata->baseaddr, RTC_REG_SIZE); kfree(pdata); return ret; } @@ -240,8 +229,8 @@ static int __devexit ds1742_rtc_remove(struct platform_device *pdev) sysfs_remove_bin_file(&pdev->dev.kobj, &ds1742_nvram_attr); rtc_device_unregister(pdata->rtc); - iounmap(pdata->ioaddr_nvram); - release_mem_region(pdata->baseaddr, pdata->size); + iounmap(pdata->ioaddr); + release_mem_region(pdata->baseaddr, RTC_REG_SIZE); kfree(pdata); return 0; } diff --git a/trunk/drivers/rtc/rtc-omap.c b/trunk/drivers/rtc/rtc-omap.c deleted file mode 100644 index eac5fb1fc02f..000000000000 --- a/trunk/drivers/rtc/rtc-omap.c +++ /dev/null @@ -1,572 +0,0 @@ -/* - * TI OMAP1 Real Time Clock interface for Linux - * - * Copyright (C) 2003 MontaVista Software, Inc. - * Author: George G. Davis or - * - * Copyright (C) 2006 David Brownell (new RTC framework) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - - -/* The OMAP1 RTC is a year/month/day/hours/minutes/seconds BCD clock - * with century-range alarm matching, driven by the 32kHz clock. 
- * - * The main user-visible ways it differs from PC RTCs are by omitting - * "don't care" alarm fields and sub-second periodic IRQs, and having - * an autoadjust mechanism to calibrate to the true oscillator rate. - * - * Board-specific wiring options include using split power mode with - * RTC_OFF_NOFF used as the reset signal (so the RTC won't be reset), - * and wiring RTC_WAKE_INT (so the RTC alarm can wake the system from - * low power modes). See the BOARD-SPECIFIC CUSTOMIZATION comment. - */ - -#define OMAP_RTC_BASE 0xfffb4800 - -/* RTC registers */ -#define OMAP_RTC_SECONDS_REG 0x00 -#define OMAP_RTC_MINUTES_REG 0x04 -#define OMAP_RTC_HOURS_REG 0x08 -#define OMAP_RTC_DAYS_REG 0x0C -#define OMAP_RTC_MONTHS_REG 0x10 -#define OMAP_RTC_YEARS_REG 0x14 -#define OMAP_RTC_WEEKS_REG 0x18 - -#define OMAP_RTC_ALARM_SECONDS_REG 0x20 -#define OMAP_RTC_ALARM_MINUTES_REG 0x24 -#define OMAP_RTC_ALARM_HOURS_REG 0x28 -#define OMAP_RTC_ALARM_DAYS_REG 0x2c -#define OMAP_RTC_ALARM_MONTHS_REG 0x30 -#define OMAP_RTC_ALARM_YEARS_REG 0x34 - -#define OMAP_RTC_CTRL_REG 0x40 -#define OMAP_RTC_STATUS_REG 0x44 -#define OMAP_RTC_INTERRUPTS_REG 0x48 - -#define OMAP_RTC_COMP_LSB_REG 0x4c -#define OMAP_RTC_COMP_MSB_REG 0x50 -#define OMAP_RTC_OSC_REG 0x54 - -/* OMAP_RTC_CTRL_REG bit fields: */ -#define OMAP_RTC_CTRL_SPLIT (1<<7) -#define OMAP_RTC_CTRL_DISABLE (1<<6) -#define OMAP_RTC_CTRL_SET_32_COUNTER (1<<5) -#define OMAP_RTC_CTRL_TEST (1<<4) -#define OMAP_RTC_CTRL_MODE_12_24 (1<<3) -#define OMAP_RTC_CTRL_AUTO_COMP (1<<2) -#define OMAP_RTC_CTRL_ROUND_30S (1<<1) -#define OMAP_RTC_CTRL_STOP (1<<0) - -/* OMAP_RTC_STATUS_REG bit fields: */ -#define OMAP_RTC_STATUS_POWER_UP (1<<7) -#define OMAP_RTC_STATUS_ALARM (1<<6) -#define OMAP_RTC_STATUS_1D_EVENT (1<<5) -#define OMAP_RTC_STATUS_1H_EVENT (1<<4) -#define OMAP_RTC_STATUS_1M_EVENT (1<<3) -#define OMAP_RTC_STATUS_1S_EVENT (1<<2) -#define OMAP_RTC_STATUS_RUN (1<<1) -#define OMAP_RTC_STATUS_BUSY (1<<0) - -/* OMAP_RTC_INTERRUPTS_REG bit fields: */ -#define OMAP_RTC_INTERRUPTS_IT_ALARM (1<<3) -#define OMAP_RTC_INTERRUPTS_IT_TIMER (1<<2) - - -#define rtc_read(addr) omap_readb(OMAP_RTC_BASE + (addr)) -#define rtc_write(val, addr) omap_writeb(val, OMAP_RTC_BASE + (addr)) - - -/* platform_bus isn't hotpluggable, so for static linkage it'd be safe - * to get rid of probe() and remove() code ... too bad the driver struct - * remembers probe(), that's about 25% of the runtime footprint!! - */ -#ifndef MODULE -#undef __devexit -#undef __devexit_p -#define __devexit __exit -#define __devexit_p __exit_p -#endif - - -/* we rely on the rtc framework to handle locking (rtc->ops_lock), - * so the only other requirement is that register accesses which - * require BUSY to be clear are made with IRQs locally disabled - */ -static void rtc_wait_not_busy(void) -{ - int count = 0; - u8 status; - - /* BUSY may stay active for 1/32768 second (~30 usec) */ - for (count = 0; count < 50; count++) { - status = rtc_read(OMAP_RTC_STATUS_REG); - if ((status & (u8)OMAP_RTC_STATUS_BUSY) == 0) - break; - udelay(1); - } - /* now we have ~15 usec to read/write various registers */ -} - -static irqreturn_t rtc_irq(int irq, void *class_dev) -{ - unsigned long events = 0; - u8 irq_data; - - irq_data = rtc_read(OMAP_RTC_STATUS_REG); - - /* alarm irq? */ - if (irq_data & OMAP_RTC_STATUS_ALARM) { - rtc_write(OMAP_RTC_STATUS_ALARM, OMAP_RTC_STATUS_REG); - events |= RTC_IRQF | RTC_AF; - } - - /* 1/sec periodic/update irq? 
*/ - if (irq_data & OMAP_RTC_STATUS_1S_EVENT) - events |= RTC_IRQF | RTC_UF; - - rtc_update_irq(class_dev, 1, events); - - return IRQ_HANDLED; -} - -#ifdef CONFIG_RTC_INTF_DEV - -static int -omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) -{ - u8 reg; - - switch (cmd) { - case RTC_AIE_OFF: - case RTC_AIE_ON: - case RTC_UIE_OFF: - case RTC_UIE_ON: - break; - default: - return -ENOIOCTLCMD; - } - - local_irq_disable(); - rtc_wait_not_busy(); - reg = rtc_read(OMAP_RTC_INTERRUPTS_REG); - switch (cmd) { - /* AIE = Alarm Interrupt Enable */ - case RTC_AIE_OFF: - reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM; - break; - case RTC_AIE_ON: - reg |= OMAP_RTC_INTERRUPTS_IT_ALARM; - break; - /* UIE = Update Interrupt Enable (1/second) */ - case RTC_UIE_OFF: - reg &= ~OMAP_RTC_INTERRUPTS_IT_TIMER; - break; - case RTC_UIE_ON: - reg |= OMAP_RTC_INTERRUPTS_IT_TIMER; - break; - } - rtc_wait_not_busy(); - rtc_write(reg, OMAP_RTC_INTERRUPTS_REG); - local_irq_enable(); - - return 0; -} - -#else -#define omap_rtc_ioctl NULL -#endif - -/* this hardware doesn't support "don't care" alarm fields */ -static int tm2bcd(struct rtc_time *tm) -{ - if (rtc_valid_tm(tm) != 0) - return -EINVAL; - - tm->tm_sec = BIN2BCD(tm->tm_sec); - tm->tm_min = BIN2BCD(tm->tm_min); - tm->tm_hour = BIN2BCD(tm->tm_hour); - tm->tm_mday = BIN2BCD(tm->tm_mday); - - tm->tm_mon = BIN2BCD(tm->tm_mon + 1); - - /* epoch == 1900 */ - if (tm->tm_year < 100 || tm->tm_year > 199) - return -EINVAL; - tm->tm_year = BIN2BCD(tm->tm_year - 100); - - return 0; -} - -static void bcd2tm(struct rtc_time *tm) -{ - tm->tm_sec = BCD2BIN(tm->tm_sec); - tm->tm_min = BCD2BIN(tm->tm_min); - tm->tm_hour = BCD2BIN(tm->tm_hour); - tm->tm_mday = BCD2BIN(tm->tm_mday); - tm->tm_mon = BCD2BIN(tm->tm_mon) - 1; - /* epoch == 1900 */ - tm->tm_year = BCD2BIN(tm->tm_year) + 100; -} - - -static int omap_rtc_read_time(struct device *dev, struct rtc_time *tm) -{ - /* we don't report wday/yday/isdst ... 
*/ - local_irq_disable(); - rtc_wait_not_busy(); - - tm->tm_sec = rtc_read(OMAP_RTC_SECONDS_REG); - tm->tm_min = rtc_read(OMAP_RTC_MINUTES_REG); - tm->tm_hour = rtc_read(OMAP_RTC_HOURS_REG); - tm->tm_mday = rtc_read(OMAP_RTC_DAYS_REG); - tm->tm_mon = rtc_read(OMAP_RTC_MONTHS_REG); - tm->tm_year = rtc_read(OMAP_RTC_YEARS_REG); - - local_irq_enable(); - - bcd2tm(tm); - return 0; -} - -static int omap_rtc_set_time(struct device *dev, struct rtc_time *tm) -{ - if (tm2bcd(tm) < 0) - return -EINVAL; - local_irq_disable(); - rtc_wait_not_busy(); - - rtc_write(tm->tm_year, OMAP_RTC_YEARS_REG); - rtc_write(tm->tm_mon, OMAP_RTC_MONTHS_REG); - rtc_write(tm->tm_mday, OMAP_RTC_DAYS_REG); - rtc_write(tm->tm_hour, OMAP_RTC_HOURS_REG); - rtc_write(tm->tm_min, OMAP_RTC_MINUTES_REG); - rtc_write(tm->tm_sec, OMAP_RTC_SECONDS_REG); - - local_irq_enable(); - - return 0; -} - -static int omap_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) -{ - local_irq_disable(); - rtc_wait_not_busy(); - - alm->time.tm_sec = rtc_read(OMAP_RTC_ALARM_SECONDS_REG); - alm->time.tm_min = rtc_read(OMAP_RTC_ALARM_MINUTES_REG); - alm->time.tm_hour = rtc_read(OMAP_RTC_ALARM_HOURS_REG); - alm->time.tm_mday = rtc_read(OMAP_RTC_ALARM_DAYS_REG); - alm->time.tm_mon = rtc_read(OMAP_RTC_ALARM_MONTHS_REG); - alm->time.tm_year = rtc_read(OMAP_RTC_ALARM_YEARS_REG); - - local_irq_enable(); - - bcd2tm(&alm->time); - alm->pending = !!(rtc_read(OMAP_RTC_INTERRUPTS_REG) - & OMAP_RTC_INTERRUPTS_IT_ALARM); - alm->enabled = alm->pending && device_may_wakeup(dev); - - return 0; -} - -static int omap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) -{ - u8 reg; - - /* Much userspace code uses RTC_ALM_SET, thus "don't care" for - * day/month/year specifies alarms up to 24 hours in the future. - * So we need to handle that ... but let's ignore the "don't care" - * values for hours/minutes/seconds. 
- */ - if (alm->time.tm_mday <= 0 - && alm->time.tm_mon < 0 - && alm->time.tm_year < 0) { - struct rtc_time tm; - unsigned long now, then; - - omap_rtc_read_time(dev, &tm); - rtc_tm_to_time(&tm, &now); - - alm->time.tm_mday = tm.tm_mday; - alm->time.tm_mon = tm.tm_mon; - alm->time.tm_year = tm.tm_year; - rtc_tm_to_time(&alm->time, &then); - - /* sometimes the alarm wraps into tomorrow */ - if (then < now) { - rtc_time_to_tm(now + 24 * 60 * 60, &tm); - alm->time.tm_mday = tm.tm_mday; - alm->time.tm_mon = tm.tm_mon; - alm->time.tm_year = tm.tm_year; - } - } - - if (tm2bcd(&alm->time) < 0) - return -EINVAL; - - local_irq_disable(); - rtc_wait_not_busy(); - - rtc_write(alm->time.tm_year, OMAP_RTC_ALARM_YEARS_REG); - rtc_write(alm->time.tm_mon, OMAP_RTC_ALARM_MONTHS_REG); - rtc_write(alm->time.tm_mday, OMAP_RTC_ALARM_DAYS_REG); - rtc_write(alm->time.tm_hour, OMAP_RTC_ALARM_HOURS_REG); - rtc_write(alm->time.tm_min, OMAP_RTC_ALARM_MINUTES_REG); - rtc_write(alm->time.tm_sec, OMAP_RTC_ALARM_SECONDS_REG); - - reg = rtc_read(OMAP_RTC_INTERRUPTS_REG); - if (alm->enabled) - reg |= OMAP_RTC_INTERRUPTS_IT_ALARM; - else - reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM; - rtc_write(reg, OMAP_RTC_INTERRUPTS_REG); - - local_irq_enable(); - - return 0; -} - -static struct rtc_class_ops omap_rtc_ops = { - .ioctl = omap_rtc_ioctl, - .read_time = omap_rtc_read_time, - .set_time = omap_rtc_set_time, - .read_alarm = omap_rtc_read_alarm, - .set_alarm = omap_rtc_set_alarm, -}; - -static int omap_rtc_alarm; -static int omap_rtc_timer; - -static int __devinit omap_rtc_probe(struct platform_device *pdev) -{ - struct resource *res, *mem; - struct rtc_device *rtc; - u8 reg, new_ctrl; - - omap_rtc_timer = platform_get_irq(pdev, 0); - if (omap_rtc_timer <= 0) { - pr_debug("%s: no update irq?\n", pdev->name); - return -ENOENT; - } - - omap_rtc_alarm = platform_get_irq(pdev, 1); - if (omap_rtc_alarm <= 0) { - pr_debug("%s: no alarm irq?\n", pdev->name); - return -ENOENT; - } - - /* NOTE: using static mapping for RTC registers */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res && res->start != OMAP_RTC_BASE) { - pr_debug("%s: RTC registers at %08x, expected %08x\n", - pdev->name, (unsigned) res->start, OMAP_RTC_BASE); - return -ENOENT; - } - - if (res) - mem = request_mem_region(res->start, - res->end - res->start + 1, - pdev->name); - else - mem = NULL; - if (!mem) { - pr_debug("%s: RTC registers at %08x are not free\n", - pdev->name, OMAP_RTC_BASE); - return -EBUSY; - } - - rtc = rtc_device_register(pdev->name, &pdev->dev, - &omap_rtc_ops, THIS_MODULE); - if (IS_ERR(rtc)) { - pr_debug("%s: can't register RTC device, err %ld\n", - pdev->name, PTR_ERR(rtc)); - goto fail; - } - platform_set_drvdata(pdev, rtc); - class_set_devdata(&rtc->class_dev, mem); - - /* clear pending irqs, and set 1/second periodic, - * which we'll use instead of update irqs - */ - rtc_write(0, OMAP_RTC_INTERRUPTS_REG); - - /* clear old status */ - reg = rtc_read(OMAP_RTC_STATUS_REG); - if (reg & (u8) OMAP_RTC_STATUS_POWER_UP) { - pr_info("%s: RTC power up reset detected\n", - pdev->name); - rtc_write(OMAP_RTC_STATUS_POWER_UP, OMAP_RTC_STATUS_REG); - } - if (reg & (u8) OMAP_RTC_STATUS_ALARM) - rtc_write(OMAP_RTC_STATUS_ALARM, OMAP_RTC_STATUS_REG); - - /* handle periodic and alarm irqs */ - if (request_irq(omap_rtc_timer, rtc_irq, SA_INTERRUPT, - rtc->class_dev.class_id, &rtc->class_dev)) { - pr_debug("%s: RTC timer interrupt IRQ%d already claimed\n", - pdev->name, omap_rtc_timer); - goto fail0; - } - if (request_irq(omap_rtc_alarm, rtc_irq, 
SA_INTERRUPT, - rtc->class_dev.class_id, &rtc->class_dev)) { - pr_debug("%s: RTC alarm interrupt IRQ%d already claimed\n", - pdev->name, omap_rtc_alarm); - goto fail1; - } - - /* On boards with split power, RTC_ON_NOFF won't reset the RTC */ - reg = rtc_read(OMAP_RTC_CTRL_REG); - if (reg & (u8) OMAP_RTC_CTRL_STOP) - pr_info("%s: already running\n", pdev->name); - - /* force to 24 hour mode */ - new_ctrl = reg & ~(OMAP_RTC_CTRL_SPLIT|OMAP_RTC_CTRL_AUTO_COMP); - new_ctrl |= OMAP_RTC_CTRL_STOP; - - /* BOARD-SPECIFIC CUSTOMIZATION CAN GO HERE: - * - * - Boards wired so that RTC_WAKE_INT does something, and muxed - * right (W13_1610_RTC_WAKE_INT is the default after chip reset), - * should initialize the device wakeup flag appropriately. - * - * - Boards wired so RTC_ON_nOFF is used as the reset signal, - * rather than nPWRON_RESET, should forcibly enable split - * power mode. (Some chip errata report that RTC_CTRL_SPLIT - * is write-only, and always reads as zero...) - */ - device_init_wakeup(&pdev->dev, 0); - - if (new_ctrl & (u8) OMAP_RTC_CTRL_SPLIT) - pr_info("%s: split power mode\n", pdev->name); - - if (reg != new_ctrl) - rtc_write(new_ctrl, OMAP_RTC_CTRL_REG); - - return 0; - -fail1: - free_irq(omap_rtc_timer, NULL); -fail0: - rtc_device_unregister(rtc); -fail: - release_resource(mem); - return -EIO; -} - -static int __devexit omap_rtc_remove(struct platform_device *pdev) -{ - struct rtc_device *rtc = platform_get_drvdata(pdev);; - - device_init_wakeup(&pdev->dev, 0); - - /* leave rtc running, but disable irqs */ - rtc_write(0, OMAP_RTC_INTERRUPTS_REG); - - free_irq(omap_rtc_timer, rtc); - free_irq(omap_rtc_alarm, rtc); - - release_resource(class_get_devdata(&rtc->class_dev)); - rtc_device_unregister(rtc); - return 0; -} - -#ifdef CONFIG_PM - -static struct timespec rtc_delta; -static u8 irqstat; - -static int omap_rtc_suspend(struct platform_device *pdev, pm_message_t state) -{ - struct rtc_time rtc_tm; - struct timespec time; - - time.tv_nsec = 0; - omap_rtc_read_time(NULL, &rtc_tm); - rtc_tm_to_time(&rtc_tm, &time.tv_sec); - - save_time_delta(&rtc_delta, &time); - irqstat = rtc_read(OMAP_RTC_INTERRUPTS_REG); - - /* FIXME the RTC alarm is not currently acting as a wakeup event - * source, and in fact this enable() call is just saving a flag - * that's never used... 
- */ - if (device_may_wakeup(&pdev->dev)) - enable_irq_wake(omap_rtc_alarm); - else - rtc_write(0, OMAP_RTC_INTERRUPTS_REG); - - return 0; -} - -static int omap_rtc_resume(struct platform_device *pdev) -{ - struct rtc_time rtc_tm; - struct timespec time; - - time.tv_nsec = 0; - omap_rtc_read_time(NULL, &rtc_tm); - rtc_tm_to_time(&rtc_tm, &time.tv_sec); - - restore_time_delta(&rtc_delta, &time); - if (device_may_wakeup(&pdev->dev)) - disable_irq_wake(omap_rtc_alarm); - else - rtc_write(irqstat, OMAP_RTC_INTERRUPTS_REG); - return 0; -} - -#else -#define omap_rtc_suspend NULL -#define omap_rtc_resume NULL -#endif - -static void omap_rtc_shutdown(struct platform_device *pdev) -{ - rtc_write(0, OMAP_RTC_INTERRUPTS_REG); -} - -MODULE_ALIAS("omap_rtc"); -static struct platform_driver omap_rtc_driver = { - .probe = omap_rtc_probe, - .remove = __devexit_p(omap_rtc_remove), - .suspend = omap_rtc_suspend, - .resume = omap_rtc_resume, - .shutdown = omap_rtc_shutdown, - .driver = { - .name = "omap_rtc", - .owner = THIS_MODULE, - }, -}; - -static int __init rtc_init(void) -{ - return platform_driver_register(&omap_rtc_driver); -} -module_init(rtc_init); - -static void __exit rtc_exit(void) -{ - platform_driver_unregister(&omap_rtc_driver); -} -module_exit(rtc_exit); - -MODULE_AUTHOR("George G. Davis (and others)"); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/rtc/rtc-rs5c372.c b/trunk/drivers/rtc/rtc-rs5c372.c index e2c7698fdba3..a44fe4efa216 100644 --- a/trunk/drivers/rtc/rtc-rs5c372.c +++ b/trunk/drivers/rtc/rtc-rs5c372.c @@ -13,7 +13,7 @@ #include #include -#define DRV_VERSION "0.3" +#define DRV_VERSION "0.2" /* Addresses to scan */ static unsigned short normal_i2c[] = { /* 0x32,*/ I2C_CLIENT_END }; @@ -39,14 +39,6 @@ static int rs5c372_attach(struct i2c_adapter *adapter); static int rs5c372_detach(struct i2c_client *client); static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind); -struct rs5c372 { - u8 reg_addr; - u8 regs[17]; - struct i2c_msg msg[1]; - struct i2c_client client; - struct rtc_device *rtc; -}; - static struct i2c_driver rs5c372_driver = { .driver = { .name = "rs5c372", @@ -57,16 +49,18 @@ static struct i2c_driver rs5c372_driver = { static int rs5c372_get_datetime(struct i2c_client *client, struct rtc_time *tm) { + unsigned char buf[7] = { RS5C372_REG_BASE }; - struct rs5c372 *rs5c372 = i2c_get_clientdata(client); - u8 *buf = &(rs5c372->regs[1]); - - /* this implements the 3rd reading method, according - * to the datasheet. rs5c372 defaults to internal - * address 0xF, so 0x0 is in regs[1] + /* this implements the 1st reading method, according + * to the datasheet. buf[0] is initialized with + * address ptr and transmission format register. 
*/ + struct i2c_msg msgs[] = { + { client->addr, 0, 1, buf }, + { client->addr, I2C_M_RD, 7, buf }, + }; - if ((i2c_transfer(client->adapter, rs5c372->msg, 1)) != 1) { + if ((i2c_transfer(client->adapter, msgs, 2)) != 2) { dev_err(&client->dev, "%s: read error\n", __FUNCTION__); return -EIO; } @@ -120,14 +114,23 @@ static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm) static int rs5c372_get_trim(struct i2c_client *client, int *osc, int *trim) { - struct rs5c372 *rs5c372 = i2c_get_clientdata(client); - u8 tmp = rs5c372->regs[RS5C372_REG_TRIM + 1]; + unsigned char buf = RS5C372_REG_TRIM; + + struct i2c_msg msgs[] = { + { client->addr, 0, 1, &buf }, + { client->addr, I2C_M_RD, 1, &buf }, + }; + + if ((i2c_transfer(client->adapter, msgs, 2)) != 2) { + dev_err(&client->dev, "%s: read error\n", __FUNCTION__); + return -EIO; + } if (osc) - *osc = (tmp & RS5C372_TRIM_XSL) ? 32000 : 32768; + *osc = (buf & RS5C372_TRIM_XSL) ? 32000 : 32768; if (trim) { - *trim = tmp & RS5C372_TRIM_MASK; + *trim = buf & RS5C372_TRIM_MASK; dev_dbg(&client->dev, "%s: raw trim=%x\n", __FUNCTION__, *trim); } @@ -198,7 +201,7 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind) { int err = 0; struct i2c_client *client; - struct rs5c372 *rs5c372; + struct rtc_device *rtc; dev_dbg(&adapter->dev, "%s\n", __FUNCTION__); @@ -207,11 +210,10 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind) goto exit; } - if (!(rs5c372 = kzalloc(sizeof(struct rs5c372), GFP_KERNEL))) { + if (!(client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL))) { err = -ENOMEM; goto exit; } - client = &rs5c372->client; /* I2C client */ client->addr = address; @@ -220,47 +222,32 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind) strlcpy(client->name, rs5c372_driver.driver.name, I2C_NAME_SIZE); - i2c_set_clientdata(client, rs5c372); - - rs5c372->msg[0].addr = address; - rs5c372->msg[0].flags = I2C_M_RD; - rs5c372->msg[0].len = sizeof(rs5c372->regs); - rs5c372->msg[0].buf = rs5c372->regs; - /* Inform the i2c layer */ if ((err = i2c_attach_client(client))) goto exit_kfree; dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n"); - rs5c372->rtc = rtc_device_register(rs5c372_driver.driver.name, - &client->dev, &rs5c372_rtc_ops, THIS_MODULE); + rtc = rtc_device_register(rs5c372_driver.driver.name, &client->dev, + &rs5c372_rtc_ops, THIS_MODULE); - if (IS_ERR(rs5c372->rtc)) { - err = PTR_ERR(rs5c372->rtc); + if (IS_ERR(rtc)) { + err = PTR_ERR(rtc); goto exit_detach; } - err = device_create_file(&client->dev, &dev_attr_trim); - if (err) - goto exit_devreg; - err = device_create_file(&client->dev, &dev_attr_osc); - if (err) - goto exit_trim; + i2c_set_clientdata(client, rtc); - return 0; - -exit_trim: - device_remove_file(&client->dev, &dev_attr_trim); + device_create_file(&client->dev, &dev_attr_trim); + device_create_file(&client->dev, &dev_attr_osc); -exit_devreg: - rtc_device_unregister(rs5c372->rtc); + return 0; exit_detach: i2c_detach_client(client); exit_kfree: - kfree(rs5c372); + kfree(client); exit: return err; @@ -269,15 +256,16 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind) static int rs5c372_detach(struct i2c_client *client) { int err; - struct rs5c372 *rs5c372 = i2c_get_clientdata(client); + struct rtc_device *rtc = i2c_get_clientdata(client); - if (rs5c372->rtc) - rtc_device_unregister(rs5c372->rtc); + if (rtc) + rtc_device_unregister(rtc); if ((err = i2c_detach_client(client))) return err; - 
kfree(rs5c372); + kfree(client); + return 0; } diff --git a/trunk/drivers/rtc/rtc-test.c b/trunk/drivers/rtc/rtc-test.c index f50a1b8e1607..6ef9c62d5032 100644 --- a/trunk/drivers/rtc/rtc-test.c +++ b/trunk/drivers/rtc/rtc-test.c @@ -123,18 +123,11 @@ static int test_probe(struct platform_device *plat_dev) err = PTR_ERR(rtc); return err; } - - err = device_create_file(&plat_dev->dev, &dev_attr_irq); - if (err) - goto err; + device_create_file(&plat_dev->dev, &dev_attr_irq); platform_set_drvdata(plat_dev, rtc); return 0; - -err: - rtc_device_unregister(rtc); - return err; } static int __devexit test_remove(struct platform_device *plat_dev) diff --git a/trunk/drivers/rtc/rtc-x1205.c b/trunk/drivers/rtc/rtc-x1205.c index 9a67487d086b..522c69753bbf 100644 --- a/trunk/drivers/rtc/rtc-x1205.c +++ b/trunk/drivers/rtc/rtc-x1205.c @@ -562,19 +562,11 @@ static int x1205_probe(struct i2c_adapter *adapter, int address, int kind) else dev_err(&client->dev, "couldn't read status\n"); - err = device_create_file(&client->dev, &dev_attr_atrim); - if (err) goto exit_devreg; - err = device_create_file(&client->dev, &dev_attr_dtrim); - if (err) goto exit_atrim; + device_create_file(&client->dev, &dev_attr_atrim); + device_create_file(&client->dev, &dev_attr_dtrim); return 0; -exit_atrim: - device_remove_file(&client->dev, &dev_attr_atrim); - -exit_devreg: - rtc_device_unregister(rtc); - exit_detach: i2c_detach_client(client); diff --git a/trunk/drivers/s390/block/dasd.c b/trunk/drivers/s390/block/dasd.c index 2af2d9b53d18..a2cef57d7bcb 100644 --- a/trunk/drivers/s390/block/dasd.c +++ b/trunk/drivers/s390/block/dasd.c @@ -54,7 +54,7 @@ static void dasd_flush_request_queue(struct dasd_device *); static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); static int dasd_flush_ccw_queue(struct dasd_device *, int); static void dasd_tasklet(struct dasd_device *); -static void do_kick_device(struct work_struct *); +static void do_kick_device(void *data); /* * SECTION: Operations on the device structure. @@ -100,7 +100,7 @@ dasd_alloc_device(void) (unsigned long) device); INIT_LIST_HEAD(&device->ccw_queue); init_timer(&device->timer); - INIT_WORK(&device->kick_work, do_kick_device); + INIT_WORK(&device->kick_work, do_kick_device, device); device->state = DASD_STATE_NEW; device->target = DASD_STATE_NEW; @@ -407,9 +407,11 @@ dasd_change_state(struct dasd_device *device) * event daemon. 
*/ static void -do_kick_device(struct work_struct *work) +do_kick_device(void *data) { - struct dasd_device *device = container_of(work, struct dasd_device, kick_work); + struct dasd_device *device; + + device = (struct dasd_device *) data; dasd_change_state(device); dasd_schedule_bh(device); dasd_put_device(device); diff --git a/trunk/drivers/s390/block/dasd_devmap.c b/trunk/drivers/s390/block/dasd_devmap.c index cf28ccc57948..17fdd8c9f740 100644 --- a/trunk/drivers/s390/block/dasd_devmap.c +++ b/trunk/drivers/s390/block/dasd_devmap.c @@ -25,7 +25,7 @@ #include "dasd_int.h" -struct kmem_cache *dasd_page_cache; +kmem_cache_t *dasd_page_cache; EXPORT_SYMBOL_GPL(dasd_page_cache); /* diff --git a/trunk/drivers/s390/block/dasd_eckd.c b/trunk/drivers/s390/block/dasd_eckd.c index fdaa471e845f..5ecea3e4fdef 100644 --- a/trunk/drivers/s390/block/dasd_eckd.c +++ b/trunk/drivers/s390/block/dasd_eckd.c @@ -1215,7 +1215,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req) dst = page_address(bv->bv_page) + bv->bv_offset; if (dasd_page_cache) { char *copy = kmem_cache_alloc(dasd_page_cache, - GFP_DMA | __GFP_NOWARN); + SLAB_DMA | __GFP_NOWARN); if (copy && rq_data_dir(req) == WRITE) memcpy(copy + bv->bv_offset, dst, bv->bv_len); if (copy) diff --git a/trunk/drivers/s390/block/dasd_fba.c b/trunk/drivers/s390/block/dasd_fba.c index b857fd5893fd..80926c548228 100644 --- a/trunk/drivers/s390/block/dasd_fba.c +++ b/trunk/drivers/s390/block/dasd_fba.c @@ -308,7 +308,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req) dst = page_address(bv->bv_page) + bv->bv_offset; if (dasd_page_cache) { char *copy = kmem_cache_alloc(dasd_page_cache, - GFP_DMA | __GFP_NOWARN); + SLAB_DMA | __GFP_NOWARN); if (copy && rq_data_dir(req) == WRITE) memcpy(copy + bv->bv_offset, dst, bv->bv_len); if (copy) diff --git a/trunk/drivers/s390/block/dasd_int.h b/trunk/drivers/s390/block/dasd_int.h index dc5dd509434d..9f52004f6fc2 100644 --- a/trunk/drivers/s390/block/dasd_int.h +++ b/trunk/drivers/s390/block/dasd_int.h @@ -474,7 +474,7 @@ extern struct dasd_profile_info_t dasd_global_profile; extern unsigned int dasd_profile_level; extern struct block_device_operations dasd_device_operations; -extern struct kmem_cache *dasd_page_cache; +extern kmem_cache_t *dasd_page_cache; struct dasd_ccw_req * dasd_kmalloc_request(char *, int, int, struct dasd_device *); diff --git a/trunk/drivers/s390/cio/css.c b/trunk/drivers/s390/cio/css.c index 26cf2f5ae2e7..ad7f7e1c0163 100644 --- a/trunk/drivers/s390/cio/css.c +++ b/trunk/drivers/s390/cio/css.c @@ -334,7 +334,7 @@ static LIST_HEAD(slow_subchannels_head); static DEFINE_SPINLOCK(slow_subchannel_lock); static void -css_trigger_slow_path(struct work_struct *unused) +css_trigger_slow_path(void) { CIO_TRACE_EVENT(4, "slowpath"); @@ -359,7 +359,8 @@ css_trigger_slow_path(struct work_struct *unused) spin_unlock_irq(&slow_subchannel_lock); } -DECLARE_WORK(slow_path_work, css_trigger_slow_path); +typedef void (*workfunc)(void *); +DECLARE_WORK(slow_path_work, (workfunc)css_trigger_slow_path, NULL); struct workqueue_struct *slow_path_wq; /* Reprobe subchannel if unregistered. */ @@ -396,7 +397,7 @@ static int reprobe_subchannel(struct subchannel_id schid, void *data) } /* Work function used to reprobe all unregistered subchannels. 
*/ -static void reprobe_all(struct work_struct *unused) +static void reprobe_all(void *data) { int ret; @@ -412,7 +413,7 @@ static void reprobe_all(struct work_struct *unused) need_reprobe); } -DECLARE_WORK(css_reprobe_work, reprobe_all); +DECLARE_WORK(css_reprobe_work, reprobe_all, NULL); /* Schedule reprobing of all unregistered subchannels. */ void css_schedule_reprobe(void) diff --git a/trunk/drivers/s390/crypto/ap_bus.c b/trunk/drivers/s390/crypto/ap_bus.c index e4dc947e74e9..6a54334ffe09 100644 --- a/trunk/drivers/s390/crypto/ap_bus.c +++ b/trunk/drivers/s390/crypto/ap_bus.c @@ -37,7 +37,7 @@ #include "ap_bus.h" /* Some prototypes. */ -static void ap_scan_bus(struct work_struct *); +static void ap_scan_bus(void *); static void ap_poll_all(unsigned long); static void ap_poll_timeout(unsigned long); static int ap_poll_thread_start(void); @@ -71,7 +71,7 @@ static struct device *ap_root_device = NULL; static struct workqueue_struct *ap_work_queue; static struct timer_list ap_config_timer; static int ap_config_time = AP_CONFIG_TIME; -static DECLARE_WORK(ap_config_work, ap_scan_bus); +static DECLARE_WORK(ap_config_work, ap_scan_bus, NULL); /** * Tasklet & timer for AP request polling. @@ -732,7 +732,7 @@ static void ap_device_release(struct device *dev) kfree(ap_dev); } -static void ap_scan_bus(struct work_struct *unused) +static void ap_scan_bus(void *data) { struct ap_device *ap_dev; struct device *dev; diff --git a/trunk/drivers/s390/net/lcs.c b/trunk/drivers/s390/net/lcs.c index e5665b6743a1..08d4e47070bd 100644 --- a/trunk/drivers/s390/net/lcs.c +++ b/trunk/drivers/s390/net/lcs.c @@ -67,7 +67,7 @@ static char debug_buffer[255]; * Some prototypes. */ static void lcs_tasklet(unsigned long); -static void lcs_start_kernel_thread(struct work_struct *); +static void lcs_start_kernel_thread(struct lcs_card *card); static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *); static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *); static int lcs_recovery(void *ptr); @@ -1724,9 +1724,8 @@ lcs_stopcard(struct lcs_card *card) * Kernel Thread helper functions for LGW initiated commands */ static void -lcs_start_kernel_thread(struct work_struct *work) +lcs_start_kernel_thread(struct lcs_card *card) { - struct lcs_card *card = container_of(work, struct lcs_card, kernel_thread_starter); LCS_DBF_TEXT(5, trace, "krnthrd"); if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD)) kernel_thread(lcs_recovery, (void *) card, SIGCHLD); @@ -2054,7 +2053,8 @@ lcs_probe_device(struct ccwgroup_device *ccwgdev) ccwgdev->cdev[0]->handler = lcs_irq; ccwgdev->cdev[1]->handler = lcs_irq; card->gdev = ccwgdev; - INIT_WORK(&card->kernel_thread_starter, lcs_start_kernel_thread); + INIT_WORK(&card->kernel_thread_starter, + (void *) lcs_start_kernel_thread, card); card->thread_start_mask = 0; card->thread_allowed_mask = 0; card->thread_running_mask = 0; diff --git a/trunk/drivers/s390/net/qeth_main.c b/trunk/drivers/s390/net/qeth_main.c index 2bde4f1fb9c2..7fdc5272c446 100644 --- a/trunk/drivers/s390/net/qeth_main.c +++ b/trunk/drivers/s390/net/qeth_main.c @@ -1039,9 +1039,8 @@ qeth_do_start_thread(struct qeth_card *card, unsigned long thread) } static void -qeth_start_kernel_thread(struct work_struct *work) +qeth_start_kernel_thread(struct qeth_card *card) { - struct qeth_card *card = container_of(work, struct qeth_card, kernel_thread_starter); QETH_DBF_TEXT(trace , 2, "strthrd"); if (card->read.state != CH_STATE_UP && @@ -1104,7 +1103,8 @@ qeth_setup_card(struct qeth_card *card) 
card->thread_start_mask = 0; card->thread_allowed_mask = 0; card->thread_running_mask = 0; - INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread); + INIT_WORK(&card->kernel_thread_starter, + (void *)qeth_start_kernel_thread,card); INIT_LIST_HEAD(&card->ip_list); card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL); if (!card->ip_tbd_list) { diff --git a/trunk/drivers/s390/scsi/zfcp_def.h b/trunk/drivers/s390/scsi/zfcp_def.h index 32933ed54b8a..74c0eac083e4 100644 --- a/trunk/drivers/s390/scsi/zfcp_def.h +++ b/trunk/drivers/s390/scsi/zfcp_def.h @@ -1032,9 +1032,9 @@ struct zfcp_data { wwn_t init_wwpn; fcp_lun_t init_fcp_lun; char *driver_version; - struct kmem_cache *fsf_req_qtcb_cache; - struct kmem_cache *sr_buffer_cache; - struct kmem_cache *gid_pn_cache; + kmem_cache_t *fsf_req_qtcb_cache; + kmem_cache_t *sr_buffer_cache; + kmem_cache_t *gid_pn_cache; }; /** diff --git a/trunk/drivers/s390/scsi/zfcp_fsf.c b/trunk/drivers/s390/scsi/zfcp_fsf.c index 067f1519eb04..277826cdd0c8 100644 --- a/trunk/drivers/s390/scsi/zfcp_fsf.c +++ b/trunk/drivers/s390/scsi/zfcp_fsf.c @@ -109,7 +109,7 @@ zfcp_fsf_req_alloc(mempool_t *pool, int req_flags) ptr = kmalloc(size, GFP_ATOMIC); else ptr = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache, - GFP_ATOMIC); + SLAB_ATOMIC); } if (unlikely(!ptr)) diff --git a/trunk/drivers/scsi/53c700.c b/trunk/drivers/scsi/53c700.c index 68103e508db7..335a25540c08 100644 --- a/trunk/drivers/scsi/53c700.c +++ b/trunk/drivers/scsi/53c700.c @@ -313,7 +313,7 @@ NCR_700_detect(struct scsi_host_template *tpnt, hostdata->status = memory + STATUS_OFFSET; /* all of these offsets are L1_CACHE_BYTES separated. It is fatal * if this isn't sufficient separation to avoid dma flushing issues */ - BUG_ON(!dma_is_consistent(hostdata->dev, pScript) && L1_CACHE_BYTES < dma_get_cache_alignment()); + BUG_ON(!dma_is_consistent(pScript) && L1_CACHE_BYTES < dma_get_cache_alignment()); hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET); hostdata->dev = dev; @@ -362,11 +362,11 @@ NCR_700_detect(struct scsi_host_template *tpnt, for (j = 0; j < PATCHES; j++) script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]); /* now patch up fixed addresses. 
*/ - script_patch_32(hostdata->dev, script, MessageLocation, + script_patch_32(script, MessageLocation, pScript + MSGOUT_OFFSET); - script_patch_32(hostdata->dev, script, StatusAddress, + script_patch_32(script, StatusAddress, pScript + STATUS_OFFSET); - script_patch_32(hostdata->dev, script, ReceiveMsgAddress, + script_patch_32(script, ReceiveMsgAddress, pScript + MSGIN_OFFSET); hostdata->script = script; @@ -821,9 +821,8 @@ process_extended_message(struct Scsi_Host *host, shost_printk(KERN_WARNING, host, "Unexpected SDTR msg\n"); hostdata->msgout[0] = A_REJECT_MSG; - dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE); - script_patch_16(hostdata->dev, hostdata->script, - MessageCount, 1); + dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE); + script_patch_16(hostdata->script, MessageCount, 1); /* SendMsgOut returns, so set up the return * address */ resume_offset = hostdata->pScript + Ent_SendMessageWithATN; @@ -834,9 +833,8 @@ process_extended_message(struct Scsi_Host *host, printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n", host->host_no, pun, lun); hostdata->msgout[0] = A_REJECT_MSG; - dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE); - script_patch_16(hostdata->dev, hostdata->script, MessageCount, - 1); + dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE); + script_patch_16(hostdata->script, MessageCount, 1); resume_offset = hostdata->pScript + Ent_SendMessageWithATN; break; @@ -849,9 +847,8 @@ process_extended_message(struct Scsi_Host *host, printk("\n"); /* just reject it */ hostdata->msgout[0] = A_REJECT_MSG; - dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE); - script_patch_16(hostdata->dev, hostdata->script, MessageCount, - 1); + dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE); + script_patch_16(hostdata->script, MessageCount, 1); /* SendMsgOut returns, so set up the return * address */ resume_offset = hostdata->pScript + Ent_SendMessageWithATN; @@ -932,9 +929,8 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata printk("\n"); /* just reject it */ hostdata->msgout[0] = A_REJECT_MSG; - dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE); - script_patch_16(hostdata->dev, hostdata->script, MessageCount, - 1); + dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE); + script_patch_16(hostdata->script, MessageCount, 1); /* SendMsgOut returns, so set up the return * address */ resume_offset = hostdata->pScript + Ent_SendMessageWithATN; @@ -943,7 +939,7 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata } NCR_700_writel(temp, host, TEMP_REG); /* set us up to receive another message */ - dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE); + dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE); return resume_offset; } @@ -1023,9 +1019,9 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp, slot->SG[1].ins = bS_to_host(SCRIPT_RETURN); slot->SG[1].pAddr = 0; slot->resume_offset = hostdata->pScript; - dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE); - dma_cache_sync(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE); - + dma_cache_sync(slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE); + dma_cache_sync(SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE); + /* queue the command for reissue */ slot->state = NCR_700_SLOT_QUEUED; slot->flags = NCR_700_FLAG_AUTOSENSE; @@ -1140,12 +1136,11 @@ 
process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp, hostdata->cmd = slot->cmnd; /* re-patch for this command */ - script_patch_32_abs(hostdata->dev, hostdata->script, - CommandAddress, slot->pCmd); - script_patch_16(hostdata->dev, hostdata->script, + script_patch_32_abs(hostdata->script, CommandAddress, + slot->pCmd); + script_patch_16(hostdata->script, CommandCount, slot->cmnd->cmd_len); - script_patch_32_abs(hostdata->dev, hostdata->script, - SGScriptStartAddress, + script_patch_32_abs(hostdata->script, SGScriptStartAddress, to32bit(&slot->pSG[0].ins)); /* Note: setting SXFER only works if we're @@ -1155,13 +1150,13 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp, * should therefore always clear ACK */ NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device), host, SXFER_REG); - dma_cache_sync(hostdata->dev, hostdata->msgin, + dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE); - dma_cache_sync(hostdata->dev, hostdata->msgout, + dma_cache_sync(hostdata->msgout, MSG_ARRAY_SIZE, DMA_TO_DEVICE); /* I'm just being paranoid here, the command should * already have been flushed from the cache */ - dma_cache_sync(hostdata->dev, slot->cmnd->cmnd, + dma_cache_sync(slot->cmnd->cmnd, slot->cmnd->cmd_len, DMA_TO_DEVICE); @@ -1225,7 +1220,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp, hostdata->reselection_id = reselection_id; /* just in case we have a stale simple tag message, clear it */ hostdata->msgin[1] = 0; - dma_cache_sync(hostdata->dev, hostdata->msgin, + dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL); if(hostdata->tag_negotiated & (1<pScript + Ent_GetReselectionWithTag; @@ -1341,7 +1336,7 @@ process_selection(struct Scsi_Host *host, __u32 dsp) hostdata->cmd = NULL; /* clear any stale simple tag message */ hostdata->msgin[1] = 0; - dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, + dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL); if(id == 0xff) { @@ -1438,30 +1433,29 @@ NCR_700_start_command(struct scsi_cmnd *SCp) NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION); } - script_patch_16(hostdata->dev, hostdata->script, MessageCount, count); + script_patch_16(hostdata->script, MessageCount, count); - script_patch_ID(hostdata->dev, hostdata->script, + script_patch_ID(hostdata->script, Device_ID, 1<dev, hostdata->script, CommandAddress, + script_patch_32_abs(hostdata->script, CommandAddress, slot->pCmd); - script_patch_16(hostdata->dev, hostdata->script, CommandCount, - SCp->cmd_len); + script_patch_16(hostdata->script, CommandCount, SCp->cmd_len); /* finally plumb the beginning of the SG list into the script * */ - script_patch_32_abs(hostdata->dev, hostdata->script, - SGScriptStartAddress, to32bit(&slot->pSG[0].ins)); + script_patch_32_abs(hostdata->script, SGScriptStartAddress, + to32bit(&slot->pSG[0].ins)); NCR_700_clear_fifo(SCp->device->host); if(slot->resume_offset == 0) slot->resume_offset = hostdata->pScript; /* now perform all the writebacks and invalidates */ - dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE); - dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, + dma_cache_sync(hostdata->msgout, count, DMA_TO_DEVICE); + dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE); - dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE); - dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE); + dma_cache_sync(SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE); + 
dma_cache_sync(hostdata->status, 1, DMA_FROM_DEVICE); /* set the synchronous period/offset */ NCR_700_writeb(NCR_700_get_SXFER(SCp->device), @@ -1637,7 +1631,7 @@ NCR_700_intr(int irq, void *dev_id) slot->SG[i].ins = bS_to_host(SCRIPT_NOP); slot->SG[i].pAddr = 0; } - dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE); + dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE); /* and pretend we disconnected after * the command phase */ resume_offset = hostdata->pScript + Ent_MsgInDuringData; @@ -1903,9 +1897,9 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)) } slot->SG[i].ins = bS_to_host(SCRIPT_RETURN); slot->SG[i].pAddr = 0; - dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE); + dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE); DEBUG((" SETTING %08lx to %x\n", - (&slot->pSG[i].ins), + (&slot->pSG[i].ins), slot->SG[i].ins)); } slot->resume_offset = 0; diff --git a/trunk/drivers/scsi/53c700.h b/trunk/drivers/scsi/53c700.h index f38822db4210..f5c3caf344a7 100644 --- a/trunk/drivers/scsi/53c700.h +++ b/trunk/drivers/scsi/53c700.h @@ -415,31 +415,31 @@ struct NCR_700_Host_Parameters { #define NCR_710_MIN_XFERP 0 #define NCR_700_MIN_PERIOD 25 /* for SDTR message, 100ns */ -#define script_patch_32(dev, script, symbol, value) \ +#define script_patch_32(script, symbol, value) \ { \ int i; \ for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \ __u32 val = bS_to_cpu((script)[A_##symbol##_used[i]]) + value; \ (script)[A_##symbol##_used[i]] = bS_to_host(val); \ - dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \ + dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \ DEBUG((" script, patching %s at %d to 0x%lx\n", \ #symbol, A_##symbol##_used[i], (value))); \ } \ } -#define script_patch_32_abs(dev, script, symbol, value) \ +#define script_patch_32_abs(script, symbol, value) \ { \ int i; \ for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \ (script)[A_##symbol##_used[i]] = bS_to_host(value); \ - dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \ + dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \ DEBUG((" script, patching %s at %d to 0x%lx\n", \ #symbol, A_##symbol##_used[i], (value))); \ } \ } /* Used for patching the SCSI ID in the SELECT instruction */ -#define script_patch_ID(dev, script, symbol, value) \ +#define script_patch_ID(script, symbol, value) \ { \ int i; \ for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \ @@ -447,13 +447,13 @@ struct NCR_700_Host_Parameters { val &= 0xff00ffff; \ val |= ((value) & 0xff) << 16; \ (script)[A_##symbol##_used[i]] = bS_to_host(val); \ - dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \ + dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \ DEBUG((" script, patching ID field %s at %d to 0x%x\n", \ #symbol, A_##symbol##_used[i], val)); \ } \ } -#define script_patch_16(dev, script, symbol, value) \ +#define script_patch_16(script, symbol, value) \ { \ int i; \ for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \ @@ -461,7 +461,7 @@ struct NCR_700_Host_Parameters { val &= 0xffff0000; \ val |= ((value) & 0xffff); \ (script)[A_##symbol##_used[i]] = bS_to_host(val); \ - dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \ + dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \ DEBUG((" script, patching short field %s at %d to 0x%x\n", \ #symbol, 
A_##symbol##_used[i], val)); \ } \ diff --git a/trunk/drivers/scsi/NCR5380.c b/trunk/drivers/scsi/NCR5380.c index bb3cb3360541..a6aa91072880 100644 --- a/trunk/drivers/scsi/NCR5380.c +++ b/trunk/drivers/scsi/NCR5380.c @@ -849,7 +849,7 @@ static int __devinit NCR5380_init(struct Scsi_Host *instance, int flags) hostdata->issue_queue = NULL; hostdata->disconnected_queue = NULL; - INIT_DELAYED_WORK(&hostdata->coroutine, NCR5380_main); + INIT_WORK(&hostdata->coroutine, NCR5380_main, hostdata); #ifdef NCR5380_STATS for (i = 0; i < 8; ++i) { @@ -1016,7 +1016,7 @@ static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) /* Run the coroutine if it isn't already running. */ /* Kick off command processing */ - schedule_delayed_work(&hostdata->coroutine, 0); + schedule_work(&hostdata->coroutine); return 0; } @@ -1033,10 +1033,9 @@ static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) * host lock and called routines may take the isa dma lock. */ -static void NCR5380_main(struct work_struct *work) +static void NCR5380_main(void *p) { - struct NCR5380_hostdata *hostdata = - container_of(work, struct NCR5380_hostdata, coroutine.work); + struct NCR5380_hostdata *hostdata = p; struct Scsi_Host *instance = hostdata->host; Scsi_Cmnd *tmp, *prev; int done; @@ -1222,7 +1221,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) } /* if BASR_IRQ */ spin_unlock_irqrestore(instance->host_lock, flags); if(!done) - schedule_delayed_work(&hostdata->coroutine, 0); + schedule_work(&hostdata->coroutine); } while (!done); return IRQ_HANDLED; } diff --git a/trunk/drivers/scsi/NCR5380.h b/trunk/drivers/scsi/NCR5380.h index 713a108c02ef..1bc73de496b0 100644 --- a/trunk/drivers/scsi/NCR5380.h +++ b/trunk/drivers/scsi/NCR5380.h @@ -271,7 +271,7 @@ struct NCR5380_hostdata { unsigned long time_expires; /* in jiffies, set prior to sleeping */ int select_time; /* timer in select for target response */ volatile Scsi_Cmnd *selecting; - struct delayed_work coroutine; /* our co-routine */ + struct work_struct coroutine; /* our co-routine */ #ifdef NCR5380_STATS unsigned timebase; /* Base for time calcs */ long time_read[8]; /* time to do reads */ @@ -298,7 +298,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance); #ifndef DONT_USE_INTR static irqreturn_t NCR5380_intr(int irq, void *dev_id); #endif -static void NCR5380_main(struct work_struct *work); +static void NCR5380_main(void *ptr); static void NCR5380_print_options(struct Scsi_Host *instance); #ifdef NDEBUG static void NCR5380_print_phase(struct Scsi_Host *instance); diff --git a/trunk/drivers/scsi/aha152x.c b/trunk/drivers/scsi/aha152x.c index 0cec742d12e9..306f46b85a55 100644 --- a/trunk/drivers/scsi/aha152x.c +++ b/trunk/drivers/scsi/aha152x.c @@ -1443,7 +1443,7 @@ static struct work_struct aha152x_tq; * Run service completions on the card with interrupts enabled. 
* */ -static void run(struct work_struct *work) +static void run(void) { struct aha152x_hostdata *hd; @@ -1499,7 +1499,7 @@ static irqreturn_t intr(int irqno, void *dev_id) HOSTDATA(shpnt)->service=1; /* Poke the BH handler */ - INIT_WORK(&aha152x_tq, run); + INIT_WORK(&aha152x_tq, (void *) run, NULL); schedule_work(&aha152x_tq); } DO_UNLOCK(flags); diff --git a/trunk/drivers/scsi/aic94xx/aic94xx.h b/trunk/drivers/scsi/aic94xx/aic94xx.h index 32f513b1b78a..71a031df7a34 100644 --- a/trunk/drivers/scsi/aic94xx/aic94xx.h +++ b/trunk/drivers/scsi/aic94xx/aic94xx.h @@ -56,8 +56,8 @@ /* 2*ITNL timeout + 1 second */ #define AIC94XX_SCB_TIMEOUT (5*HZ) -extern struct kmem_cache *asd_dma_token_cache; -extern struct kmem_cache *asd_ascb_cache; +extern kmem_cache_t *asd_dma_token_cache; +extern kmem_cache_t *asd_ascb_cache; extern char sas_addr_str[2*SAS_ADDR_SIZE + 1]; static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr) diff --git a/trunk/drivers/scsi/aic94xx/aic94xx_hwi.c b/trunk/drivers/scsi/aic94xx/aic94xx_hwi.c index da94e126ca83..af7e01134364 100644 --- a/trunk/drivers/scsi/aic94xx/aic94xx_hwi.c +++ b/trunk/drivers/scsi/aic94xx/aic94xx_hwi.c @@ -1047,7 +1047,7 @@ irqreturn_t asd_hw_isr(int irq, void *dev_id) static inline struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha, gfp_t gfp_flags) { - extern struct kmem_cache *asd_ascb_cache; + extern kmem_cache_t *asd_ascb_cache; struct asd_seq_data *seq = &asd_ha->seq; struct asd_ascb *ascb; unsigned long flags; diff --git a/trunk/drivers/scsi/aic94xx/aic94xx_init.c b/trunk/drivers/scsi/aic94xx/aic94xx_init.c index fbc82b00a418..42302ef05ee5 100644 --- a/trunk/drivers/scsi/aic94xx/aic94xx_init.c +++ b/trunk/drivers/scsi/aic94xx/aic94xx_init.c @@ -450,8 +450,8 @@ static inline void asd_destroy_ha_caches(struct asd_ha_struct *asd_ha) asd_ha->scb_pool = NULL; } -struct kmem_cache *asd_dma_token_cache; -struct kmem_cache *asd_ascb_cache; +kmem_cache_t *asd_dma_token_cache; +kmem_cache_t *asd_ascb_cache; static int asd_create_global_caches(void) { diff --git a/trunk/drivers/scsi/aic94xx/aic94xx_scb.c b/trunk/drivers/scsi/aic94xx/aic94xx_scb.c index 75ed6b0569d1..14d5d8c2ee13 100644 --- a/trunk/drivers/scsi/aic94xx/aic94xx_scb.c +++ b/trunk/drivers/scsi/aic94xx/aic94xx_scb.c @@ -414,10 +414,9 @@ void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id) } /* hard reset a phy later */ -static void do_phy_reset_later(struct work_struct *work) +static void do_phy_reset_later(void *data) { - struct sas_phy *sas_phy = - container_of(work, struct sas_phy, reset_work); + struct sas_phy *sas_phy = data; int error; ASD_DPRINTK("%s: About to hard reset phy %d\n", __FUNCTION__, @@ -431,7 +430,7 @@ static void do_phy_reset_later(struct work_struct *work) static void phy_reset_later(struct sas_phy *sas_phy, struct Scsi_Host *shost) { - INIT_WORK(&sas_phy->reset_work, do_phy_reset_later); + INIT_WORK(&sas_phy->reset_work, do_phy_reset_later, sas_phy); queue_work(shost->work_q, &sas_phy->reset_work); } @@ -443,7 +442,7 @@ static void task_kill_later(struct asd_ascb *ascb) struct Scsi_Host *shost = sas_ha->core.shost; struct sas_task *task = ascb->uldd_task; - INIT_WORK(&task->abort_work, sas_task_abort); + INIT_WORK(&task->abort_work, (void (*)(void *))sas_task_abort, task); queue_work(shost->work_q, &task->abort_work); } diff --git a/trunk/drivers/scsi/ibmvscsi/ibmvstgt.c b/trunk/drivers/scsi/ibmvscsi/ibmvstgt.c index e28260f05d6b..0e74174a1b37 100644 --- a/trunk/drivers/scsi/ibmvscsi/ibmvstgt.c +++ b/trunk/drivers/scsi/ibmvscsi/ibmvstgt.c @@ 
-67,7 +67,6 @@ struct vio_port { unsigned long liobn; unsigned long riobn; - struct srp_target *target; }; static struct workqueue_struct *vtgtd; @@ -686,10 +685,10 @@ static inline struct viosrp_crq *next_crq(struct crq_queue *queue) return crq; } -static void handle_crq(struct work_struct *work) +static void handle_crq(void *data) { - struct vio_port *vport = container_of(work, struct vio_port, crq_work); - struct srp_target *target = vport->target; + struct srp_target *target = (struct srp_target *) data; + struct vio_port *vport = target_to_port(target); struct viosrp_crq *crq; int done = 0; @@ -823,7 +822,6 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id) target->shost = shost; vport->dma_dev = dev; target->ldata = vport; - vport->target = target; err = srp_target_alloc(target, &dev->dev, INITIAL_SRP_LIMIT, SRP_MAX_IU_LEN); if (err) @@ -839,7 +837,7 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id) vport->liobn = dma[0]; vport->riobn = dma[5]; - INIT_WORK(&vport->crq_work, handle_crq); + INIT_WORK(&vport->crq_work, handle_crq, target); err = crq_queue_create(&vport->crq_queue, target); if (err) diff --git a/trunk/drivers/scsi/ide-scsi.c b/trunk/drivers/scsi/ide-scsi.c index 8f6b5bf580f6..1427a41e8441 100644 --- a/trunk/drivers/scsi/ide-scsi.c +++ b/trunk/drivers/scsi/ide-scsi.c @@ -110,7 +110,6 @@ typedef struct ide_scsi_obj { } idescsi_scsi_t; static DEFINE_MUTEX(idescsi_ref_mutex); -static int idescsi_nocd; /* Set by module param to skip cd */ #define ide_scsi_g(disk) \ container_of((disk)->private_data, struct ide_scsi_obj, driver) @@ -1128,9 +1127,6 @@ static int ide_scsi_probe(ide_drive_t *drive) warned = 1; } - if (idescsi_nocd && drive->media == ide_cdrom) - return -ENODEV; - if (!strstr("ide-scsi", drive->driver_req) || !drive->present || drive->media == ide_disk || @@ -1191,8 +1187,6 @@ static void __exit exit_idescsi_module(void) driver_unregister(&idescsi_driver.gen_driver); } -module_param(idescsi_nocd, int, 0600); -MODULE_PARM_DESC(idescsi_nocd, "Disable handling of CD-ROMs so they may be driven by ide-cd"); module_init(init_idescsi_module); module_exit(exit_idescsi_module); MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/scsi/imm.c b/trunk/drivers/scsi/imm.c index 0464c182c577..e31f6122106f 100644 --- a/trunk/drivers/scsi/imm.c +++ b/trunk/drivers/scsi/imm.c @@ -36,7 +36,7 @@ typedef struct { int base_hi; /* Hi Base address for ECP-ISA chipset */ int mode; /* Transfer mode */ struct scsi_cmnd *cur_cmd; /* Current queued command */ - struct delayed_work imm_tq; /* Polling interrupt stuff */ + struct work_struct imm_tq; /* Polling interrupt stuff */ unsigned long jstart; /* Jiffies at start */ unsigned failed:1; /* Failure flag */ unsigned dp:1; /* Data phase present */ @@ -733,9 +733,9 @@ static int imm_completion(struct scsi_cmnd *cmd) * the scheduler's task queue to generate a stream of call-backs and * complete the request when the drive is ready. 
*/ -static void imm_interrupt(struct work_struct *work) +static void imm_interrupt(void *data) { - imm_struct *dev = container_of(work, imm_struct, imm_tq.work); + imm_struct *dev = (imm_struct *) data; struct scsi_cmnd *cmd = dev->cur_cmd; struct Scsi_Host *host = cmd->device->host; unsigned long flags; @@ -745,6 +745,7 @@ static void imm_interrupt(struct work_struct *work) return; } if (imm_engine(dev, cmd)) { + INIT_WORK(&dev->imm_tq, imm_interrupt, (void *) dev); schedule_delayed_work(&dev->imm_tq, 1); return; } @@ -952,7 +953,8 @@ static int imm_queuecommand(struct scsi_cmnd *cmd, cmd->result = DID_ERROR << 16; /* default return code */ cmd->SCp.phase = 0; /* bus free */ - schedule_delayed_work(&dev->imm_tq, 0); + INIT_WORK(&dev->imm_tq, imm_interrupt, dev); + schedule_work(&dev->imm_tq); imm_pb_claim(dev); @@ -1223,7 +1225,7 @@ static int __imm_attach(struct parport *pb) else ports = 8; - INIT_DELAYED_WORK(&dev->imm_tq, imm_interrupt); + INIT_WORK(&dev->imm_tq, imm_interrupt, dev); err = -ENOMEM; host = scsi_host_alloc(&imm_template, sizeof(imm_struct *)); diff --git a/trunk/drivers/scsi/ipr.c b/trunk/drivers/scsi/ipr.c index b318500785e5..2d83fbb806a5 100644 --- a/trunk/drivers/scsi/ipr.c +++ b/trunk/drivers/scsi/ipr.c @@ -2307,7 +2307,7 @@ static void ipr_release_dump(struct kref *kref) /** * ipr_worker_thread - Worker thread - * @work: ioa config struct + * @data: ioa config struct * * Called at task level from a work thread. This function takes care * of adding and removing device from the mid-layer as configuration @@ -2316,14 +2316,13 @@ static void ipr_release_dump(struct kref *kref) * Return value: * nothing **/ -static void ipr_worker_thread(struct work_struct *work) +static void ipr_worker_thread(void *data) { unsigned long lock_flags; struct ipr_resource_entry *res; struct scsi_device *sdev; struct ipr_dump *dump; - struct ipr_ioa_cfg *ioa_cfg = - container_of(work, struct ipr_ioa_cfg, work_q); + struct ipr_ioa_cfg *ioa_cfg = data; u8 bus, target, lun; int did_work; @@ -6940,7 +6939,7 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) return -ENOMEM; for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { - ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr); + ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr); if (!ipr_cmd) { ipr_free_cmd_blks(ioa_cfg); @@ -7122,7 +7121,7 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); INIT_LIST_HEAD(&ioa_cfg->free_res_q); INIT_LIST_HEAD(&ioa_cfg->used_res_q); - INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); + INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg); init_waitqueue_head(&ioa_cfg->reset_wait_q); ioa_cfg->sdt_state = INACTIVE; if (ipr_enable_cache) diff --git a/trunk/drivers/scsi/libiscsi.c b/trunk/drivers/scsi/libiscsi.c index e11b23c641e2..5d8862189485 100644 --- a/trunk/drivers/scsi/libiscsi.c +++ b/trunk/drivers/scsi/libiscsi.c @@ -719,10 +719,9 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) return rc; } -static void iscsi_xmitworker(struct work_struct *work) +static void iscsi_xmitworker(void *data) { - struct iscsi_conn *conn = - container_of(work, struct iscsi_conn, xmitwork); + struct iscsi_conn *conn = data; int rc; /* * serialize Xmit worker on a per-connection basis. 
@@ -1513,7 +1512,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx) if (conn->mgmtqueue == ERR_PTR(-ENOMEM)) goto mgmtqueue_alloc_fail; - INIT_WORK(&conn->xmitwork, iscsi_xmitworker); + INIT_WORK(&conn->xmitwork, iscsi_xmitworker, conn); /* allocate login_mtask used for the login/text sequences */ spin_lock_bh(&session->lock); diff --git a/trunk/drivers/scsi/libsas/sas_discover.c b/trunk/drivers/scsi/libsas/sas_discover.c index fb7df7b75811..d977bd492d8d 100644 --- a/trunk/drivers/scsi/libsas/sas_discover.c +++ b/trunk/drivers/scsi/libsas/sas_discover.c @@ -647,12 +647,10 @@ void sas_unregister_domain_devices(struct asd_sas_port *port) * Discover process only interrogates devices in order to discover the * domain. */ -static void sas_discover_domain(struct work_struct *work) +static void sas_discover_domain(void *data) { int error = 0; - struct sas_discovery_event *ev = - container_of(work, struct sas_discovery_event, work); - struct asd_sas_port *port = ev->port; + struct asd_sas_port *port = data; sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock, &port->disc.pending); @@ -694,12 +692,10 @@ static void sas_discover_domain(struct work_struct *work) current->pid, error); } -static void sas_revalidate_domain(struct work_struct *work) +static void sas_revalidate_domain(void *data) { int res = 0; - struct sas_discovery_event *ev = - container_of(work, struct sas_discovery_event, work); - struct asd_sas_port *port = ev->port; + struct asd_sas_port *port = data; sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock, &port->disc.pending); @@ -726,7 +722,7 @@ int sas_discover_event(struct asd_sas_port *port, enum discover_event ev) BUG_ON(ev >= DISC_NUM_EVENTS); sas_queue_event(ev, &disc->disc_event_lock, &disc->pending, - &disc->disc_work[ev].work, port->ha->core.shost); + &disc->disc_work[ev], port->ha->core.shost); return 0; } @@ -741,15 +737,13 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port) { int i; - static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = { + static void (*sas_event_fns[DISC_NUM_EVENTS])(void *) = { [DISCE_DISCOVER_DOMAIN] = sas_discover_domain, [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain, }; spin_lock_init(&disc->disc_event_lock); disc->pending = 0; - for (i = 0; i < DISC_NUM_EVENTS; i++) { - INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]); - disc->disc_work[i].port = port; - } + for (i = 0; i < DISC_NUM_EVENTS; i++) + INIT_WORK(&disc->disc_work[i], sas_event_fns[i], port); } diff --git a/trunk/drivers/scsi/libsas/sas_event.c b/trunk/drivers/scsi/libsas/sas_event.c index d83392ee6823..19110ed1c89c 100644 --- a/trunk/drivers/scsi/libsas/sas_event.c +++ b/trunk/drivers/scsi/libsas/sas_event.c @@ -31,7 +31,7 @@ static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event) BUG_ON(event >= HA_NUM_EVENTS); sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending, - &sas_ha->ha_events[event].work, sas_ha->core.shost); + &sas_ha->ha_events[event], sas_ha->core.shost); } static void notify_port_event(struct asd_sas_phy *phy, enum port_event event) @@ -41,7 +41,7 @@ static void notify_port_event(struct asd_sas_phy *phy, enum port_event event) BUG_ON(event >= PORT_NUM_EVENTS); sas_queue_event(event, &ha->event_lock, &phy->port_events_pending, - &phy->port_events[event].work, ha->core.shost); + &phy->port_events[event], ha->core.shost); } static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) @@ -51,12 +51,12 @@ static void 
notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) BUG_ON(event >= PHY_NUM_EVENTS); sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending, - &phy->phy_events[event].work, ha->core.shost); + &phy->phy_events[event], ha->core.shost); } int sas_init_events(struct sas_ha_struct *sas_ha) { - static const work_func_t sas_ha_event_fns[HA_NUM_EVENTS] = { + static void (*sas_ha_event_fns[HA_NUM_EVENTS])(void *) = { [HAE_RESET] = sas_hae_reset, }; @@ -64,10 +64,8 @@ int sas_init_events(struct sas_ha_struct *sas_ha) spin_lock_init(&sas_ha->event_lock); - for (i = 0; i < HA_NUM_EVENTS; i++) { - INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]); - sas_ha->ha_events[i].ha = sas_ha; - } + for (i = 0; i < HA_NUM_EVENTS; i++) + INIT_WORK(&sas_ha->ha_events[i], sas_ha_event_fns[i], sas_ha); sas_ha->notify_ha_event = notify_ha_event; sas_ha->notify_port_event = notify_port_event; diff --git a/trunk/drivers/scsi/libsas/sas_init.c b/trunk/drivers/scsi/libsas/sas_init.c index 2f0c07fc3f48..0fb347b4b1a2 100644 --- a/trunk/drivers/scsi/libsas/sas_init.c +++ b/trunk/drivers/scsi/libsas/sas_init.c @@ -36,7 +36,7 @@ #include "../scsi_sas_internal.h" -struct kmem_cache *sas_task_cache; +kmem_cache_t *sas_task_cache; /*------------ SAS addr hash -----------*/ void sas_hash_addr(u8 *hashed, const u8 *sas_addr) @@ -65,11 +65,9 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr) /* ---------- HA events ---------- */ -void sas_hae_reset(struct work_struct *work) +void sas_hae_reset(void *data) { - struct sas_ha_event *ev = - container_of(work, struct sas_ha_event, work); - struct sas_ha_struct *ha = ev->ha; + struct sas_ha_struct *ha = data; sas_begin_event(HAE_RESET, &ha->event_lock, &ha->pending); diff --git a/trunk/drivers/scsi/libsas/sas_internal.h b/trunk/drivers/scsi/libsas/sas_internal.h index 137d7e496b6d..bffcee474921 100644 --- a/trunk/drivers/scsi/libsas/sas_internal.h +++ b/trunk/drivers/scsi/libsas/sas_internal.h @@ -60,11 +60,11 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha); void sas_deform_port(struct asd_sas_phy *phy); -void sas_porte_bytes_dmaed(struct work_struct *work); -void sas_porte_broadcast_rcvd(struct work_struct *work); -void sas_porte_link_reset_err(struct work_struct *work); -void sas_porte_timer_event(struct work_struct *work); -void sas_porte_hard_reset(struct work_struct *work); +void sas_porte_bytes_dmaed(void *); +void sas_porte_broadcast_rcvd(void *); +void sas_porte_link_reset_err(void *); +void sas_porte_timer_event(void *); +void sas_porte_hard_reset(void *); int sas_notify_lldd_dev_found(struct domain_device *); void sas_notify_lldd_dev_gone(struct domain_device *); @@ -75,7 +75,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy); struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy); -void sas_hae_reset(struct work_struct *work); +void sas_hae_reset(void *); static inline void sas_queue_event(int event, spinlock_t *lock, unsigned long *pending, diff --git a/trunk/drivers/scsi/libsas/sas_phy.c b/trunk/drivers/scsi/libsas/sas_phy.c index b459c4b635b1..9340cdbae4a3 100644 --- a/trunk/drivers/scsi/libsas/sas_phy.c +++ b/trunk/drivers/scsi/libsas/sas_phy.c @@ -30,11 +30,9 @@ /* ---------- Phy events ---------- */ -static void sas_phye_loss_of_signal(struct work_struct *work) +static void sas_phye_loss_of_signal(void *data) { - struct asd_sas_event *ev = - container_of(work, struct asd_sas_event, work); - struct asd_sas_phy *phy = ev->phy; + struct asd_sas_phy *phy = data; sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock, 
&phy->phy_events_pending); @@ -42,22 +40,18 @@ static void sas_phye_loss_of_signal(struct work_struct *work) sas_deform_port(phy); } -static void sas_phye_oob_done(struct work_struct *work) +static void sas_phye_oob_done(void *data) { - struct asd_sas_event *ev = - container_of(work, struct asd_sas_event, work); - struct asd_sas_phy *phy = ev->phy; + struct asd_sas_phy *phy = data; sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock, &phy->phy_events_pending); phy->error = 0; } -static void sas_phye_oob_error(struct work_struct *work) +static void sas_phye_oob_error(void *data) { - struct asd_sas_event *ev = - container_of(work, struct asd_sas_event, work); - struct asd_sas_phy *phy = ev->phy; + struct asd_sas_phy *phy = data; struct sas_ha_struct *sas_ha = phy->ha; struct asd_sas_port *port = phy->port; struct sas_internal *i = @@ -86,11 +80,9 @@ static void sas_phye_oob_error(struct work_struct *work) } } -static void sas_phye_spinup_hold(struct work_struct *work) +static void sas_phye_spinup_hold(void *data) { - struct asd_sas_event *ev = - container_of(work, struct asd_sas_event, work); - struct asd_sas_phy *phy = ev->phy; + struct asd_sas_phy *phy = data; struct sas_ha_struct *sas_ha = phy->ha; struct sas_internal *i = to_sas_internal(sas_ha->core.shost->transportt); @@ -108,14 +100,14 @@ int sas_register_phys(struct sas_ha_struct *sas_ha) { int i; - static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = { + static void (*sas_phy_event_fns[PHY_NUM_EVENTS])(void *) = { [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal, [PHYE_OOB_DONE] = sas_phye_oob_done, [PHYE_OOB_ERROR] = sas_phye_oob_error, [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold, }; - static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = { + static void (*sas_port_event_fns[PORT_NUM_EVENTS])(void *) = { [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed, [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd, [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err, @@ -130,18 +122,13 @@ int sas_register_phys(struct sas_ha_struct *sas_ha) phy->error = 0; INIT_LIST_HEAD(&phy->port_phy_el); - for (k = 0; k < PORT_NUM_EVENTS; k++) { - INIT_WORK(&phy->port_events[k].work, - sas_port_event_fns[k]); - phy->port_events[k].phy = phy; - } - - for (k = 0; k < PHY_NUM_EVENTS; k++) { - INIT_WORK(&phy->phy_events[k].work, - sas_phy_event_fns[k]); - phy->phy_events[k].phy = phy; - } + for (k = 0; k < PORT_NUM_EVENTS; k++) + INIT_WORK(&phy->port_events[k], sas_port_event_fns[k], + phy); + for (k = 0; k < PHY_NUM_EVENTS; k++) + INIT_WORK(&phy->phy_events[k], sas_phy_event_fns[k], + phy); phy->port = NULL; phy->ha = sas_ha; spin_lock_init(&phy->frame_rcvd_lock); diff --git a/trunk/drivers/scsi/libsas/sas_port.c b/trunk/drivers/scsi/libsas/sas_port.c index 971c37ceecb4..253cdcf306a2 100644 --- a/trunk/drivers/scsi/libsas/sas_port.c +++ b/trunk/drivers/scsi/libsas/sas_port.c @@ -181,11 +181,9 @@ void sas_deform_port(struct asd_sas_phy *phy) /* ---------- SAS port events ---------- */ -void sas_porte_bytes_dmaed(struct work_struct *work) +void sas_porte_bytes_dmaed(void *data) { - struct asd_sas_event *ev = - container_of(work, struct asd_sas_event, work); - struct asd_sas_phy *phy = ev->phy; + struct asd_sas_phy *phy = data; sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock, &phy->port_events_pending); @@ -193,13 +191,11 @@ void sas_porte_bytes_dmaed(struct work_struct *work) sas_form_port(phy); } -void sas_porte_broadcast_rcvd(struct work_struct *work) +void sas_porte_broadcast_rcvd(void *data) { - struct asd_sas_event *ev = - container_of(work, 
struct asd_sas_event, work); - struct asd_sas_phy *phy = ev->phy; unsigned long flags; u32 prim; + struct asd_sas_phy *phy = data; sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock, &phy->port_events_pending); @@ -212,11 +208,9 @@ void sas_porte_broadcast_rcvd(struct work_struct *work) sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN); } -void sas_porte_link_reset_err(struct work_struct *work) +void sas_porte_link_reset_err(void *data) { - struct asd_sas_event *ev = - container_of(work, struct asd_sas_event, work); - struct asd_sas_phy *phy = ev->phy; + struct asd_sas_phy *phy = data; sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock, &phy->port_events_pending); @@ -224,11 +218,9 @@ void sas_porte_link_reset_err(struct work_struct *work) sas_deform_port(phy); } -void sas_porte_timer_event(struct work_struct *work) +void sas_porte_timer_event(void *data) { - struct asd_sas_event *ev = - container_of(work, struct asd_sas_event, work); - struct asd_sas_phy *phy = ev->phy; + struct asd_sas_phy *phy = data; sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock, &phy->port_events_pending); @@ -236,11 +228,9 @@ void sas_porte_timer_event(struct work_struct *work) sas_deform_port(phy); } -void sas_porte_hard_reset(struct work_struct *work) +void sas_porte_hard_reset(void *data) { - struct asd_sas_event *ev = - container_of(work, struct asd_sas_event, work); - struct asd_sas_phy *phy = ev->phy; + struct asd_sas_phy *phy = data; sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock, &phy->port_events_pending); diff --git a/trunk/drivers/scsi/libsas/sas_scsi_host.c b/trunk/drivers/scsi/libsas/sas_scsi_host.c index 22672d54aa27..e064aac06b90 100644 --- a/trunk/drivers/scsi/libsas/sas_scsi_host.c +++ b/trunk/drivers/scsi/libsas/sas_scsi_host.c @@ -846,10 +846,8 @@ static int do_sas_task_abort(struct sas_task *task) return -EAGAIN; } -void sas_task_abort(struct work_struct *work) +void sas_task_abort(struct sas_task *task) { - struct sas_task *task = - container_of(work, struct sas_task, abort_work); int i; for (i = 0; i < 5; i++) diff --git a/trunk/drivers/scsi/oktagon_esp.c b/trunk/drivers/scsi/oktagon_esp.c index c116a6ae3c54..dd67a68c5c23 100644 --- a/trunk/drivers/scsi/oktagon_esp.c +++ b/trunk/drivers/scsi/oktagon_esp.c @@ -72,12 +72,12 @@ static void dma_advance_sg(Scsi_Cmnd *); static int oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x); #ifdef USE_BOTTOM_HALF -static void dma_commit(struct work_struct *unused); +static void dma_commit(void *opaque); long oktag_to_io(long *paddr, long *addr, long len); long oktag_from_io(long *addr, long *paddr, long len); -static DECLARE_WORK(tq_fake_dma, dma_commit); +static DECLARE_WORK(tq_fake_dma, dma_commit, NULL); #define DMA_MAXTRANSFER 0x8000 @@ -266,7 +266,7 @@ oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x) */ -static void dma_commit(struct work_struct *unused) +static void dma_commit(void *opaque) { long wait,len2,pos; struct NCR_ESP *esp; diff --git a/trunk/drivers/scsi/ppa.c b/trunk/drivers/scsi/ppa.c index 584ba4d6e038..89a2a9f11e41 100644 --- a/trunk/drivers/scsi/ppa.c +++ b/trunk/drivers/scsi/ppa.c @@ -31,7 +31,7 @@ typedef struct { int base; /* Actual port address */ int mode; /* Transfer mode */ struct scsi_cmnd *cur_cmd; /* Current queued command */ - struct delayed_work ppa_tq; /* Polling interrupt stuff */ + struct work_struct ppa_tq; /* Polling interrupt stuff */ unsigned long jstart; /* Jiffies at start */ unsigned long recon_tmo; /* How many usecs to 
wait for reconnection (6th bit) */ unsigned int failed:1; /* Failure flag */ @@ -627,9 +627,9 @@ static int ppa_completion(struct scsi_cmnd *cmd) * the scheduler's task queue to generate a stream of call-backs and * complete the request when the drive is ready. */ -static void ppa_interrupt(struct work_struct *work) +static void ppa_interrupt(void *data) { - ppa_struct *dev = container_of(work, ppa_struct, ppa_tq.work); + ppa_struct *dev = (ppa_struct *) data; struct scsi_cmnd *cmd = dev->cur_cmd; if (!cmd) { @@ -637,6 +637,7 @@ static void ppa_interrupt(struct work_struct *work) return; } if (ppa_engine(dev, cmd)) { + dev->ppa_tq.data = (void *) dev; schedule_delayed_work(&dev->ppa_tq, 1); return; } @@ -821,7 +822,8 @@ static int ppa_queuecommand(struct scsi_cmnd *cmd, cmd->result = DID_ERROR << 16; /* default return code */ cmd->SCp.phase = 0; /* bus free */ - schedule_delayed_work(&dev->ppa_tq, 0); + dev->ppa_tq.data = dev; + schedule_work(&dev->ppa_tq); ppa_pb_claim(dev); @@ -1084,7 +1086,7 @@ static int __ppa_attach(struct parport *pb) else ports = 8; - INIT_DELAYED_WORK(&dev->ppa_tq, ppa_interrupt); + INIT_WORK(&dev->ppa_tq, ppa_interrupt, dev); err = -ENOMEM; host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *)); diff --git a/trunk/drivers/scsi/qla2xxx/qla_os.c b/trunk/drivers/scsi/qla2xxx/qla_os.c index d03523d3bf38..cbe0cad83b68 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_os.c +++ b/trunk/drivers/scsi/qla2xxx/qla_os.c @@ -24,7 +24,7 @@ char qla2x00_version_str[40]; /* * SRB allocation cache */ -static struct kmem_cache *srb_cachep; +static kmem_cache_t *srb_cachep; /* * Ioctl related information. diff --git a/trunk/drivers/scsi/qla4xxx/ql4_os.c b/trunk/drivers/scsi/qla4xxx/ql4_os.c index 9ef693c8809a..db9d88e7bee7 100644 --- a/trunk/drivers/scsi/qla4xxx/ql4_os.c +++ b/trunk/drivers/scsi/qla4xxx/ql4_os.c @@ -19,7 +19,7 @@ char qla4xxx_version_str[40]; /* * SRB allocation cache */ -static struct kmem_cache *srb_cachep; +static kmem_cache_t *srb_cachep; /* * Module parameter information and variables @@ -961,10 +961,9 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha, * the mid-level tries to sleep when it reaches the driver threshold * "host->can_queue". This can cause a panic if we were in our interrupt code. **/ -static void qla4xxx_do_dpc(struct work_struct *work) +static void qla4xxx_do_dpc(void *data) { - struct scsi_qla_host *ha = - container_of(work, struct scsi_qla_host, dpc_work); + struct scsi_qla_host *ha = (struct scsi_qla_host *) data; struct ddb_entry *ddb_entry, *dtemp; DEBUG2(printk("scsi%ld: %s: DPC handler waking up." 
@@ -1254,7 +1253,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, ret = -ENODEV; goto probe_failed; } - INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc); + INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc, ha); ret = request_irq(pdev->irq, qla4xxx_intr_handler, SA_INTERRUPT|SA_SHIRQ, "qla4xxx", ha); diff --git a/trunk/drivers/scsi/scsi.c b/trunk/drivers/scsi/scsi.c index 24cffd98ee63..fafc00deaade 100644 --- a/trunk/drivers/scsi/scsi.c +++ b/trunk/drivers/scsi/scsi.c @@ -136,7 +136,7 @@ const char * scsi_device_type(unsigned type) EXPORT_SYMBOL(scsi_device_type); struct scsi_host_cmd_pool { - struct kmem_cache *slab; + kmem_cache_t *slab; unsigned int users; char *name; unsigned int slab_flags; diff --git a/trunk/drivers/scsi/scsi_lib.c b/trunk/drivers/scsi/scsi_lib.c index 1748e27501cd..fb616c69151f 100644 --- a/trunk/drivers/scsi/scsi_lib.c +++ b/trunk/drivers/scsi/scsi_lib.c @@ -36,7 +36,7 @@ struct scsi_host_sg_pool { size_t size; char *name; - struct kmem_cache *slab; + kmem_cache_t *slab; mempool_t *pool; }; @@ -241,7 +241,7 @@ struct scsi_io_context { char sense[SCSI_SENSE_BUFFERSIZE]; }; -static struct kmem_cache *scsi_io_context_cache; +static kmem_cache_t *scsi_io_context_cache; static void scsi_end_async(struct request *req, int uptodate) { diff --git a/trunk/drivers/scsi/scsi_scan.c b/trunk/drivers/scsi/scsi_scan.c index 14e635aa44ce..4d656148bd67 100644 --- a/trunk/drivers/scsi/scsi_scan.c +++ b/trunk/drivers/scsi/scsi_scan.c @@ -437,10 +437,9 @@ static struct scsi_target *scsi_alloc_target(struct device *parent, goto retry; } -static void scsi_target_reap_usercontext(struct work_struct *work) +static void scsi_target_reap_usercontext(void *data) { - struct scsi_target *starget = - container_of(work, struct scsi_target, ew.work); + struct scsi_target *starget = data; struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); unsigned long flags; @@ -476,7 +475,7 @@ void scsi_target_reap(struct scsi_target *starget) starget->state = STARGET_DEL; spin_unlock_irqrestore(shost->host_lock, flags); execute_in_process_context(scsi_target_reap_usercontext, - &starget->ew); + starget, &starget->ew); return; } diff --git a/trunk/drivers/scsi/scsi_sysfs.c b/trunk/drivers/scsi/scsi_sysfs.c index 259c90cfa367..e1a91665d1c2 100644 --- a/trunk/drivers/scsi/scsi_sysfs.c +++ b/trunk/drivers/scsi/scsi_sysfs.c @@ -218,16 +218,16 @@ static void scsi_device_cls_release(struct class_device *class_dev) put_device(&sdev->sdev_gendev); } -static void scsi_device_dev_release_usercontext(struct work_struct *work) +static void scsi_device_dev_release_usercontext(void *data) { + struct device *dev = data; struct scsi_device *sdev; struct device *parent; struct scsi_target *starget; unsigned long flags; - sdev = container_of(work, struct scsi_device, ew.work); - - parent = sdev->sdev_gendev.parent; + parent = dev->parent; + sdev = to_scsi_device(dev); starget = to_scsi_target(parent); spin_lock_irqsave(sdev->host->host_lock, flags); @@ -258,7 +258,7 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work) static void scsi_device_dev_release(struct device *dev) { struct scsi_device *sdp = to_scsi_device(dev); - execute_in_process_context(scsi_device_dev_release_usercontext, + execute_in_process_context(scsi_device_dev_release_usercontext, dev, &sdp->ew); } diff --git a/trunk/drivers/scsi/scsi_tgt_lib.c b/trunk/drivers/scsi/scsi_tgt_lib.c index d402aff5f314..39da5cd6fb6f 100644 --- a/trunk/drivers/scsi/scsi_tgt_lib.c +++ b/trunk/drivers/scsi/scsi_tgt_lib.c @@ -33,7 +33,7 @@ #include 
"scsi_tgt_priv.h" static struct workqueue_struct *scsi_tgtd; -static struct kmem_cache *scsi_tgt_cmd_cache; +static kmem_cache_t *scsi_tgt_cmd_cache; /* * TODO: this struct will be killed when the block layer supports large bios @@ -185,11 +185,10 @@ static void cmd_hashlist_del(struct scsi_cmnd *cmd) spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags); } -static void scsi_tgt_cmd_destroy(struct work_struct *work) +static void scsi_tgt_cmd_destroy(void *data) { - struct scsi_tgt_cmd *tcmd = - container_of(work, struct scsi_tgt_cmd, work); - struct scsi_cmnd *cmd = tcmd->rq->special; + struct scsi_cmnd *cmd = data; + struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data; dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction, rq_data_dir(cmd->request)); @@ -215,7 +214,6 @@ static void init_scsi_tgt_cmd(struct request *rq, struct scsi_tgt_cmd *tcmd, struct list_head *head; tcmd->tag = tag; - INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy); spin_lock_irqsave(&qdata->cmd_hash_lock, flags); head = &qdata->cmd_hash[cmd_hashfn(tag)]; list_add(&tcmd->hash_list, head); @@ -305,7 +303,7 @@ void scsi_tgt_free_queue(struct Scsi_Host *shost) cmd = tcmd->rq->special; shost->hostt->eh_abort_handler(cmd); - scsi_tgt_cmd_destroy(&tcmd->work); + scsi_tgt_cmd_destroy(cmd); } } EXPORT_SYMBOL_GPL(scsi_tgt_free_queue); @@ -349,6 +347,7 @@ static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd) dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request)); scsi_tgt_uspace_send_status(cmd, tcmd->tag); + INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy, cmd); queue_work(scsi_tgtd, &tcmd->work); } @@ -550,15 +549,13 @@ static int scsi_tgt_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr, static int scsi_tgt_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd) { - struct scsi_tgt_cmd *tcmd; int err; err = shost->hostt->eh_abort_handler(cmd); if (err) eprintk("fail to abort %p\n", cmd); - tcmd = cmd->request->end_io_data; - scsi_tgt_cmd_destroy(&tcmd->work); + scsi_tgt_cmd_destroy(cmd); return err; } diff --git a/trunk/drivers/scsi/scsi_transport_fc.c b/trunk/drivers/scsi/scsi_transport_fc.c index 3571ce8934e7..38c215a78f69 100644 --- a/trunk/drivers/scsi/scsi_transport_fc.c +++ b/trunk/drivers/scsi/scsi_transport_fc.c @@ -241,9 +241,9 @@ fc_bitfield_name_search(remote_port_roles, fc_remote_port_role_names) #define FC_MGMTSRVR_PORTID 0x00000a -static void fc_timeout_deleted_rport(struct work_struct *work); -static void fc_timeout_fail_rport_io(struct work_struct *work); -static void fc_scsi_scan_rport(struct work_struct *work); +static void fc_timeout_deleted_rport(void *data); +static void fc_timeout_fail_rport_io(void *data); +static void fc_scsi_scan_rport(void *data); /* * Attribute counts pre object type... @@ -1613,7 +1613,7 @@ fc_flush_work(struct Scsi_Host *shost) * 1 on success / 0 already queued / < 0 for error **/ static int -fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work, +fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work, unsigned long delay) { if (unlikely(!fc_host_devloss_work_q(shost))) { @@ -1625,6 +1625,9 @@ fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work, return -EINVAL; } + if (delay == 0) + return queue_work(fc_host_devloss_work_q(shost), work); + return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay); } @@ -1709,13 +1712,12 @@ EXPORT_SYMBOL(fc_remove_host); * fc_starget_delete - called to delete the scsi decendents of an rport * (target and all sdevs) * - * @work: remote port to be operated on. 
+ * @data: remote port to be operated on. **/ static void -fc_starget_delete(struct work_struct *work) +fc_starget_delete(void *data) { - struct fc_rport *rport = - container_of(work, struct fc_rport, stgt_delete_work); + struct fc_rport *rport = (struct fc_rport *)data; struct Scsi_Host *shost = rport_to_shost(rport); unsigned long flags; struct fc_internal *i = to_fc_internal(shost->transportt); @@ -1749,13 +1751,12 @@ fc_starget_delete(struct work_struct *work) /** * fc_rport_final_delete - finish rport termination and delete it. * - * @work: remote port to be deleted. + * @data: remote port to be deleted. **/ static void -fc_rport_final_delete(struct work_struct *work) +fc_rport_final_delete(void *data) { - struct fc_rport *rport = - container_of(work, struct fc_rport, rport_delete_work); + struct fc_rport *rport = (struct fc_rport *)data; struct device *dev = &rport->dev; struct Scsi_Host *shost = rport_to_shost(rport); struct fc_internal *i = to_fc_internal(shost->transportt); @@ -1769,7 +1770,7 @@ fc_rport_final_delete(struct work_struct *work) /* Delete SCSI target and sdevs */ if (rport->scsi_target_id != -1) - fc_starget_delete(&rport->stgt_delete_work); + fc_starget_delete(data); else if (i->f->dev_loss_tmo_callbk) i->f->dev_loss_tmo_callbk(rport); else if (i->f->terminate_rport_io) @@ -1828,11 +1829,11 @@ fc_rport_create(struct Scsi_Host *shost, int channel, rport->channel = channel; rport->fast_io_fail_tmo = -1; - INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport); - INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io); - INIT_WORK(&rport->scan_work, fc_scsi_scan_rport); - INIT_WORK(&rport->stgt_delete_work, fc_starget_delete); - INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete); + INIT_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport, rport); + INIT_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io, rport); + INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport); + INIT_WORK(&rport->stgt_delete_work, fc_starget_delete, rport); + INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete, rport); spin_lock_irqsave(shost->host_lock, flags); @@ -1962,7 +1963,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, } if (match) { - struct delayed_work *work = + struct work_struct *work = &rport->dev_loss_work; memcpy(&rport->node_name, &ids->node_name, @@ -2266,13 +2267,12 @@ EXPORT_SYMBOL(fc_remote_port_rolechg); * was a SCSI target (thus was blocked), and failed * to return in the alloted time. * - * @work: rport target that failed to reappear in the alloted time. + * @data: rport target that failed to reappear in the alloted time. **/ static void -fc_timeout_deleted_rport(struct work_struct *work) +fc_timeout_deleted_rport(void *data) { - struct fc_rport *rport = - container_of(work, struct fc_rport, dev_loss_work.work); + struct fc_rport *rport = (struct fc_rport *)data; struct Scsi_Host *shost = rport_to_shost(rport); struct fc_host_attrs *fc_host = shost_to_fc_host(shost); unsigned long flags; @@ -2366,16 +2366,15 @@ fc_timeout_deleted_rport(struct work_struct *work) * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a * disconnected SCSI target. * - * @work: rport to terminate io on. + * @data: rport to terminate io on. * * Notes: Only requests the failure of the io, not that all are flushed * prior to returning. 
**/ static void -fc_timeout_fail_rport_io(struct work_struct *work) +fc_timeout_fail_rport_io(void *data) { - struct fc_rport *rport = - container_of(work, struct fc_rport, fail_io_work.work); + struct fc_rport *rport = (struct fc_rport *)data; struct Scsi_Host *shost = rport_to_shost(rport); struct fc_internal *i = to_fc_internal(shost->transportt); @@ -2388,13 +2387,12 @@ fc_timeout_fail_rport_io(struct work_struct *work) /** * fc_scsi_scan_rport - called to perform a scsi scan on a remote port. * - * @work: remote port to be scanned. + * @data: remote port to be scanned. **/ static void -fc_scsi_scan_rport(struct work_struct *work) +fc_scsi_scan_rport(void *data) { - struct fc_rport *rport = - container_of(work, struct fc_rport, scan_work); + struct fc_rport *rport = (struct fc_rport *)data; struct Scsi_Host *shost = rport_to_shost(rport); unsigned long flags; diff --git a/trunk/drivers/scsi/scsi_transport_iscsi.c b/trunk/drivers/scsi/scsi_transport_iscsi.c index 9c22f1342715..9b25124a989e 100644 --- a/trunk/drivers/scsi/scsi_transport_iscsi.c +++ b/trunk/drivers/scsi/scsi_transport_iscsi.c @@ -234,11 +234,9 @@ static int iscsi_user_scan(struct Scsi_Host *shost, uint channel, return 0; } -static void session_recovery_timedout(struct work_struct *work) +static void session_recovery_timedout(void *data) { - struct iscsi_cls_session *session = - container_of(work, struct iscsi_cls_session, - recovery_work.work); + struct iscsi_cls_session *session = data; dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed " "out after %d secs\n", session->recovery_tmo); @@ -278,7 +276,7 @@ iscsi_alloc_session(struct Scsi_Host *shost, session->transport = transport; session->recovery_tmo = 120; - INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); + INIT_WORK(&session->recovery_work, session_recovery_timedout, session); INIT_LIST_HEAD(&session->host_list); INIT_LIST_HEAD(&session->sess_list); diff --git a/trunk/drivers/scsi/scsi_transport_spi.c b/trunk/drivers/scsi/scsi_transport_spi.c index 3fded4831460..9f070f0d0f2b 100644 --- a/trunk/drivers/scsi/scsi_transport_spi.c +++ b/trunk/drivers/scsi/scsi_transport_spi.c @@ -964,10 +964,9 @@ struct work_queue_wrapper { }; static void -spi_dv_device_work_wrapper(struct work_struct *work) +spi_dv_device_work_wrapper(void *data) { - struct work_queue_wrapper *wqw = - container_of(work, struct work_queue_wrapper, work); + struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; struct scsi_device *sdev = wqw->sdev; kfree(wqw); @@ -1007,7 +1006,7 @@ spi_schedule_dv_device(struct scsi_device *sdev) return; } - INIT_WORK(&wqw->work, spi_dv_device_work_wrapper); + INIT_WORK(&wqw->work, spi_dv_device_work_wrapper, wqw); wqw->sdev = sdev; schedule_work(&wqw->work); diff --git a/trunk/drivers/serial/8250_exar_st16c554.c b/trunk/drivers/serial/8250_exar_st16c554.c deleted file mode 100644 index 567143ace159..000000000000 --- a/trunk/drivers/serial/8250_exar_st16c554.c +++ /dev/null @@ -1,52 +0,0 @@ -/* - * linux/drivers/serial/8250_exar.c - * - * Written by Paul B Schroeder < pschroeder "at" uplogix "dot" com > - * Based on 8250_boca. - * - * Copyright (C) 2005 Russell King. - * Data taken from include/asm-i386/serial.h - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#include -#include -#include - -#define PORT(_base,_irq) \ - { \ - .iobase = _base, \ - .irq = _irq, \ - .uartclk = 1843200, \ - .iotype = UPIO_PORT, \ - .flags = UPF_BOOT_AUTOCONF, \ - } - -static struct plat_serial8250_port exar_data[] = { - PORT(0x100, 5), - PORT(0x108, 5), - PORT(0x110, 5), - PORT(0x118, 5), - { }, -}; - -static struct platform_device exar_device = { - .name = "serial8250", - .id = PLAT8250_DEV_EXAR_ST16C554, - .dev = { - .platform_data = exar_data, - }, -}; - -static int __init exar_init(void) -{ - return platform_device_register(&exar_device); -} - -module_init(exar_init); - -MODULE_AUTHOR("Paul B Schroeder"); -MODULE_DESCRIPTION("8250 serial probe module for Exar cards"); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/serial/8250_pnp.c b/trunk/drivers/serial/8250_pnp.c index d3d6b82706b5..71d907c8288b 100644 --- a/trunk/drivers/serial/8250_pnp.c +++ b/trunk/drivers/serial/8250_pnp.c @@ -464,38 +464,11 @@ static void __devexit serial_pnp_remove(struct pnp_dev *dev) serial8250_unregister_port(line - 1); } -#ifdef CONFIG_PM -static int serial_pnp_suspend(struct pnp_dev *dev, pm_message_t state) -{ - long line = (long)pnp_get_drvdata(dev); - - if (!line) - return -ENODEV; - serial8250_suspend_port(line - 1); - return 0; -} - -static int serial_pnp_resume(struct pnp_dev *dev) -{ - long line = (long)pnp_get_drvdata(dev); - - if (!line) - return -ENODEV; - serial8250_resume_port(line - 1); - return 0; -} -#else -#define serial_pnp_suspend NULL -#define serial_pnp_resume NULL -#endif /* CONFIG_PM */ - static struct pnp_driver serial_pnp_driver = { .name = "serial", + .id_table = pnp_dev_table, .probe = serial_pnp_probe, .remove = __devexit_p(serial_pnp_remove), - .suspend = serial_pnp_suspend, - .resume = serial_pnp_resume, - .id_table = pnp_dev_table, }; static int __init serial8250_pnp_init(void) diff --git a/trunk/drivers/serial/Kconfig b/trunk/drivers/serial/Kconfig index fc12d5df10e2..0b71e7d18903 100644 --- a/trunk/drivers/serial/Kconfig +++ b/trunk/drivers/serial/Kconfig @@ -210,17 +210,6 @@ config SERIAL_8250_BOCA To compile this driver as a module, choose M here: the module will be called 8250_boca. -config SERIAL_8250_EXAR_ST16C554 - tristate "Support Exar ST16C554/554D Quad UART" - depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS - help - The Uplogix Envoy TU301 uses this Exar Quad UART. If you are - tinkering with your Envoy TU301, or have a machine with this UART, - say Y here. - - To compile this driver as a module, choose M here: the module - will be called 8250_exar_st16c554. - config SERIAL_8250_HUB6 tristate "Support Hub6 cards" depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS @@ -522,25 +511,6 @@ config SERIAL_IMX_CONSOLE your boot loader (lilo or loadlin) about how to pass options to the kernel at boot time.) -config SERIAL_UARTLITE - tristate "Xilinx uartlite serial port support" - depends on PPC32 - select SERIAL_CORE - help - Say Y here if you want to use the Xilinx uartlite serial controller. - - To compile this driver as a module, choose M here: the - module will be called uartlite.ko. - -config SERIAL_UARTLITE_CONSOLE - bool "Support for console on Xilinx uartlite serial port" - depends on SERIAL_UARTLITE=y - select SERIAL_CORE_CONSOLE - help - Say Y here if you wish to use a Xilinx uartlite as the system - console (the system console is the device which receives all kernel - messages and warnings and which allows logins in single user mode). 
- config SERIAL_SUNCORE bool depends on SPARC diff --git a/trunk/drivers/serial/Makefile b/trunk/drivers/serial/Makefile index df3632cd7df9..b4d8a7c182e3 100644 --- a/trunk/drivers/serial/Makefile +++ b/trunk/drivers/serial/Makefile @@ -17,7 +17,6 @@ obj-$(CONFIG_SERIAL_8250_CONSOLE) += 8250_early.o obj-$(CONFIG_SERIAL_8250_FOURPORT) += 8250_fourport.o obj-$(CONFIG_SERIAL_8250_ACCENT) += 8250_accent.o obj-$(CONFIG_SERIAL_8250_BOCA) += 8250_boca.o -obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554) += 8250_exar_st16c554.o obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o obj-$(CONFIG_SERIAL_8250_MCA) += 8250_mca.o obj-$(CONFIG_SERIAL_8250_AU1X00) += 8250_au1x00.o @@ -56,5 +55,4 @@ obj-$(CONFIG_SERIAL_VR41XX) += vr41xx_siu.o obj-$(CONFIG_SERIAL_SGI_IOC4) += ioc4_serial.o obj-$(CONFIG_SERIAL_SGI_IOC3) += ioc3_serial.o obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o -obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o obj-$(CONFIG_SERIAL_NETX) += netx-serial.o diff --git a/trunk/drivers/serial/amba-pl010.c b/trunk/drivers/serial/amba-pl010.c index 4d3626ef4643..4213fabc62bf 100644 --- a/trunk/drivers/serial/amba-pl010.c +++ b/trunk/drivers/serial/amba-pl010.c @@ -129,8 +129,6 @@ static void pl010_rx_chars(struct uart_port *port) */ rsr = readb(port->membase + UART01x_RSR) | UART_DUMMY_RSR_RX; if (unlikely(rsr & UART01x_RSR_ANY)) { - writel(0, port->membase + UART01x_ECR); - if (rsr & UART01x_RSR_BE) { rsr &= ~(UART01x_RSR_FE | UART01x_RSR_PE); port->icount.brk++; diff --git a/trunk/drivers/serial/dz.c b/trunk/drivers/serial/dz.c index af1544f3356f..53662b33b841 100644 --- a/trunk/drivers/serial/dz.c +++ b/trunk/drivers/serial/dz.c @@ -1,13 +1,11 @@ /* - * dz.c: Serial port driver for DECstations equipped + * dz.c: Serial port driver for DECStations equiped * with the DZ chipset. * * Copyright (C) 1998 Olivier A. D. Lebaillif * * Email: olivier.lebaillif@ifrsys.com * - * Copyright (C) 2004, 2006 Maciej W. 
Rozycki - * * [31-AUG-98] triemer * Changed IRQ to use Harald's dec internals interrupts.h * removed base_addr code - moving address assignment to setup.c @@ -28,16 +26,10 @@ #undef DEBUG_DZ -#if defined(CONFIG_SERIAL_DZ_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) -#define SUPPORT_SYSRQ -#endif - -#include #include #include #include #include -#include #include #include #include @@ -53,10 +45,14 @@ #include #include +#define CONSOLE_LINE (3) /* for definition of struct console */ + #include "dz.h" +#define DZ_INTR_DEBUG 1 + static char *dz_name = "DECstation DZ serial driver version "; -static char *dz_version = "1.03"; +static char *dz_version = "1.02"; struct dz_port { struct uart_port port; @@ -65,6 +61,22 @@ struct dz_port { static struct dz_port dz_ports[DZ_NB_PORT]; +#ifdef DEBUG_DZ +/* + * debugging code to send out chars via prom + */ +static void debug_console(const char *s, int count) +{ + unsigned i; + + for (i = 0; i < count; i++) { + if (*s == 10) + prom_printf("%c", 13); + prom_printf("%c", *s++); + } +} +#endif + /* * ------------------------------------------------------------ * dz_in () and dz_out () @@ -78,7 +90,6 @@ static inline unsigned short dz_in(struct dz_port *dport, unsigned offset) { volatile unsigned short *addr = (volatile unsigned short *) (dport->port.membase + offset); - return *addr; } @@ -87,7 +98,6 @@ static inline void dz_out(struct dz_port *dport, unsigned offset, { volatile unsigned short *addr = (volatile unsigned short *) (dport->port.membase + offset); - *addr = value; } @@ -134,7 +144,7 @@ static void dz_stop_rx(struct uart_port *uport) spin_lock_irqsave(&dport->port.lock, flags); dport->cflag &= ~DZ_CREAD; - dz_out(dport, DZ_LPR, dport->cflag | dport->port.line); + dz_out(dport, DZ_LPR, dport->cflag); spin_unlock_irqrestore(&dport->port.lock, flags); } @@ -145,14 +155,14 @@ static void dz_enable_ms(struct uart_port *port) /* * ------------------------------------------------------------ + * Here starts the interrupt handling routines. All of the + * following subroutines are declared as inline and are folded + * into dz_interrupt. They were separated out for readability's + * sake. * - * Here start the interrupt handling routines. All of the following - * subroutines are declared as inline and are folded into - * dz_interrupt. They were separated out for readability's sake. - * - * Note: dz_interrupt() is a "fast" interrupt, which means that it + * Note: rs_interrupt() is a "fast" interrupt, which means that it * runs with interrupts turned off. People who may want to modify - * dz_interrupt() should try to keep the interrupt handler as fast as + * rs_interrupt() should try to keep the interrupt handler as fast as * possible. After you are done making modifications, it is not a bad * idea to do: * @@ -170,74 +180,92 @@ static void dz_enable_ms(struct uart_port *port) * This routine deals with inputs from any lines. * ------------------------------------------------------------ */ -static inline void dz_receive_chars(struct dz_port *dport_in, - struct pt_regs *regs) +static inline void dz_receive_chars(struct dz_port *dport) { - struct dz_port *dport; struct tty_struct *tty = NULL; struct uart_icount *icount; - int lines_rx[DZ_NB_PORT] = { [0 ... 
DZ_NB_PORT - 1] = 0 }; - unsigned short status; + int ignore = 0; + unsigned short status, tmp; unsigned char ch, flag; - int i; - while ((status = dz_in(dport_in, DZ_RBUF)) & DZ_DVAL) { - dport = &dz_ports[LINE(status)]; - tty = dport->port.info->tty; /* point to the proper dev */ + /* this code is going to be a problem... + the call to tty_flip_buffer is going to need + to be rethought... + */ + do { + status = dz_in(dport, DZ_RBUF); - ch = UCHAR(status); /* grab the char */ + /* punt so we don't get duplicate characters */ + if (!(status & DZ_DVAL)) + goto ignore_char; - icount = &dport->port.icount; - icount->rx++; + ch = UCHAR(status); /* grab the char */ flag = TTY_NORMAL; - if (status & DZ_FERR) { /* frame error */ - /* - * There is no separate BREAK status bit, so - * treat framing errors as BREAKs for Magic SysRq - * and SAK; normally, otherwise. - */ - if (uart_handle_break(&dport->port)) - continue; - if (dport->port.flags & UPF_SAK) - flag = TTY_BREAK; - else - flag = TTY_FRAME; - } else if (status & DZ_OERR) /* overrun error */ - flag = TTY_OVERRUN; - else if (status & DZ_PERR) /* parity error */ - flag = TTY_PARITY; - /* keep track of the statistics */ - switch (flag) { - case TTY_FRAME: - icount->frame++; - break; - case TTY_PARITY: - icount->parity++; - break; - case TTY_OVERRUN: - icount->overrun++; - break; - case TTY_BREAK: - icount->brk++; - break; - default: - break; +#if 0 + if (info->is_console) { + if (ch == 0) + return; /* it's a break ... */ } +#endif - if (uart_handle_sysrq_char(&dport->port, ch, regs)) - continue; + tty = dport->port.info->tty;/* now tty points to the proper dev */ + icount = &dport->port.icount; + + if (!tty) + break; - if ((status & dport->port.ignore_status_mask) == 0) { - uart_insert_char(&dport->port, - status, DZ_OERR, ch, flag); - lines_rx[LINE(status)] = 1; + icount->rx++; + + /* keep track of the statistics */ + if (status & (DZ_OERR | DZ_FERR | DZ_PERR)) { + if (status & DZ_PERR) /* parity error */ + icount->parity++; + else if (status & DZ_FERR) /* frame error */ + icount->frame++; + if (status & DZ_OERR) /* overrun error */ + icount->overrun++; + + /* check to see if we should ignore the character + and mask off conditions that should be ignored + */ + + if (status & dport->port.ignore_status_mask) { + if (++ignore > 100) + break; + goto ignore_char; + } + /* mask off the error conditions we want to ignore */ + tmp = status & dport->port.read_status_mask; + + if (tmp & DZ_PERR) { + flag = TTY_PARITY; +#ifdef DEBUG_DZ + debug_console("PERR\n", 5); +#endif + } else if (tmp & DZ_FERR) { + flag = TTY_FRAME; +#ifdef DEBUG_DZ + debug_console("FERR\n", 5); +#endif + } + if (tmp & DZ_OERR) { +#ifdef DEBUG_DZ + debug_console("OERR\n", 5); +#endif + tty_insert_flip_char(tty, ch, flag); + ch = 0; + flag = TTY_OVERRUN; + } } - } - for (i = 0; i < DZ_NB_PORT; i++) - if (lines_rx[i]) - tty_flip_buffer_push(dz_ports[i].port.info->tty); + tty_insert_flip_char(tty, ch, flag); + ignore_char: + ; + } while (status & DZ_DVAL); + + if (tty) + tty_flip_buffer_push(tty); } /* @@ -247,32 +275,26 @@ static inline void dz_receive_chars(struct dz_port *dport_in, * This routine deals with outputs to any lines. 
* ------------------------------------------------------------ */ -static inline void dz_transmit_chars(struct dz_port *dport_in) +static inline void dz_transmit_chars(struct dz_port *dport) { - struct dz_port *dport; - struct circ_buf *xmit; - unsigned short status; + struct circ_buf *xmit = &dport->port.info->xmit; unsigned char tmp; - status = dz_in(dport_in, DZ_CSR); - dport = &dz_ports[LINE(status)]; - xmit = &dport->port.info->xmit; - - if (dport->port.x_char) { /* XON/XOFF chars */ + if (dport->port.x_char) { /* XON/XOFF chars */ dz_out(dport, DZ_TDR, dport->port.x_char); dport->port.icount.tx++; dport->port.x_char = 0; return; } - /* If nothing to do or stopped or hardware stopped. */ + /* if nothing to do or stopped or hardware stopped */ if (uart_circ_empty(xmit) || uart_tx_stopped(&dport->port)) { dz_stop_tx(&dport->port); return; } /* - * If something to do... (remember the dz has no output fifo, - * so we go one char at a time) :-< + * if something to do ... (rember the dz has no output fifo so we go + * one char at a time :-< */ tmp = xmit->buf[xmit->tail]; xmit->tail = (xmit->tail + 1) & (DZ_XMIT_SIZE - 1); @@ -282,29 +304,23 @@ static inline void dz_transmit_chars(struct dz_port *dport_in) if (uart_circ_chars_pending(xmit) < DZ_WAKEUP_CHARS) uart_write_wakeup(&dport->port); - /* Are we are done. */ + /* Are we done */ if (uart_circ_empty(xmit)) dz_stop_tx(&dport->port); } /* * ------------------------------------------------------------ - * check_modem_status() + * check_modem_status () * - * DS 3100 & 5100: Only valid for the MODEM line, duh! - * DS 5000/200: Valid for the MODEM and PRINTER line. + * Only valid for the MODEM line duh ! * ------------------------------------------------------------ */ static inline void check_modem_status(struct dz_port *dport) { - /* - * FIXME: - * 1. No status change interrupt; use a timer. - * 2. Handle the 3100/5000 as appropriate. --macro - */ unsigned short status; - /* If not the modem line just return. */ + /* if not ne modem line just return */ if (dport->port.line != DZ_MODEM) return; @@ -325,18 +341,21 @@ static inline void check_modem_status(struct dz_port *dport) */ static irqreturn_t dz_interrupt(int irq, void *dev) { - struct dz_port *dport = (struct dz_port *)dev; + struct dz_port *dport; unsigned short status; /* get the reason why we just got an irq */ - status = dz_in(dport, DZ_CSR); + status = dz_in((struct dz_port *)dev, DZ_CSR); + dport = &dz_ports[LINE(status)]; - if ((status & (DZ_RDONE | DZ_RIE)) == (DZ_RDONE | DZ_RIE)) - dz_receive_chars(dport, regs); + if (status & DZ_RDONE) + dz_receive_chars(dport); - if ((status & (DZ_TRDY | DZ_TIE)) == (DZ_TRDY | DZ_TIE)) + if (status & DZ_TRDY) dz_transmit_chars(dport); + /* FIXME: what about check modem status??? --rmk */ + return IRQ_HANDLED; } @@ -348,13 +367,13 @@ static irqreturn_t dz_interrupt(int irq, void *dev) static unsigned int dz_get_mctrl(struct uart_port *uport) { - /* - * FIXME: Handle the 3100/5000 as appropriate. --macro - */ struct dz_port *dport = (struct dz_port *)uport; unsigned int mctrl = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; if (dport->port.line == DZ_MODEM) { + /* + * CHECKME: This is a guess from the other code... --rmk + */ if (dz_in(dport, DZ_MSR) & DZ_MODEM_DSR) mctrl &= ~TIOCM_DSR; } @@ -364,9 +383,6 @@ static unsigned int dz_get_mctrl(struct uart_port *uport) static void dz_set_mctrl(struct uart_port *uport, unsigned int mctrl) { - /* - * FIXME: Handle the 3100/5000 as appropriate. 
--macro - */ struct dz_port *dport = (struct dz_port *)uport; unsigned short tmp; @@ -393,6 +409,13 @@ static int dz_startup(struct uart_port *uport) unsigned long flags; unsigned short tmp; + /* The dz lines for the mouse/keyboard must be + * opened using their respective drivers. + */ + if ((dport->port.line == DZ_KEYBOARD) || + (dport->port.line == DZ_MOUSE)) + return -ENODEV; + spin_lock_irqsave(&dport->port.lock, flags); /* enable the interrupt and the scanning */ @@ -419,8 +442,7 @@ static void dz_shutdown(struct uart_port *uport) } /* - * ------------------------------------------------------------------- - * dz_tx_empty() -- get the transmitter empty status + * get_lsr_info - get line status register info * * Purpose: Let user call ioctl() to get info when the UART physically * is emptied. On bus types like RS485, the transmitter must @@ -428,28 +450,21 @@ static void dz_shutdown(struct uart_port *uport) * the transmit shift register is empty, not be done when the * transmit holding register is empty. This functionality * allows an RS485 driver to be written in user space. - * ------------------------------------------------------------------- */ static unsigned int dz_tx_empty(struct uart_port *uport) { struct dz_port *dport = (struct dz_port *)uport; - unsigned short tmp, mask = 1 << dport->port.line; + unsigned short status = dz_in(dport, DZ_LPR); - tmp = dz_in(dport, DZ_TCR); - tmp &= mask; - - return tmp ? 0 : TIOCSER_TEMT; + /* FIXME: this appears to be obviously broken --rmk. */ + return status ? TIOCSER_TEMT : 0; } static void dz_break_ctl(struct uart_port *uport, int break_state) { - /* - * FIXME: Can't access BREAK bits in TDR easily; - * reuse the code for polled TX. --macro - */ struct dz_port *dport = (struct dz_port *)uport; unsigned long flags; - unsigned short tmp, mask = 1 << dport->port.line; + unsigned short tmp, mask = 1 << uport->line; spin_lock_irqsave(&uport->lock, flags); tmp = dz_in(dport, DZ_TCR); @@ -546,7 +561,7 @@ static void dz_set_termios(struct uart_port *uport, struct termios *termios, spin_lock_irqsave(&dport->port.lock, flags); - dz_out(dport, DZ_LPR, cflag | dport->port.line); + dz_out(dport, DZ_LPR, cflag); dport->cflag = cflag; /* setup accept flag */ @@ -635,7 +650,7 @@ static void __init dz_init_ports(void) for (i = 0, dport = dz_ports; i < DZ_NB_PORT; i++, dport++) { spin_lock_init(&dport->port.lock); dport->port.membase = (char *) base; - dport->port.iotype = UPIO_MEM; + dport->port.iotype = UPIO_PORT; dport->port.irq = dec_interrupt[DEC_IRQ_DZ11]; dport->port.line = i; dport->port.fifosize = 1; @@ -647,7 +662,10 @@ static void __init dz_init_ports(void) static void dz_reset(struct dz_port *dport) { dz_out(dport, DZ_CSR, DZ_CLR); + while (dz_in(dport, DZ_CSR) & DZ_CLR); + /* FIXME: cpu_relax? */ + iob(); /* enable scanning */ @@ -655,55 +673,26 @@ static void dz_reset(struct dz_port *dport) } #ifdef CONFIG_SERIAL_DZ_CONSOLE -/* - * ------------------------------------------------------------------- - * dz_console_putchar() -- transmit a character - * - * Polled transmission. This is tricky. We need to mask transmit - * interrupts so that they do not interfere, enable the transmitter - * for the line requested and then wait till the transmit scanner - * requests data for this line. But it may request data for another - * line first, in which case we have to disable its transmitter and - * repeat waiting till our line pops up. Only then the character may - * be transmitted. Finally, the state of the transmitter mask is - * restored. 
Welcome to the world of PDP-11! - * ------------------------------------------------------------------- - */ static void dz_console_putchar(struct uart_port *uport, int ch) { struct dz_port *dport = (struct dz_port *)uport; unsigned long flags; - unsigned short csr, tcr, trdy, mask; - int loops = 10000; + int loops = 2500; + unsigned short tmp = (unsigned char)ch; + /* this code sends stuff out to serial device - spinning its + wheels and waiting. */ spin_lock_irqsave(&dport->port.lock, flags); - csr = dz_in(dport, DZ_CSR); - dz_out(dport, DZ_CSR, csr & ~DZ_TIE); - tcr = dz_in(dport, DZ_TCR); - tcr |= 1 << dport->port.line; - mask = tcr; - dz_out(dport, DZ_TCR, mask); - iob(); - spin_unlock_irqrestore(&dport->port.lock, flags); - while (loops--) { - trdy = dz_in(dport, DZ_CSR); - if (!(trdy & DZ_TRDY)) - continue; - trdy = (trdy & DZ_TLINE) >> 8; - if (trdy == dport->port.line) - break; - mask &= ~(1 << trdy); - dz_out(dport, DZ_TCR, mask); - iob(); - udelay(2); - } + /* spin our wheels */ + while (((dz_in(dport, DZ_CSR) & DZ_TRDY) != DZ_TRDY) && loops--) + /* FIXME: cpu_relax, udelay? --rmk */ + ; - if (loops) /* Cannot send otherwise. */ - dz_out(dport, DZ_TDR, ch); + /* Actually transmit the character. */ + dz_out(dport, DZ_TDR, tmp); - dz_out(dport, DZ_TCR, tcr); - dz_out(dport, DZ_CSR, csr); + spin_unlock_irqrestore(&dport->port.lock, flags); } /* @@ -714,11 +703,11 @@ static void dz_console_putchar(struct uart_port *uport, int ch) * The console must be locked when we get here. * ------------------------------------------------------------------- */ -static void dz_console_print(struct console *co, +static void dz_console_print(struct console *cons, const char *str, unsigned int count) { - struct dz_port *dport = &dz_ports[co->index]; + struct dz_port *dport = &dz_ports[CONSOLE_LINE]; #ifdef DEBUG_DZ prom_printf((char *) str); #endif @@ -727,42 +716,48 @@ static void dz_console_print(struct console *co, static int __init dz_console_setup(struct console *co, char *options) { - struct dz_port *dport = &dz_ports[co->index]; + struct dz_port *dport = &dz_ports[CONSOLE_LINE]; int baud = 9600; int bits = 8; int parity = 'n'; int flow = 'n'; + int ret; + unsigned short mask, tmp; if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); dz_reset(dport); - return uart_set_options(&dport->port, co, baud, parity, bits, flow); + ret = uart_set_options(&dport->port, co, baud, parity, bits, flow); + if (ret == 0) { + mask = 1 << dport->port.line; + tmp = dz_in(dport, DZ_TCR); /* read the TX flag */ + if (!(tmp & mask)) { + tmp |= mask; /* set the TX flag */ + dz_out(dport, DZ_TCR, tmp); + } + } + + return ret; } -static struct uart_driver dz_reg; -static struct console dz_sercons = { +static struct console dz_sercons = +{ .name = "ttyS", .write = dz_console_print, .device = uart_console_device, .setup = dz_console_setup, - .flags = CON_PRINTBUFFER, - .index = -1, - .data = &dz_reg, + .flags = CON_CONSDEV | CON_PRINTBUFFER, + .index = CONSOLE_LINE, }; -static int __init dz_serial_console_init(void) +void __init dz_serial_console_init(void) { - if (!IOASIC) { - dz_init_ports(); - register_console(&dz_sercons); - return 0; - } else - return -ENXIO; -} + dz_init_ports(); -console_initcall(dz_serial_console_init); + register_console(&dz_sercons); +} #define SERIAL_DZ_CONSOLE &dz_sercons #else @@ -772,29 +767,35 @@ console_initcall(dz_serial_console_init); static struct uart_driver dz_reg = { .owner = THIS_MODULE, .driver_name = "serial", - .dev_name = "ttyS", + .dev_name = "ttyS%d", 
.major = TTY_MAJOR, .minor = 64, .nr = DZ_NB_PORT, .cons = SERIAL_DZ_CONSOLE, }; -static int __init dz_init(void) +int __init dz_init(void) { + unsigned long flags; int ret, i; - if (IOASIC) - return -ENXIO; - printk("%s%s\n", dz_name, dz_version); dz_init_ports(); + save_flags(flags); + cli(); + #ifndef CONFIG_SERIAL_DZ_CONSOLE /* reset the chip */ dz_reset(&dz_ports[0]); #endif + /* order matters here... the trick is that flags + is updated... in request_irq - to immediatedly obliterate + it is unwise. */ + restore_flags(flags); + if (request_irq(dz_ports[0].port.irq, dz_interrupt, IRQF_DISABLED, "DZ", &dz_ports[0])) panic("Unable to register DZ interrupt"); @@ -809,7 +810,5 @@ static int __init dz_init(void) return ret; } -module_init(dz_init); - MODULE_DESCRIPTION("DECstation DZ serial driver"); MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/serial/dz.h b/trunk/drivers/serial/dz.h index 9674d4e49872..86ef417382bb 100644 --- a/trunk/drivers/serial/dz.h +++ b/trunk/drivers/serial/dz.h @@ -1,22 +1,20 @@ /* - * dz.h: Serial port driver for DECstations equipped + * dz.h: Serial port driver for DECStations equiped * with the DZ chipset. * * Copyright (C) 1998 Olivier A. D. Lebaillif * * Email: olivier.lebaillif@ifrsys.com * - * Copyright (C) 2004, 2006 Maciej W. Rozycki */ #ifndef DZ_SERIAL_H #define DZ_SERIAL_H /* - * Definitions for the Control and Status Register. + * Definitions for the Control and Status Received. */ #define DZ_TRDY 0x8000 /* Transmitter empty */ -#define DZ_TIE 0x4000 /* Transmitter Interrupt Enbl */ -#define DZ_TLINE 0x0300 /* Transmitter Line Number */ +#define DZ_TIE 0x4000 /* Transmitter Interrupt Enable */ #define DZ_RDONE 0x0080 /* Receiver data ready */ #define DZ_RIE 0x0040 /* Receive Interrupt Enable */ #define DZ_MSE 0x0020 /* Master Scan Enable */ @@ -24,44 +22,32 @@ #define DZ_MAINT 0x0008 /* Loop Back Mode */ /* - * Definitions for the Receiver Buffer Register. + * Definitions for the Received buffer. */ -#define DZ_RBUF_MASK 0x00FF /* Data Mask */ -#define DZ_LINE_MASK 0x0300 /* Line Mask */ +#define DZ_RBUF_MASK 0x00FF /* Data Mask in the Receive Buffer */ +#define DZ_LINE_MASK 0x0300 /* Line Mask in the Receive Buffer */ #define DZ_DVAL 0x8000 /* Valid Data indicator */ #define DZ_OERR 0x4000 /* Overrun error indicator */ #define DZ_FERR 0x2000 /* Frame error indicator */ #define DZ_PERR 0x1000 /* Parity error indicator */ -#define LINE(x) ((x & DZ_LINE_MASK) >> 8) /* Get the line number - from the input buffer */ -#define UCHAR(x) ((unsigned char)(x & DZ_RBUF_MASK)) +#define LINE(x) (x & DZ_LINE_MASK) >> 8 /* Get the line number from the input buffer */ +#define UCHAR(x) (unsigned char)(x & DZ_RBUF_MASK) /* - * Definitions for the Transmit Control Register. + * Definitions for the Transmit Register. */ #define DZ_LINE_KEYBOARD 0x0001 #define DZ_LINE_MOUSE 0x0002 #define DZ_LINE_MODEM 0x0004 #define DZ_LINE_PRINTER 0x0008 -#define DZ_MODEM_RTS 0x0800 /* RTS for the modem line (2) */ #define DZ_MODEM_DTR 0x0400 /* DTR for the modem line (2) */ -#define DZ_PRINT_RTS 0x0200 /* RTS for the prntr line (3) */ -#define DZ_PRINT_DTR 0x0100 /* DTR for the prntr line (3) */ -#define DZ_LNENB 0x000f /* Transmitter Line Enable */ /* * Definitions for the Modem Status Register. 
*/ -#define DZ_MODEM_RI 0x0800 /* RI for the modem line (2) */ -#define DZ_MODEM_CD 0x0400 /* CD for the modem line (2) */ #define DZ_MODEM_DSR 0x0200 /* DSR for the modem line (2) */ -#define DZ_MODEM_CTS 0x0100 /* CTS for the modem line (2) */ -#define DZ_PRINT_RI 0x0008 /* RI for the printer line (3) */ -#define DZ_PRINT_CD 0x0004 /* CD for the printer line (3) */ -#define DZ_PRINT_DSR 0x0002 /* DSR for the prntr line (3) */ -#define DZ_PRINT_CTS 0x0001 /* CTS for the prntr line (3) */ /* * Definitions for the Transmit Data Register. diff --git a/trunk/drivers/serial/mcfserial.c b/trunk/drivers/serial/mcfserial.c index 3db206d29b33..aee1b31f1a1c 100644 --- a/trunk/drivers/serial/mcfserial.c +++ b/trunk/drivers/serial/mcfserial.c @@ -60,8 +60,7 @@ struct timer_list mcfrs_timer_struct; #if defined(CONFIG_HW_FEITH) #define CONSOLE_BAUD_RATE 38400 #define DEFAULT_CBAUD B38400 -#elif defined(CONFIG_MOD5272) || defined(CONFIG_M5208EVB) || \ - defined(CONFIG_M5329EVB) || defined(CONFIG_GILBARCO) +#elif defined(CONFIG_MOD5272) || defined(CONFIG_M5208EVB) || defined(CONFIG_M5329EVB) #define CONSOLE_BAUD_RATE 115200 #define DEFAULT_CBAUD B115200 #elif defined(CONFIG_ARNEWSH) || defined(CONFIG_FREESCALE) || \ @@ -110,30 +109,12 @@ static struct mcf_serial mcfrs_table[] = { .irq = IRQBASE, .flags = ASYNC_BOOT_AUTOCONF, }, -#ifdef MCFUART_BASE2 { /* ttyS1 */ .magic = 0, .addr = (volatile unsigned char *) (MCF_MBAR+MCFUART_BASE2), .irq = IRQBASE+1, .flags = ASYNC_BOOT_AUTOCONF, }, -#endif -#ifdef MCFUART_BASE3 - { /* ttyS2 */ - .magic = 0, - .addr = (volatile unsigned char *) (MCF_MBAR+MCFUART_BASE3), - .irq = IRQBASE+2, - .flags = ASYNC_BOOT_AUTOCONF, - }, -#endif -#ifdef MCFUART_BASE4 - { /* ttyS3 */ - .magic = 0, - .addr = (volatile unsigned char *) (MCF_MBAR+MCFUART_BASE4), - .irq = IRQBASE+3, - .flags = ASYNC_BOOT_AUTOCONF, - }, -#endif }; @@ -1535,22 +1516,6 @@ static void mcfrs_irqinit(struct mcf_serial *info) imrp = (volatile unsigned long *) (MCF_MBAR + MCFICM_INTC0 + MCFINTC_IMRL); *imrp &= ~((1 << (info->irq - MCFINT_VECBASE)) | 1); -#if defined(CONFIG_M527x) - { - /* - * External Pin Mask Setting & Enable External Pin for Interface - * mrcbis@aliceposta.it - */ - unsigned short *serpin_enable_mask; - serpin_enable_mask = (MCF_IPSBAR + MCF_GPIO_PAR_UART); - if (info->line == 0) - *serpin_enable_mask |= UART0_ENABLE_MASK; - else if (info->line == 1) - *serpin_enable_mask |= UART1_ENABLE_MASK; - else if (info->line == 2) - *serpin_enable_mask |= UART2_ENABLE_MASK; - } -#endif #elif defined(CONFIG_M520x) volatile unsigned char *icrp, *uartp; volatile unsigned long *imrp; @@ -1748,7 +1713,7 @@ mcfrs_init(void) /* Initialize the tty_driver structure */ mcfrs_serial_driver->owner = THIS_MODULE; mcfrs_serial_driver->name = "ttyS"; - mcfrs_serial_driver->driver_name = "mcfserial"; + mcfrs_serial_driver->driver_name = "serial"; mcfrs_serial_driver->major = TTY_MAJOR; mcfrs_serial_driver->minor_start = 64; mcfrs_serial_driver->type = TTY_DRIVER_TYPE_SERIAL; @@ -1832,23 +1797,10 @@ void mcfrs_init_console(void) uartp[MCFUART_UMR] = MCFUART_MR1_PARITYNONE | MCFUART_MR1_CS8; uartp[MCFUART_UMR] = MCFUART_MR2_STOP1; -#ifdef CONFIG_M5272 -{ - /* - * For the MCF5272, also compute the baudrate fraction. 
- */ - int fraction = MCF_BUSCLK - (clk * 32 * mcfrs_console_baud); - fraction *= 16; - fraction /= (32 * mcfrs_console_baud); - uartp[MCFUART_UFPD] = (fraction & 0xf); /* set fraction */ - clk = (MCF_BUSCLK / mcfrs_console_baud) / 32; -} -#else clk = ((MCF_BUSCLK / mcfrs_console_baud) + 16) / 32; /* set baud */ -#endif - uartp[MCFUART_UBG1] = (clk & 0xff00) >> 8; /* set msb baud */ uartp[MCFUART_UBG2] = (clk & 0xff); /* set lsb baud */ + uartp[MCFUART_UCSR] = MCFUART_UCSR_RXCLKTIMER | MCFUART_UCSR_TXCLKTIMER; uartp[MCFUART_UCR] = MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE; diff --git a/trunk/drivers/serial/mpsc.c b/trunk/drivers/serial/mpsc.c index 29823bd60fb0..8eea69f29989 100644 --- a/trunk/drivers/serial/mpsc.c +++ b/trunk/drivers/serial/mpsc.c @@ -555,7 +555,7 @@ mpsc_sdma_start_tx(struct mpsc_port_info *pi) if (!mpsc_sdma_tx_active(pi)) { txre = (struct mpsc_tx_desc *)(pi->txr + (pi->txr_tail * MPSC_TXRE_SIZE)); - dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE); + dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE); #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ invalidate_dcache_range((ulong)txre, @@ -931,7 +931,7 @@ mpsc_init_rings(struct mpsc_port_info *pi) } txre->link = cpu_to_be32(pi->txr_p); /* Wrap last back to first */ - dma_cache_sync(pi->port.dev, (void *) pi->dma_region, MPSC_DMA_ALLOC_SIZE, + dma_cache_sync((void *) pi->dma_region, MPSC_DMA_ALLOC_SIZE, DMA_BIDIRECTIONAL); #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ @@ -1005,7 +1005,7 @@ mpsc_rx_intr(struct mpsc_port_info *pi) rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE)); - dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE); + dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE); #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ invalidate_dcache_range((ulong)rxre, @@ -1029,7 +1029,7 @@ mpsc_rx_intr(struct mpsc_port_info *pi) } bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE); - dma_cache_sync(pi->port.dev, (void *) bp, MPSC_RXBE_SIZE, DMA_FROM_DEVICE); + dma_cache_sync((void *) bp, MPSC_RXBE_SIZE, DMA_FROM_DEVICE); #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ invalidate_dcache_range((ulong)bp, @@ -1098,7 +1098,7 @@ mpsc_rx_intr(struct mpsc_port_info *pi) SDMA_DESC_CMDSTAT_F | SDMA_DESC_CMDSTAT_L); wmb(); - dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL); + dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL); #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ flush_dcache_range((ulong)rxre, @@ -1109,7 +1109,7 @@ mpsc_rx_intr(struct mpsc_port_info *pi) pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1); rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn * MPSC_RXRE_SIZE)); - dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE); + dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE); #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ invalidate_dcache_range((ulong)rxre, @@ -1143,7 +1143,7 @@ mpsc_setup_tx_desc(struct mpsc_port_info *pi, u32 count, u32 intr) SDMA_DESC_CMDSTAT_EI : 0)); wmb(); - dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, 
DMA_BIDIRECTIONAL); + dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_BIDIRECTIONAL); #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ flush_dcache_range((ulong)txre, @@ -1192,7 +1192,7 @@ mpsc_copy_tx_data(struct mpsc_port_info *pi) else /* All tx data copied into ring bufs */ return; - dma_cache_sync(pi->port.dev, (void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL); + dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL); #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ flush_dcache_range((ulong)bp, @@ -1217,7 +1217,7 @@ mpsc_tx_intr(struct mpsc_port_info *pi) txre = (struct mpsc_tx_desc *)(pi->txr + (pi->txr_tail * MPSC_TXRE_SIZE)); - dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE); + dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE); #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ invalidate_dcache_range((ulong)txre, @@ -1235,7 +1235,7 @@ mpsc_tx_intr(struct mpsc_port_info *pi) txre = (struct mpsc_tx_desc *)(pi->txr + (pi->txr_tail * MPSC_TXRE_SIZE)); - dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, + dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE); #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ @@ -1652,7 +1652,7 @@ mpsc_console_write(struct console *co, const char *s, uint count) count--; } - dma_cache_sync(pi->port.dev, (void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL); + dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL); #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ flush_dcache_range((ulong)bp, diff --git a/trunk/drivers/serial/sh-sci.c b/trunk/drivers/serial/sh-sci.c index 3b5f19ec2126..cfcc3caf49d8 100644 --- a/trunk/drivers/serial/sh-sci.c +++ b/trunk/drivers/serial/sh-sci.c @@ -775,7 +775,7 @@ static int sci_notifier(struct notifier_block *self, * * Clean this up later.. 
*/ - clk = clk_get(NULL, "module_clk"); + clk = clk_get("module_clk"); port->uartclk = clk_get_rate(clk) * 16; clk_put(clk); } @@ -960,7 +960,7 @@ static void sci_set_termios(struct uart_port *port, struct termios *termios, default: { #if defined(CONFIG_SUPERH) && !defined(CONFIG_SUPERH64) - struct clk *clk = clk_get(NULL, "module_clk"); + struct clk *clk = clk_get("module_clk"); t = SCBRR_VALUE(baud, clk_get_rate(clk)); clk_put(clk); #else @@ -1128,7 +1128,7 @@ static void __init sci_init_ports(void) * XXX: We should use a proper SCI/SCIF clock */ { - struct clk *clk = clk_get(NULL, "module_clk"); + struct clk *clk = clk_get("module_clk"); sci_ports[i].port.uartclk = clk_get_rate(clk) * 16; clk_put(clk); } diff --git a/trunk/drivers/serial/sh-sci.h b/trunk/drivers/serial/sh-sci.h index e4557cc4f74b..7ee992146ae9 100644 --- a/trunk/drivers/serial/sh-sci.h +++ b/trunk/drivers/serial/sh-sci.h @@ -133,20 +133,6 @@ # define SCIF_ORER 0x0001 /* Overrun error bit */ # define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ # define SCIF_ONLY -#elif defined(CONFIG_CPU_SUBTYPE_SH7206) -# define SCSPTR0 0xfffe8020 /* 16 bit SCIF */ -# define SCSPTR1 0xfffe8820 /* 16 bit SCIF */ -# define SCSPTR2 0xfffe9020 /* 16 bit SCIF */ -# define SCSPTR3 0xfffe9820 /* 16 bit SCIF */ -# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ -# define SCIF_ONLY -#elif defined(CONFIG_CPU_SUBTYPE_SH7619) -# define SCSPTR0 0xf8400020 /* 16 bit SCIF */ -# define SCSPTR1 0xf8410020 /* 16 bit SCIF */ -# define SCSPTR2 0xf8420020 /* 16 bit SCIF */ -# define SCIF_ORER 0x0001 /* overrun error bit */ -# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ -# define SCIF_ONLY #else # error CPU subtype not defined #endif @@ -379,7 +365,6 @@ SCIx_FNS(SCxSR, 0x08, 8, 0x10, 8, 0x08, 16, 0x10, 16, 0x04, 8) SCIx_FNS(SCxRDR, 0x0a, 8, 0x14, 8, 0x0A, 8, 0x14, 8, 0x05, 8) SCIF_FNS(SCFCR, 0x0c, 8, 0x18, 16) #if defined(CONFIG_CPU_SUBTYPE_SH7760) || defined(CONFIG_CPU_SUBTYPE_SH7780) -SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16) SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16) SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16) SCIF_FNS(SCSPTR, 0, 0, 0x24, 16) @@ -559,28 +544,6 @@ static inline int sci_rxd_in(struct uart_port *port) if (port->mapbase == 0xffe10000) return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ } -#elif defined(CONFIG_CPU_SUBTYPE_SH7206) -static inline int sci_rxd_in(struct uart_port *port) -{ - if (port->mapbase == 0xfffe8000) - return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ - if (port->mapbase == 0xfffe8800) - return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ - if (port->mapbase == 0xfffe9000) - return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ - if (port->mapbase == 0xfffe9800) - return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ -} -#elif defined(CONFIG_CPU_SUBTYPE_SH7619) -static inline int sci_rxd_in(struct uart_port *port) -{ - if (port->mapbase == 0xf8400000) - return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ - if (port->mapbase == 0xf8410000) - return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ - if (port->mapbase == 0xf8420000) - return ctrl_inw(SCSPTR2) & 0x0001 ? 
1 : 0; /* SCIF */ -} #endif /* diff --git a/trunk/drivers/serial/uartlite.c b/trunk/drivers/serial/uartlite.c deleted file mode 100644 index 83690653b78b..000000000000 --- a/trunk/drivers/serial/uartlite.c +++ /dev/null @@ -1,505 +0,0 @@ -/* - * uartlite.c: Serial driver for Xilinx uartlite serial controller - * - * Peter Korsgaard - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define ULITE_MAJOR 204 -#define ULITE_MINOR 187 -#define ULITE_NR_UARTS 4 - -/* For register details see datasheet: - http://www.xilinx.com/bvdocs/ipcenter/data_sheet/opb_uartlite.pdf -*/ -#define ULITE_RX 0x00 -#define ULITE_TX 0x04 -#define ULITE_STATUS 0x08 -#define ULITE_CONTROL 0x0c - -#define ULITE_REGION 16 - -#define ULITE_STATUS_RXVALID 0x01 -#define ULITE_STATUS_RXFULL 0x02 -#define ULITE_STATUS_TXEMPTY 0x04 -#define ULITE_STATUS_TXFULL 0x08 -#define ULITE_STATUS_IE 0x10 -#define ULITE_STATUS_OVERRUN 0x20 -#define ULITE_STATUS_FRAME 0x40 -#define ULITE_STATUS_PARITY 0x80 - -#define ULITE_CONTROL_RST_TX 0x01 -#define ULITE_CONTROL_RST_RX 0x02 -#define ULITE_CONTROL_IE 0x10 - - -static struct uart_port ports[ULITE_NR_UARTS]; - -static int ulite_receive(struct uart_port *port, int stat) -{ - struct tty_struct *tty = port->info->tty; - unsigned char ch = 0; - char flag = TTY_NORMAL; - - if ((stat & (ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN - | ULITE_STATUS_FRAME)) == 0) - return 0; - - /* stats */ - if (stat & ULITE_STATUS_RXVALID) { - port->icount.rx++; - ch = readb(port->membase + ULITE_RX); - - if (stat & ULITE_STATUS_PARITY) - port->icount.parity++; - } - - if (stat & ULITE_STATUS_OVERRUN) - port->icount.overrun++; - - if (stat & ULITE_STATUS_FRAME) - port->icount.frame++; - - - /* drop byte with parity error if IGNPAR specificed */ - if (stat & port->ignore_status_mask & ULITE_STATUS_PARITY) - stat &= ~ULITE_STATUS_RXVALID; - - stat &= port->read_status_mask; - - if (stat & ULITE_STATUS_PARITY) - flag = TTY_PARITY; - - - stat &= ~port->ignore_status_mask; - - if (stat & ULITE_STATUS_RXVALID) - tty_insert_flip_char(tty, ch, flag); - - if (stat & ULITE_STATUS_FRAME) - tty_insert_flip_char(tty, 0, TTY_FRAME); - - if (stat & ULITE_STATUS_OVERRUN) - tty_insert_flip_char(tty, 0, TTY_OVERRUN); - - return 1; -} - -static int ulite_transmit(struct uart_port *port, int stat) -{ - struct circ_buf *xmit = &port->info->xmit; - - if (stat & ULITE_STATUS_TXFULL) - return 0; - - if (port->x_char) { - writeb(port->x_char, port->membase + ULITE_TX); - port->x_char = 0; - port->icount.tx++; - return 1; - } - - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) - return 0; - - writeb(xmit->buf[xmit->tail], port->membase + ULITE_TX); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1); - port->icount.tx++; - - /* wake up */ - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); - - return 1; -} - -static irqreturn_t ulite_isr(int irq, void *dev_id) -{ - struct uart_port *port = (struct uart_port *)dev_id; - int busy; - - do { - int stat = readb(port->membase + ULITE_STATUS); - busy = ulite_receive(port, stat); - busy |= ulite_transmit(port, stat); - } while (busy); - - tty_flip_buffer_push(port->info->tty); - - return IRQ_HANDLED; -} - -static unsigned int ulite_tx_empty(struct uart_port *port) -{ - unsigned long flags; - unsigned int ret; - - 
spin_lock_irqsave(&port->lock, flags); - ret = readb(port->membase + ULITE_STATUS); - spin_unlock_irqrestore(&port->lock, flags); - - return ret & ULITE_STATUS_TXEMPTY ? TIOCSER_TEMT : 0; -} - -static unsigned int ulite_get_mctrl(struct uart_port *port) -{ - return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; -} - -static void ulite_set_mctrl(struct uart_port *port, unsigned int mctrl) -{ - /* N/A */ -} - -static void ulite_stop_tx(struct uart_port *port) -{ - /* N/A */ -} - -static void ulite_start_tx(struct uart_port *port) -{ - ulite_transmit(port, readb(port->membase + ULITE_STATUS)); -} - -static void ulite_stop_rx(struct uart_port *port) -{ - /* don't forward any more data (like !CREAD) */ - port->ignore_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_PARITY - | ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN; -} - -static void ulite_enable_ms(struct uart_port *port) -{ - /* N/A */ -} - -static void ulite_break_ctl(struct uart_port *port, int ctl) -{ - /* N/A */ -} - -static int ulite_startup(struct uart_port *port) -{ - int ret; - - ret = request_irq(port->irq, ulite_isr, - IRQF_DISABLED | IRQF_SAMPLE_RANDOM, "uartlite", port); - if (ret) - return ret; - - writeb(ULITE_CONTROL_RST_RX | ULITE_CONTROL_RST_TX, - port->membase + ULITE_CONTROL); - writeb(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL); - - return 0; -} - -static void ulite_shutdown(struct uart_port *port) -{ - writeb(0, port->membase + ULITE_CONTROL); - readb(port->membase + ULITE_CONTROL); /* dummy */ - free_irq(port->irq, port); -} - -static void ulite_set_termios(struct uart_port *port, struct termios *termios, - struct termios *old) -{ - unsigned long flags; - unsigned int baud; - - spin_lock_irqsave(&port->lock, flags); - - port->read_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN - | ULITE_STATUS_TXFULL; - - if (termios->c_iflag & INPCK) - port->read_status_mask |= - ULITE_STATUS_PARITY | ULITE_STATUS_FRAME; - - port->ignore_status_mask = 0; - if (termios->c_iflag & IGNPAR) - port->ignore_status_mask |= ULITE_STATUS_PARITY - | ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN; - - /* ignore all characters if CREAD is not set */ - if ((termios->c_cflag & CREAD) == 0) - port->ignore_status_mask |= - ULITE_STATUS_RXVALID | ULITE_STATUS_PARITY - | ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN; - - /* update timeout */ - baud = uart_get_baud_rate(port, termios, old, 0, 460800); - uart_update_timeout(port, termios->c_cflag, baud); - - spin_unlock_irqrestore(&port->lock, flags); -} - -static const char *ulite_type(struct uart_port *port) -{ - return port->type == PORT_UARTLITE ? 
"uartlite" : NULL; -} - -static void ulite_release_port(struct uart_port *port) -{ - release_mem_region(port->mapbase, ULITE_REGION); - iounmap(port->membase); - port->membase = 0; -} - -static int ulite_request_port(struct uart_port *port) -{ - if (!request_mem_region(port->mapbase, ULITE_REGION, "uartlite")) { - dev_err(port->dev, "Memory region busy\n"); - return -EBUSY; - } - - port->membase = ioremap(port->mapbase, ULITE_REGION); - if (!port->membase) { - dev_err(port->dev, "Unable to map registers\n"); - release_mem_region(port->mapbase, ULITE_REGION); - return -EBUSY; - } - - return 0; -} - -static void ulite_config_port(struct uart_port *port, int flags) -{ - ulite_request_port(port); - port->type = PORT_UARTLITE; -} - -static int ulite_verify_port(struct uart_port *port, struct serial_struct *ser) -{ - /* we don't want the core code to modify any port params */ - return -EINVAL; -} - -static struct uart_ops ulite_ops = { - .tx_empty = ulite_tx_empty, - .set_mctrl = ulite_set_mctrl, - .get_mctrl = ulite_get_mctrl, - .stop_tx = ulite_stop_tx, - .start_tx = ulite_start_tx, - .stop_rx = ulite_stop_rx, - .enable_ms = ulite_enable_ms, - .break_ctl = ulite_break_ctl, - .startup = ulite_startup, - .shutdown = ulite_shutdown, - .set_termios = ulite_set_termios, - .type = ulite_type, - .release_port = ulite_release_port, - .request_port = ulite_request_port, - .config_port = ulite_config_port, - .verify_port = ulite_verify_port -}; - -#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE -static void ulite_console_wait_tx(struct uart_port *port) -{ - int i; - - /* wait up to 10ms for the character(s) to be sent */ - for (i = 0; i < 10000; i++) { - if (readb(port->membase + ULITE_STATUS) & ULITE_STATUS_TXEMPTY) - break; - udelay(1); - } -} - -static void ulite_console_putchar(struct uart_port *port, int ch) -{ - ulite_console_wait_tx(port); - writeb(ch, port->membase + ULITE_TX); -} - -static void ulite_console_write(struct console *co, const char *s, - unsigned int count) -{ - struct uart_port *port = &ports[co->index]; - unsigned long flags; - unsigned int ier; - int locked = 1; - - if (oops_in_progress) { - locked = spin_trylock_irqsave(&port->lock, flags); - } else - spin_lock_irqsave(&port->lock, flags); - - /* save and disable interrupt */ - ier = readb(port->membase + ULITE_STATUS) & ULITE_STATUS_IE; - writeb(0, port->membase + ULITE_CONTROL); - - uart_console_write(port, s, count, ulite_console_putchar); - - ulite_console_wait_tx(port); - - /* restore interrupt state */ - if (ier) - writeb(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL); - - if (locked) - spin_unlock_irqrestore(&port->lock, flags); -} - -static int __init ulite_console_setup(struct console *co, char *options) -{ - struct uart_port *port; - int baud = 9600; - int bits = 8; - int parity = 'n'; - int flow = 'n'; - - if (co->index < 0 || co->index >= ULITE_NR_UARTS) - return -EINVAL; - - port = &ports[co->index]; - - /* not initialized yet? */ - if (!port->membase) - return -ENODEV; - - if (options) - uart_parse_options(options, &baud, &parity, &bits, &flow); - - return uart_set_options(port, co, baud, parity, bits, flow); -} - -static struct uart_driver ulite_uart_driver; - -static struct console ulite_console = { - .name = "ttyUL", - .write = ulite_console_write, - .device = uart_console_device, - .setup = ulite_console_setup, - .flags = CON_PRINTBUFFER, - .index = -1, /* Specified on the cmdline (e.g. 
console=ttyUL0 ) */ - .data = &ulite_uart_driver, -}; - -static int __init ulite_console_init(void) -{ - register_console(&ulite_console); - return 0; -} - -console_initcall(ulite_console_init); - -#endif /* CONFIG_SERIAL_UARTLITE_CONSOLE */ - -static struct uart_driver ulite_uart_driver = { - .owner = THIS_MODULE, - .driver_name = "uartlite", - .dev_name = "ttyUL", - .major = ULITE_MAJOR, - .minor = ULITE_MINOR, - .nr = ULITE_NR_UARTS, -#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE - .cons = &ulite_console, -#endif -}; - -static int __devinit ulite_probe(struct platform_device *pdev) -{ - struct resource *res, *res2; - struct uart_port *port; - - if (pdev->id < 0 || pdev->id >= ULITE_NR_UARTS) - return -EINVAL; - - if (ports[pdev->id].membase) - return -EBUSY; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENODEV; - - res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res2) - return -ENODEV; - - port = &ports[pdev->id]; - - port->fifosize = 16; - port->regshift = 2; - port->iotype = UPIO_MEM; - port->iobase = 1; /* mark port in use */ - port->mapbase = res->start; - port->membase = 0; - port->ops = &ulite_ops; - port->irq = res2->start; - port->flags = UPF_BOOT_AUTOCONF; - port->dev = &pdev->dev; - port->type = PORT_UNKNOWN; - port->line = pdev->id; - - uart_add_one_port(&ulite_uart_driver, port); - platform_set_drvdata(pdev, port); - - return 0; -} - -static int ulite_remove(struct platform_device *pdev) -{ - struct uart_port *port = platform_get_drvdata(pdev); - - platform_set_drvdata(pdev, NULL); - - if (port) - uart_remove_one_port(&ulite_uart_driver, port); - - /* mark port as free */ - port->membase = 0; - - return 0; -} - -static struct platform_driver ulite_platform_driver = { - .probe = ulite_probe, - .remove = ulite_remove, - .driver = { - .owner = THIS_MODULE, - .name = "uartlite", - }, -}; - -int __init ulite_init(void) -{ - int ret; - - ret = uart_register_driver(&ulite_uart_driver); - if (ret) - return ret; - - ret = platform_driver_register(&ulite_platform_driver); - if (ret) - uart_unregister_driver(&ulite_uart_driver); - - return ret; -} - -void __exit ulite_exit(void) -{ - platform_driver_unregister(&ulite_platform_driver); - uart_unregister_driver(&ulite_uart_driver); -} - -module_init(ulite_init); -module_exit(ulite_exit); - -MODULE_AUTHOR("Peter Korsgaard "); -MODULE_DESCRIPTION("Xilinx uartlite serial driver"); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/spi/pxa2xx_spi.c b/trunk/drivers/spi/pxa2xx_spi.c index 494d9b856488..72025df5561d 100644 --- a/trunk/drivers/spi/pxa2xx_spi.c +++ b/trunk/drivers/spi/pxa2xx_spi.c @@ -148,7 +148,7 @@ struct chip_data { void (*cs_control)(u32 command); }; -static void pump_messages(struct work_struct *work); +static void pump_messages(void *data); static int flush(struct driver_data *drv_data) { @@ -884,10 +884,9 @@ static void pump_transfers(unsigned long data) } } -static void pump_messages(struct work_struct *work) +static void pump_messages(void *data) { - struct driver_data *drv_data = - container_of(work, struct driver_data, pump_messages); + struct driver_data *drv_data = data; unsigned long flags; /* Lock queue and check for queue work */ @@ -1099,7 +1098,7 @@ static int init_queue(struct driver_data *drv_data) tasklet_init(&drv_data->pump_transfers, pump_transfers, (unsigned long)drv_data); - INIT_WORK(&drv_data->pump_messages, pump_messages); + INIT_WORK(&drv_data->pump_messages, pump_messages, drv_data); drv_data->workqueue = create_singlethread_workqueue( 
drv_data->master->cdev.dev->bus_id); if (drv_data->workqueue == NULL) diff --git a/trunk/drivers/spi/spi.c b/trunk/drivers/spi/spi.c index 270e6211c2e3..c3c0626f550b 100644 --- a/trunk/drivers/spi/spi.c +++ b/trunk/drivers/spi/spi.c @@ -360,13 +360,12 @@ spi_alloc_master(struct device *dev, unsigned size) if (!dev) return NULL; - master = kzalloc(size + sizeof *master, GFP_KERNEL); + master = kzalloc(size + sizeof *master, SLAB_KERNEL); if (!master) return NULL; class_device_initialize(&master->cdev); master->cdev.class = &spi_master_class; - kobj_set_kset_s(&master->cdev, spi_master_class.subsys); master->cdev.dev = get_device(dev); spi_master_set_devdata(master, &master[1]); @@ -448,9 +447,7 @@ static int __unregister(struct device *dev, void *unused) */ void spi_unregister_master(struct spi_master *master) { - int dummy; - - dummy = device_for_each_child(master->cdev.dev, NULL, __unregister); + (void) device_for_each_child(master->cdev.dev, NULL, __unregister); class_device_unregister(&master->cdev); } EXPORT_SYMBOL_GPL(spi_unregister_master); @@ -466,13 +463,15 @@ EXPORT_SYMBOL_GPL(spi_unregister_master); */ struct spi_master *spi_busnum_to_master(u16 bus_num) { - char name[9]; - struct kobject *bus; - - snprintf(name, sizeof name, "spi%u", bus_num); - bus = kset_find_obj(&spi_master_class.subsys.kset, name); - if (bus) - return container_of(bus, struct spi_master, cdev.kobj); + if (bus_num) { + char name[8]; + struct kobject *bus; + + snprintf(name, sizeof name, "spi%u", bus_num); + bus = kset_find_obj(&spi_master_class.subsys.kset, name); + if (bus) + return container_of(bus, struct spi_master, cdev.kobj); + } return NULL; } EXPORT_SYMBOL_GPL(spi_busnum_to_master); @@ -608,7 +607,7 @@ static int __init spi_init(void) { int status; - buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); + buf = kmalloc(SPI_BUFSIZ, SLAB_KERNEL); if (!buf) { status = -ENOMEM; goto err0; diff --git a/trunk/drivers/spi/spi_bitbang.c b/trunk/drivers/spi/spi_bitbang.c index 57289b61d0be..a23862ef72b2 100644 --- a/trunk/drivers/spi/spi_bitbang.c +++ b/trunk/drivers/spi/spi_bitbang.c @@ -196,7 +196,7 @@ int spi_bitbang_setup(struct spi_device *spi) return -EINVAL; if (!cs) { - cs = kzalloc(sizeof *cs, GFP_KERNEL); + cs = kzalloc(sizeof *cs, SLAB_KERNEL); if (!cs) return -ENOMEM; spi->controller_state = cs; @@ -265,10 +265,9 @@ static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t) * Drivers can provide word-at-a-time i/o primitives, or provide * transfer-at-a-time ones to leverage dma or fifo hardware. */ -static void bitbang_work(struct work_struct *work) +static void bitbang_work(void *_bitbang) { - struct spi_bitbang *bitbang = - container_of(work, struct spi_bitbang, work); + struct spi_bitbang *bitbang = _bitbang; unsigned long flags; spin_lock_irqsave(&bitbang->lock, flags); @@ -457,7 +456,7 @@ int spi_bitbang_start(struct spi_bitbang *bitbang) if (!bitbang->master || !bitbang->chipselect) return -EINVAL; - INIT_WORK(&bitbang->work, bitbang_work); + INIT_WORK(&bitbang->work, bitbang_work, bitbang); spin_lock_init(&bitbang->lock); INIT_LIST_HEAD(&bitbang->queue); diff --git a/trunk/drivers/spi/spi_butterfly.c b/trunk/drivers/spi/spi_butterfly.c index 312987a03210..c2f601f8e4f2 100644 --- a/trunk/drivers/spi/spi_butterfly.c +++ b/trunk/drivers/spi/spi_butterfly.c @@ -251,8 +251,6 @@ static void butterfly_attach(struct parport *p) * setting up a platform device like this is an ugly kluge... 
*/ pdev = platform_device_register_simple("butterfly", -1, NULL, 0); - if (IS_ERR(pdev)) - return; master = spi_alloc_master(&pdev->dev, sizeof *pp); if (!master) { diff --git a/trunk/drivers/usb/atm/cxacru.c b/trunk/drivers/usb/atm/cxacru.c index 3dfa3e40e148..e6565633ba0f 100644 --- a/trunk/drivers/usb/atm/cxacru.c +++ b/trunk/drivers/usb/atm/cxacru.c @@ -158,7 +158,7 @@ struct cxacru_data { const struct cxacru_modem_type *modem_type; int line_status; - struct delayed_work poll_work; + struct work_struct poll_work; /* contol handles */ struct mutex cm_serialize; @@ -347,7 +347,7 @@ static int cxacru_card_status(struct cxacru_data *instance) return 0; } -static void cxacru_poll_status(struct work_struct *work); +static void cxacru_poll_status(struct cxacru_data *instance); static int cxacru_atm_start(struct usbatm_data *usbatm_instance, struct atm_dev *atm_dev) @@ -376,14 +376,12 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance, } /* Start status polling */ - cxacru_poll_status(&instance->poll_work.work); + cxacru_poll_status(instance); return 0; } -static void cxacru_poll_status(struct work_struct *work) +static void cxacru_poll_status(struct cxacru_data *instance) { - struct cxacru_data *instance = - container_of(work, struct cxacru_data, poll_work.work); u32 buf[CXINF_MAX] = {}; struct usbatm_data *usbatm = instance->usbatm; struct atm_dev *atm_dev = usbatm->atm_dev; @@ -722,7 +720,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, mutex_init(&instance->cm_serialize); - INIT_DELAYED_WORK(&instance->poll_work, cxacru_poll_status); + INIT_WORK(&instance->poll_work, (void *)cxacru_poll_status, instance); usbatm_instance->driver_data = instance; diff --git a/trunk/drivers/usb/atm/speedtch.c b/trunk/drivers/usb/atm/speedtch.c index 8ed6c75adf0f..a823486495c3 100644 --- a/trunk/drivers/usb/atm/speedtch.c +++ b/trunk/drivers/usb/atm/speedtch.c @@ -142,7 +142,7 @@ struct speedtch_instance_data { struct speedtch_params params; /* set in probe, constant afterwards */ - struct delayed_work status_checker; + struct work_struct status_checker; unsigned char last_status; @@ -498,11 +498,8 @@ static int speedtch_start_synchro(struct speedtch_instance_data *instance) return ret; } -static void speedtch_check_status(struct work_struct *work) +static void speedtch_check_status(struct speedtch_instance_data *instance) { - struct speedtch_instance_data *instance = - container_of(work, struct speedtch_instance_data, - status_checker.work); struct usbatm_data *usbatm = instance->usbatm; struct atm_dev *atm_dev = usbatm->atm_dev; unsigned char *buf = instance->scratch_buffer; @@ -579,7 +576,7 @@ static void speedtch_status_poll(unsigned long data) { struct speedtch_instance_data *instance = (void *)data; - schedule_delayed_work(&instance->status_checker, 0); + schedule_work(&instance->status_checker); /* The following check is racy, but the race is harmless */ if (instance->poll_delay < MAX_POLL_DELAY) @@ -599,7 +596,7 @@ static void speedtch_resubmit_int(unsigned long data) if (int_urb) { ret = usb_submit_urb(int_urb, GFP_ATOMIC); if (!ret) - schedule_delayed_work(&instance->status_checker, 0); + schedule_work(&instance->status_checker); else { atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret); mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY)); @@ -643,7 +640,7 @@ static void speedtch_handle_int(struct urb *int_urb) if ((int_urb = instance->int_urb)) { ret = usb_submit_urb(int_urb, GFP_ATOMIC); - 
schedule_delayed_work(&instance->status_checker, 0); + schedule_work(&instance->status_checker); if (ret < 0) { atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret); goto fail; @@ -858,7 +855,7 @@ static int speedtch_bind(struct usbatm_data *usbatm, usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0); - INIT_DELAYED_WORK(&instance->status_checker, speedtch_check_status); + INIT_WORK(&instance->status_checker, (void *)speedtch_check_status, instance); instance->status_checker.timer.function = speedtch_status_poll; instance->status_checker.timer.data = (unsigned long)instance; diff --git a/trunk/drivers/usb/atm/ueagle-atm.c b/trunk/drivers/usb/atm/ueagle-atm.c index dae4ef1e8fe5..c137c041f7a4 100644 --- a/trunk/drivers/usb/atm/ueagle-atm.c +++ b/trunk/drivers/usb/atm/ueagle-atm.c @@ -64,8 +64,6 @@ #include #include #include -#include - #include #include "usbatm.h" @@ -657,9 +655,9 @@ static int request_dsp(struct uea_softc *sc) /* * The uea_load_page() function must be called within a process context */ -static void uea_load_page(struct work_struct *work) +static void uea_load_page(void *xsc) { - struct uea_softc *sc = container_of(work, struct uea_softc, task); + struct uea_softc *sc = xsc; u16 pageno = sc->pageno; u16 ovl = sc->ovl; struct block_info bi; @@ -1350,7 +1348,7 @@ static int uea_boot(struct uea_softc *sc) uea_enters(INS_TO_USBDEV(sc)); - INIT_WORK(&sc->task, uea_load_page); + INIT_WORK(&sc->task, uea_load_page, sc); init_waitqueue_head(&sc->sync_q); init_waitqueue_head(&sc->cmv_ack_wait); diff --git a/trunk/drivers/usb/class/cdc-acm.c b/trunk/drivers/usb/class/cdc-acm.c index 7f1fa956dcdb..ec3438dc8ee5 100644 --- a/trunk/drivers/usb/class/cdc-acm.c +++ b/trunk/drivers/usb/class/cdc-acm.c @@ -421,9 +421,9 @@ static void acm_write_bulk(struct urb *urb) schedule_work(&acm->work); } -static void acm_softint(struct work_struct *work) +static void acm_softint(void *private) { - struct acm *acm = container_of(work, struct acm, work); + struct acm *acm = private; dbg("Entering acm_softint."); if (!ACM_READY(acm)) @@ -927,7 +927,7 @@ static int acm_probe (struct usb_interface *intf, acm->rx_buflimit = num_rx_buf; acm->urb_task.func = acm_rx_tasklet; acm->urb_task.data = (unsigned long) acm; - INIT_WORK(&acm->work, acm_softint); + INIT_WORK(&acm->work, acm_softint, acm); spin_lock_init(&acm->throttle_lock); spin_lock_init(&acm->write_lock); spin_lock_init(&acm->read_lock); diff --git a/trunk/drivers/usb/core/buffer.c b/trunk/drivers/usb/core/buffer.c index c3915dc28608..840442a25b61 100644 --- a/trunk/drivers/usb/core/buffer.c +++ b/trunk/drivers/usb/core/buffer.c @@ -93,7 +93,7 @@ void hcd_buffer_destroy (struct usb_hcd *hcd) } -/* sometimes alloc/free could use kmalloc with GFP_DMA, for +/* sometimes alloc/free could use kmalloc with SLAB_DMA, for * better sharing and to leverage mm/slab.c intelligence. 
*/ diff --git a/trunk/drivers/usb/core/hub.c b/trunk/drivers/usb/core/hub.c index 2651c2e2a89f..0ce393eb3c4b 100644 --- a/trunk/drivers/usb/core/hub.c +++ b/trunk/drivers/usb/core/hub.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include @@ -69,7 +68,7 @@ struct usb_hub { unsigned has_indicators:1; u8 indicator[USB_MAXCHILDREN]; - struct delayed_work leds; + struct work_struct leds; }; @@ -219,10 +218,9 @@ static void set_port_led( #define LED_CYCLE_PERIOD ((2*HZ)/3) -static void led_work (struct work_struct *work) +static void led_work (void *__hub) { - struct usb_hub *hub = - container_of(work, struct usb_hub, leds.work); + struct usb_hub *hub = __hub; struct usb_device *hdev = hub->hdev; unsigned i; unsigned changed = 0; @@ -407,10 +405,9 @@ hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt) * talking to TTs must queue control transfers (not just bulk and iso), so * both can talk to the same hub concurrently. */ -static void hub_tt_kevent (struct work_struct *work) +static void hub_tt_kevent (void *arg) { - struct usb_hub *hub = - container_of(work, struct usb_hub, tt.kevent); + struct usb_hub *hub = arg; unsigned long flags; spin_lock_irqsave (&hub->tt.lock, flags); @@ -461,7 +458,7 @@ void usb_hub_tt_clear_buffer (struct usb_device *udev, int pipe) * since each TT has "at least two" buffers that can need it (and * there can be many TTs per hub). even if they're uncommon. */ - if ((clear = kmalloc (sizeof *clear, GFP_ATOMIC)) == NULL) { + if ((clear = kmalloc (sizeof *clear, SLAB_ATOMIC)) == NULL) { dev_err (&udev->dev, "can't save CLEAR_TT_BUFFER state\n"); /* FIXME recover somehow ... RESET_TT? */ return; @@ -697,7 +694,7 @@ static int hub_configure(struct usb_hub *hub, spin_lock_init (&hub->tt.lock); INIT_LIST_HEAD (&hub->tt.clear_list); - INIT_WORK (&hub->tt.kevent, hub_tt_kevent); + INIT_WORK (&hub->tt.kevent, hub_tt_kevent, hub); switch (hdev->descriptor.bDeviceProtocol) { case 0: break; @@ -941,7 +938,7 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id) INIT_LIST_HEAD(&hub->event_list); hub->intfdev = &intf->dev; hub->hdev = hdev; - INIT_DELAYED_WORK(&hub->leds, led_work); + INIT_WORK(&hub->leds, led_work, hub); usb_set_intfdata (intf, hub); intf->needs_remote_wakeup = 1; @@ -2372,7 +2369,7 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1) struct usb_qualifier_descriptor *qual; int status; - qual = kmalloc (sizeof *qual, GFP_KERNEL); + qual = kmalloc (sizeof *qual, SLAB_KERNEL); if (qual == NULL) return; @@ -2384,7 +2381,7 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1) /* hub LEDs are probably harder to miss than syslog */ if (hub->has_indicators) { hub->indicator[port1-1] = INDICATOR_GREEN_BLINK; - schedule_delayed_work (&hub->leds, 0); + schedule_work (&hub->leds); } } kfree(qual); @@ -2558,7 +2555,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, if (hub->has_indicators) { hub->indicator[port1-1] = INDICATOR_AMBER_BLINK; - schedule_delayed_work (&hub->leds, 0); + schedule_work (&hub->leds); } status = -ENOTCONN; /* Don't retry */ goto loop_disable; @@ -2923,7 +2920,7 @@ static int config_descriptors_changed(struct usb_device *udev) if (len < le16_to_cpu(udev->config[index].desc.wTotalLength)) len = le16_to_cpu(udev->config[index].desc.wTotalLength); } - buf = kmalloc (len, GFP_KERNEL); + buf = kmalloc (len, SLAB_KERNEL); if (buf == NULL) { dev_err(&udev->dev, "no mem to re-read configs after reset\n"); /* assume the worst */ 
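The hub.c hunks above, like most of the USB changes that follow, move between two workqueue calling conventions: on the '-' side the handler takes a struct work_struct * and recovers its container with container_of(), while on the '+' side INIT_WORK() takes a third data argument and the handler receives it as a void *. A minimal sketch of both conventions, using a hypothetical struct foo rather than any driver touched in this diff:

/*
 * Illustrative sketch only -- "foo" and its members are hypothetical and do
 * not appear in any of the files changed here.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct foo {
	struct work_struct work;
};

/* Convention on the '+' side of these hunks: the handler receives the data
 * pointer that was passed as the third argument of INIT_WORK(). */
static void foo_work_old(void *data)
{
	struct foo *foo = data;
	/* ... use foo ... */
}

/* Convention on the '-' side: the handler receives the work_struct itself
 * and recovers its container with container_of(). */
static void foo_work_new(struct work_struct *work)
{
	struct foo *foo = container_of(work, struct foo, work);
	/* ... use foo ... */
}

static void foo_init(struct foo *foo)
{
	/* Only one of these forms compiles against a given kernel tree: */
	INIT_WORK(&foo->work, foo_work_old, foo);	/* three-argument form ('+' side) */
	/* INIT_WORK(&foo->work, foo_work_new); */	/* two-argument form ('-' side) */
}

The delayed-work churn follows the same split: where the '-' side declares a struct delayed_work and queues it with INIT_DELAYED_WORK() and schedule_delayed_work(&w, 0), the '+' side keeps a plain struct work_struct and uses INIT_WORK()/schedule_work(), which is also why the u132-hcd and ftdi-elan hunks further down grow explicit "delta > 0" wrappers around queue_delayed_work() versus queue_work().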
diff --git a/trunk/drivers/usb/core/message.c b/trunk/drivers/usb/core/message.c index 149aa8bfb1fe..29b0fa9ff9d0 100644 --- a/trunk/drivers/usb/core/message.c +++ b/trunk/drivers/usb/core/message.c @@ -488,7 +488,7 @@ void usb_sg_wait (struct usb_sg_request *io) int retval; io->urbs [i]->dev = io->dev; - retval = usb_submit_urb (io->urbs [i], GFP_ATOMIC); + retval = usb_submit_urb (io->urbs [i], SLAB_ATOMIC); /* after we submit, let completions or cancelations fire; * we handshake using io->status. @@ -1501,10 +1501,9 @@ struct set_config_request { }; /* Worker routine for usb_driver_set_configuration() */ -static void driver_set_config_work(struct work_struct *work) +static void driver_set_config_work(void *_req) { - struct set_config_request *req = - container_of(work, struct set_config_request, work); + struct set_config_request *req = _req; usb_lock_device(req->udev); usb_set_configuration(req->udev, req->config); @@ -1542,7 +1541,7 @@ int usb_driver_set_configuration(struct usb_device *udev, int config) return -ENOMEM; req->udev = udev; req->config = config; - INIT_WORK(&req->work, driver_set_config_work); + INIT_WORK(&req->work, driver_set_config_work, req); usb_get_dev(udev); if (!schedule_work(&req->work)) { diff --git a/trunk/drivers/usb/core/usb.c b/trunk/drivers/usb/core/usb.c index 02426d0b9a34..81cb52564e68 100644 --- a/trunk/drivers/usb/core/usb.c +++ b/trunk/drivers/usb/core/usb.c @@ -203,10 +203,9 @@ static void ksuspend_usb_cleanup(void) #ifdef CONFIG_USB_SUSPEND /* usb_autosuspend_work - callback routine to autosuspend a USB device */ -static void usb_autosuspend_work(struct work_struct *work) +static void usb_autosuspend_work(void *_udev) { - struct usb_device *udev = - container_of(work, struct usb_device, autosuspend.work); + struct usb_device *udev = _udev; usb_pm_lock(udev); udev->auto_pm = 1; @@ -216,7 +215,7 @@ static void usb_autosuspend_work(struct work_struct *work) #else -static void usb_autosuspend_work(struct work_struct *work) +static void usb_autosuspend_work(void *_udev) {} #endif /* CONFIG_USB_SUSPEND */ @@ -305,7 +304,7 @@ usb_alloc_dev(struct usb_device *parent, struct usb_bus *bus, unsigned port1) #ifdef CONFIG_PM mutex_init(&dev->pm_mutex); - INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work); + INIT_WORK(&dev->autosuspend, usb_autosuspend_work, dev); #endif return dev; } diff --git a/trunk/drivers/usb/gadget/ether.c b/trunk/drivers/usb/gadget/ether.c index d15bf22b9a03..3bd1dfe565c1 100644 --- a/trunk/drivers/usb/gadget/ether.c +++ b/trunk/drivers/usb/gadget/ether.c @@ -1833,9 +1833,9 @@ static void rx_fill (struct eth_dev *dev, gfp_t gfp_flags) spin_unlock_irqrestore(&dev->req_lock, flags); } -static void eth_work (struct work_struct *work) +static void eth_work (void *_dev) { - struct eth_dev *dev = container_of(work, struct eth_dev, work); + struct eth_dev *dev = _dev; if (test_and_clear_bit (WORK_RX_MEMORY, &dev->todo)) { if (netif_running (dev->net)) @@ -2398,7 +2398,7 @@ eth_bind (struct usb_gadget *gadget) dev = netdev_priv(net); spin_lock_init (&dev->lock); spin_lock_init (&dev->req_lock); - INIT_WORK (&dev->work, eth_work); + INIT_WORK (&dev->work, eth_work, dev); INIT_LIST_HEAD (&dev->tx_reqs); INIT_LIST_HEAD (&dev->rx_reqs); diff --git a/trunk/drivers/usb/gadget/file_storage.c b/trunk/drivers/usb/gadget/file_storage.c index c98316ce8384..8b975d15538d 100644 --- a/trunk/drivers/usb/gadget/file_storage.c +++ b/trunk/drivers/usb/gadget/file_storage.c @@ -250,7 +250,7 @@ #include #include #include -#include +#include #include #include 
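The gadget and host-controller hunks that follow also swap allocation-flag and slab-cache spellings: SLAB_KERNEL/SLAB_ATOMIC and kmem_cache_t on the '+' side versus GFP_KERNEL/GFP_ATOMIC and struct kmem_cache on the '-' side. The SLAB_* names are plain aliases of the corresponding GFP_* flags, which is why they can also be handed to gfp_t parameters such as those of usb_submit_urb() and usb_buffer_alloc(). A minimal sketch under those assumptions, built around a hypothetical bar_item cache rather than code from this diff:

/*
 * Minimal sketch, not taken from any file in this diff: "bar_item" and the
 * helpers around it are hypothetical. It only illustrates that SLAB_KERNEL /
 * SLAB_ATOMIC alias GFP_KERNEL / GFP_ATOMIC and that kmem_cache_t is a
 * typedef for struct kmem_cache, as the '+' side of this diff assumes.
 */
#include <linux/errno.h>
#include <linux/slab.h>

struct bar_item {
	int payload;
};

static kmem_cache_t *bar_cache;		/* later kernels spell this struct kmem_cache * */

static int bar_cache_init(void)
{
	/* six-argument create (ctor + dtor), matching the era of the '+' side */
	bar_cache = kmem_cache_create("bar_item", sizeof(struct bar_item),
				      0, 0, NULL, NULL);
	return bar_cache ? 0 : -ENOMEM;
}

static struct bar_item *bar_item_alloc(int can_sleep)
{
	/* SLAB_KERNEL == GFP_KERNEL and SLAB_ATOMIC == GFP_ATOMIC, so the
	 * chosen flag is an ordinary gfp_t value. */
	return kmem_cache_alloc(bar_cache, can_sleep ? SLAB_KERNEL : SLAB_ATOMIC);
}

The hc_crisv10.c hunk earlier takes the same idea further with its SLAB_FLAG macro, selecting SLAB_ATOMIC or SLAB_KERNEL based on in_interrupt(), and the kmem_cache_t renames in uhci-hcd.c and mon_text.c below are the same kind of alias swap between the typedef and the underlying struct name.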
diff --git a/trunk/drivers/usb/gadget/gmidi.c b/trunk/drivers/usb/gadget/gmidi.c index 31351826f2ba..64554acad63f 100644 --- a/trunk/drivers/usb/gadget/gmidi.c +++ b/trunk/drivers/usb/gadget/gmidi.c @@ -1236,7 +1236,7 @@ static int __devinit gmidi_bind(struct usb_gadget *gadget) /* ok, we made sense of the hardware ... */ - dev = kzalloc(sizeof(*dev), GFP_KERNEL); + dev = kzalloc(sizeof(*dev), SLAB_KERNEL); if (!dev) { return -ENOMEM; } diff --git a/trunk/drivers/usb/gadget/goku_udc.c b/trunk/drivers/usb/gadget/goku_udc.c index 805a9826842d..a3076da3f4eb 100644 --- a/trunk/drivers/usb/gadget/goku_udc.c +++ b/trunk/drivers/usb/gadget/goku_udc.c @@ -1864,7 +1864,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id) } /* alloc, and start init */ - dev = kmalloc (sizeof *dev, GFP_KERNEL); + dev = kmalloc (sizeof *dev, SLAB_KERNEL); if (dev == NULL){ pr_debug("enomem %s\n", pci_name(pdev)); retval = -ENOMEM; diff --git a/trunk/drivers/usb/gadget/inode.c b/trunk/drivers/usb/gadget/inode.c index 3fb1044a4db0..86924f9cdd7e 100644 --- a/trunk/drivers/usb/gadget/inode.c +++ b/trunk/drivers/usb/gadget/inode.c @@ -412,7 +412,7 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */ value = -ENOMEM; - kbuf = kmalloc (len, GFP_KERNEL); + kbuf = kmalloc (len, SLAB_KERNEL); if (unlikely (!kbuf)) goto free1; @@ -456,7 +456,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */ value = -ENOMEM; - kbuf = kmalloc (len, GFP_KERNEL); + kbuf = kmalloc (len, SLAB_KERNEL); if (!kbuf) goto free1; if (copy_from_user (kbuf, buf, len)) { @@ -1898,7 +1898,7 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) buf += 4; length -= 4; - kbuf = kmalloc (length, GFP_KERNEL); + kbuf = kmalloc (length, SLAB_KERNEL); if (!kbuf) return -ENOMEM; if (copy_from_user (kbuf, buf, length)) { diff --git a/trunk/drivers/usb/gadget/net2280.c b/trunk/drivers/usb/gadget/net2280.c index 3024c679e38e..0b590831582c 100644 --- a/trunk/drivers/usb/gadget/net2280.c +++ b/trunk/drivers/usb/gadget/net2280.c @@ -2861,7 +2861,7 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id) } /* alloc, and start init */ - dev = kzalloc (sizeof *dev, GFP_KERNEL); + dev = kzalloc (sizeof *dev, SLAB_KERNEL); if (dev == NULL){ retval = -ENOMEM; goto done; diff --git a/trunk/drivers/usb/gadget/omap_udc.c b/trunk/drivers/usb/gadget/omap_udc.c index 030d87c28c2f..48a09fd89d18 100644 --- a/trunk/drivers/usb/gadget/omap_udc.c +++ b/trunk/drivers/usb/gadget/omap_udc.c @@ -2581,7 +2581,7 @@ omap_udc_setup(struct platform_device *odev, struct otg_transceiver *xceiv) /* UDC_PULLUP_EN gates the chip clock */ // OTG_SYSCON_1_REG |= DEV_IDLE_EN; - udc = kzalloc(sizeof(*udc), GFP_KERNEL); + udc = kzalloc(sizeof(*udc), SLAB_KERNEL); if (!udc) return -ENOMEM; diff --git a/trunk/drivers/usb/gadget/zero.c b/trunk/drivers/usb/gadget/zero.c index 40710ea1b490..0f809dd68492 100644 --- a/trunk/drivers/usb/gadget/zero.c +++ b/trunk/drivers/usb/gadget/zero.c @@ -1190,7 +1190,7 @@ zero_bind (struct usb_gadget *gadget) /* ok, we made sense of the hardware ... 
*/ - dev = kzalloc(sizeof(*dev), GFP_KERNEL); + dev = kzalloc(sizeof(*dev), SLAB_KERNEL); if (!dev) return -ENOMEM; spin_lock_init (&dev->lock); diff --git a/trunk/drivers/usb/host/ehci-dbg.c b/trunk/drivers/usb/host/ehci-dbg.c index 56349d21e6ea..34b7a31cd85b 100644 --- a/trunk/drivers/usb/host/ehci-dbg.c +++ b/trunk/drivers/usb/host/ehci-dbg.c @@ -492,7 +492,7 @@ show_periodic (struct class_device *class_dev, char *buf) unsigned i; __le32 tag; - if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, GFP_ATOMIC))) + if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, SLAB_ATOMIC))) return 0; seen_count = 0; diff --git a/trunk/drivers/usb/host/hc_crisv10.c b/trunk/drivers/usb/host/hc_crisv10.c index 9325e46a68c0..87eca6aeacf2 100644 --- a/trunk/drivers/usb/host/hc_crisv10.c +++ b/trunk/drivers/usb/host/hc_crisv10.c @@ -188,7 +188,7 @@ static DEFINE_TIMER(bulk_eot_timer, NULL, 0, 0); #define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \ {panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);} -#define SLAB_FLAG (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL) +#define SLAB_FLAG (in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL) #define KMALLOC_FLAG (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL) /* Most helpful debugging aid */ @@ -275,13 +275,13 @@ static volatile USB_SB_Desc_t TxIntrSB_zout __attribute__ ((aligned (4))); static int zout_buffer[4] __attribute__ ((aligned (4))); /* Cache for allocating new EP and SB descriptors. */ -static struct kmem_cache *usb_desc_cache; +static kmem_cache_t *usb_desc_cache; /* Cache for the registers allocated in the top half. */ -static struct kmem_cache *top_half_reg_cache; +static kmem_cache_t *top_half_reg_cache; /* Cache for the data allocated in the isoc descr top half. */ -static struct kmem_cache *isoc_compl_cache; +static kmem_cache_t *isoc_compl_cache; static struct usb_bus *etrax_usb_bus; @@ -1743,7 +1743,7 @@ static irqreturn_t etrax_usb_tx_interrupt(int irq, void *vhc) *R_DMA_CH8_SUB3_CLR_INTR = IO_STATE(R_DMA_CH8_SUB3_CLR_INTR, clr_descr, do); - comp_data = (usb_isoc_complete_data_t*)kmem_cache_alloc(isoc_compl_cache, GFP_ATOMIC); + comp_data = (usb_isoc_complete_data_t*)kmem_cache_alloc(isoc_compl_cache, SLAB_ATOMIC); assert(comp_data != NULL); INIT_WORK(&comp_data->usb_bh, etrax_usb_isoc_descr_interrupt_bottom_half, comp_data); @@ -3010,7 +3010,7 @@ static void etrax_usb_add_to_isoc_sb_list(struct urb *urb, int epid) if (!urb->iso_frame_desc[i].length) continue; - next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, GFP_ATOMIC); + next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_ATOMIC); assert(next_sb_desc != NULL); if (urb->iso_frame_desc[i].length > 0) { @@ -3063,7 +3063,7 @@ static void etrax_usb_add_to_isoc_sb_list(struct urb *urb, int epid) if (TxIsocEPList[epid].sub == 0) { dbg_isoc("Isoc traffic not already running, allocating SB"); - next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, GFP_ATOMIC); + next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_ATOMIC); assert(next_sb_desc != NULL); next_sb_desc->command = (IO_STATE(USB_SB_command, tt, in) | @@ -3317,7 +3317,7 @@ static irqreturn_t etrax_usb_hc_interrupt_top_half(int irq, void *vhc) restore_flags(flags); - reg = (usb_interrupt_registers_t *)kmem_cache_alloc(top_half_reg_cache, GFP_ATOMIC); + reg = (usb_interrupt_registers_t *)kmem_cache_alloc(top_half_reg_cache, SLAB_ATOMIC); assert(reg != NULL); diff --git a/trunk/drivers/usb/host/ohci-dbg.c b/trunk/drivers/usb/host/ohci-dbg.c index 
0f47a57dac28..8293c1d4be3f 100644 --- a/trunk/drivers/usb/host/ohci-dbg.c +++ b/trunk/drivers/usb/host/ohci-dbg.c @@ -505,7 +505,7 @@ show_periodic (struct class_device *class_dev, char *buf) char *next; unsigned i; - if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, GFP_ATOMIC))) + if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, SLAB_ATOMIC))) return 0; seen_count = 0; diff --git a/trunk/drivers/usb/host/ohci-pnx4008.c b/trunk/drivers/usb/host/ohci-pnx4008.c index 7f26f9bdbaf1..2dbb77414905 100644 --- a/trunk/drivers/usb/host/ohci-pnx4008.c +++ b/trunk/drivers/usb/host/ohci-pnx4008.c @@ -134,7 +134,7 @@ static int isp1301_attach(struct i2c_adapter *adap, int addr, int kind) { struct i2c_client *c; - c = (struct i2c_client *)kzalloc(sizeof(*c), GFP_KERNEL); + c = (struct i2c_client *)kzalloc(sizeof(*c), SLAB_KERNEL); if (!c) return -ENOMEM; diff --git a/trunk/drivers/usb/host/u132-hcd.c b/trunk/drivers/usb/host/u132-hcd.c index a9d7119e3176..ef54e310bfc4 100644 --- a/trunk/drivers/usb/host/u132-hcd.c +++ b/trunk/drivers/usb/host/u132-hcd.c @@ -163,7 +163,7 @@ struct u132_endp { u16 queue_next; struct urb *urb_list[ENDP_QUEUE_SIZE]; struct list_head urb_more; - struct delayed_work scheduler; + struct work_struct scheduler; }; struct u132_ring { unsigned in_use:1; @@ -171,7 +171,7 @@ struct u132_ring { u8 number; struct u132 *u132; struct u132_endp *curr_endp; - struct delayed_work scheduler; + struct work_struct scheduler; }; #define OHCI_QUIRK_AMD756 0x01 #define OHCI_QUIRK_SUPERIO 0x02 @@ -198,7 +198,7 @@ struct u132 { u32 hc_roothub_portstatus[MAX_ROOT_PORTS]; int flags; unsigned long next_statechange; - struct delayed_work monitor; + struct work_struct monitor; int num_endpoints; struct u132_addr addr[MAX_U132_ADDRS]; struct u132_udev udev[MAX_U132_UDEVS]; @@ -310,7 +310,7 @@ static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring, if (delta > 0) { if (queue_delayed_work(workqueue, &ring->scheduler, delta)) return; - } else if (queue_delayed_work(workqueue, &ring->scheduler, 0)) + } else if (queue_work(workqueue, &ring->scheduler)) return; kref_put(&u132->kref, u132_hcd_delete); return; @@ -389,8 +389,12 @@ static inline void u132_endp_init_kref(struct u132 *u132, static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp, unsigned int delta) { - if (queue_delayed_work(workqueue, &endp->scheduler, delta)) - kref_get(&endp->kref); + if (delta > 0) { + if (queue_delayed_work(workqueue, &endp->scheduler, delta)) + kref_get(&endp->kref); + } else if (queue_work(workqueue, &endp->scheduler)) + kref_get(&endp->kref); + return; } static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp) @@ -406,14 +410,24 @@ static inline void u132_monitor_put_kref(struct u132 *u132) static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta) { - if (queue_delayed_work(workqueue, &u132->monitor, delta)) - kref_get(&u132->kref); + if (delta > 0) { + if (queue_delayed_work(workqueue, &u132->monitor, delta)) { + kref_get(&u132->kref); + } + } else if (queue_work(workqueue, &u132->monitor)) + kref_get(&u132->kref); + return; } static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta) { - if (!queue_delayed_work(workqueue, &u132->monitor, delta)) - kref_put(&u132->kref, u132_hcd_delete); + if (delta > 0) { + if (queue_delayed_work(workqueue, &u132->monitor, delta)) + return; + } else if (queue_work(workqueue, &u132->monitor)) + return; + kref_put(&u132->kref, u132_hcd_delete); + return; } static void 
u132_monitor_cancel_work(struct u132 *u132) @@ -475,9 +489,9 @@ static int read_roothub_info(struct u132 *u132) return 0; } -static void u132_hcd_monitor_work(struct work_struct *work) +static void u132_hcd_monitor_work(void *data) { - struct u132 *u132 = container_of(work, struct u132, monitor.work); + struct u132 *u132 = data; if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); @@ -1301,14 +1315,15 @@ static void u132_hcd_initial_setup_sent(void *data, struct urb *urb, u8 *buf, } } +static void u132_hcd_ring_work_scheduler(void *data); +static void u132_hcd_endp_work_scheduler(void *data); /* * this work function is only executed from the work queue * */ -static void u132_hcd_ring_work_scheduler(struct work_struct *work) +static void u132_hcd_ring_work_scheduler(void *data) { - struct u132_ring *ring = - container_of(work, struct u132_ring, scheduler.work); + struct u132_ring *ring = data; struct u132 *u132 = ring->u132; down(&u132->scheduler_lock); if (ring->in_use) { @@ -1367,11 +1382,10 @@ static void u132_hcd_ring_work_scheduler(struct work_struct *work) } } -static void u132_hcd_endp_work_scheduler(struct work_struct *work) +static void u132_hcd_endp_work_scheduler(void *data) { struct u132_ring *ring; - struct u132_endp *endp = - container_of(work, struct u132_endp, scheduler.work); + struct u132_endp *endp = data; struct u132 *u132 = endp->u132; down(&u132->scheduler_lock); ring = endp->ring; @@ -1929,7 +1943,7 @@ static int create_endpoint_and_queue_int(struct u132 *u132, if (!endp) { return -ENOMEM; } - INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler); + INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp); spin_lock_init(&endp->queue_lock.slock); INIT_LIST_HEAD(&endp->urb_more); ring = endp->ring = &u132->ring[0]; @@ -2018,7 +2032,7 @@ static int create_endpoint_and_queue_bulk(struct u132 *u132, if (!endp) { return -ENOMEM; } - INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler); + INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp); spin_lock_init(&endp->queue_lock.slock); INIT_LIST_HEAD(&endp->urb_more); endp->dequeueing = 0; @@ -2103,7 +2117,7 @@ static int create_endpoint_and_queue_control(struct u132 *u132, if (!endp) { return -ENOMEM; } - INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler); + INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp); spin_lock_init(&endp->queue_lock.slock); INIT_LIST_HEAD(&endp->urb_more); ring = endp->ring = &u132->ring[0]; @@ -3082,10 +3096,10 @@ static void u132_initialise(struct u132 *u132, struct platform_device *pdev) ring->number = rings + 1; ring->length = 0; ring->curr_endp = NULL; - INIT_DELAYED_WORK(&ring->scheduler, - u132_hcd_ring_work_scheduler); + INIT_WORK(&ring->scheduler, u132_hcd_ring_work_scheduler, + (void *)ring); } down(&u132->sw_lock); - INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work); + INIT_WORK(&u132->monitor, u132_hcd_monitor_work, (void *)u132); while (ports-- > 0) { struct u132_port *port = &u132->port[ports]; port->u132 = u132; diff --git a/trunk/drivers/usb/host/uhci-hcd.c b/trunk/drivers/usb/host/uhci-hcd.c index e87692c31be4..226bf3de8edd 100644 --- a/trunk/drivers/usb/host/uhci-hcd.c +++ b/trunk/drivers/usb/host/uhci-hcd.c @@ -81,7 +81,7 @@ MODULE_PARM_DESC(debug, "Debug level"); static char *errbuf; #define ERRBUF_LEN (32 * 1024) -static struct kmem_cache *uhci_up_cachep; /* urb_priv */ +static kmem_cache_t *uhci_up_cachep; /* urb_priv */ static 
void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state); static void wakeup_rh(struct uhci_hcd *uhci); diff --git a/trunk/drivers/usb/host/uhci-q.c b/trunk/drivers/usb/host/uhci-q.c index 30b88459ac7d..06115f22a4fa 100644 --- a/trunk/drivers/usb/host/uhci-q.c +++ b/trunk/drivers/usb/host/uhci-q.c @@ -498,7 +498,7 @@ static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, { struct urb_priv *urbp; - urbp = kmem_cache_alloc(uhci_up_cachep, GFP_ATOMIC); + urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC); if (!urbp) return NULL; diff --git a/trunk/drivers/usb/input/acecad.c b/trunk/drivers/usb/input/acecad.c index 909138e5aa04..0096373b5f98 100644 --- a/trunk/drivers/usb/input/acecad.c +++ b/trunk/drivers/usb/input/acecad.c @@ -152,7 +152,7 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_ if (!acecad || !input_dev) goto fail1; - acecad->data = usb_buffer_alloc(dev, 8, GFP_KERNEL, &acecad->data_dma); + acecad->data = usb_buffer_alloc(dev, 8, SLAB_KERNEL, &acecad->data_dma); if (!acecad->data) goto fail1; diff --git a/trunk/drivers/usb/input/aiptek.c b/trunk/drivers/usb/input/aiptek.c index 9f52429ce654..bf428184608f 100644 --- a/trunk/drivers/usb/input/aiptek.c +++ b/trunk/drivers/usb/input/aiptek.c @@ -1988,7 +1988,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id) goto fail1; aiptek->data = usb_buffer_alloc(usbdev, AIPTEK_PACKET_LENGTH, - GFP_ATOMIC, &aiptek->data_dma); + SLAB_ATOMIC, &aiptek->data_dma); if (!aiptek->data) goto fail1; diff --git a/trunk/drivers/usb/input/ati_remote.c b/trunk/drivers/usb/input/ati_remote.c index b724e36f7b92..ff23318dc301 100644 --- a/trunk/drivers/usb/input/ati_remote.c +++ b/trunk/drivers/usb/input/ati_remote.c @@ -592,7 +592,7 @@ static void ati_remote_irq_in(struct urb *urb) __FUNCTION__, urb->status); } - retval = usb_submit_urb(urb, GFP_ATOMIC); + retval = usb_submit_urb(urb, SLAB_ATOMIC); if (retval) dev_err(&ati_remote->interface->dev, "%s: usb_submit_urb()=%d\n", __FUNCTION__, retval); @@ -604,12 +604,12 @@ static void ati_remote_irq_in(struct urb *urb) static int ati_remote_alloc_buffers(struct usb_device *udev, struct ati_remote *ati_remote) { - ati_remote->inbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, GFP_ATOMIC, + ati_remote->inbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, SLAB_ATOMIC, &ati_remote->inbuf_dma); if (!ati_remote->inbuf) return -1; - ati_remote->outbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, GFP_ATOMIC, + ati_remote->outbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, SLAB_ATOMIC, &ati_remote->outbuf_dma); if (!ati_remote->outbuf) return -1; diff --git a/trunk/drivers/usb/input/hid-core.c b/trunk/drivers/usb/input/hid-core.c index f1d0e1d69828..a49644b7c58e 100644 --- a/trunk/drivers/usb/input/hid-core.c +++ b/trunk/drivers/usb/input/hid-core.c @@ -969,10 +969,9 @@ static void hid_retry_timeout(unsigned long _hid) } /* Workqueue routine to reset the device or clear a halt */ -static void hid_reset(struct work_struct *work) +static void hid_reset(void *_hid) { - struct hid_device *hid = - container_of(work, struct hid_device, reset_work); + struct hid_device *hid = (struct hid_device *) _hid; int rc_lock, rc = 0; if (test_bit(HID_CLEAR_HALT, &hid->iofl)) { @@ -1079,7 +1078,7 @@ static void hid_irq_in(struct urb *urb) warn("input irq status %d received", urb->status); } - status = usb_submit_urb(urb, GFP_ATOMIC); + status = usb_submit_urb(urb, SLAB_ATOMIC); if (status) { clear_bit(HID_IN_RUNNING, &hid->iofl); if (status != -EPERM) { @@ -1864,13 +1863,13 
@@ static void hid_find_max_report(struct hid_device *hid, unsigned int type, int * static int hid_alloc_buffers(struct usb_device *dev, struct hid_device *hid) { - if (!(hid->inbuf = usb_buffer_alloc(dev, hid->bufsize, GFP_ATOMIC, &hid->inbuf_dma))) + if (!(hid->inbuf = usb_buffer_alloc(dev, hid->bufsize, SLAB_ATOMIC, &hid->inbuf_dma))) return -1; - if (!(hid->outbuf = usb_buffer_alloc(dev, hid->bufsize, GFP_ATOMIC, &hid->outbuf_dma))) + if (!(hid->outbuf = usb_buffer_alloc(dev, hid->bufsize, SLAB_ATOMIC, &hid->outbuf_dma))) return -1; - if (!(hid->cr = usb_buffer_alloc(dev, sizeof(*(hid->cr)), GFP_ATOMIC, &hid->cr_dma))) + if (!(hid->cr = usb_buffer_alloc(dev, sizeof(*(hid->cr)), SLAB_ATOMIC, &hid->cr_dma))) return -1; - if (!(hid->ctrlbuf = usb_buffer_alloc(dev, hid->bufsize, GFP_ATOMIC, &hid->ctrlbuf_dma))) + if (!(hid->ctrlbuf = usb_buffer_alloc(dev, hid->bufsize, SLAB_ATOMIC, &hid->ctrlbuf_dma))) return -1; return 0; @@ -2044,7 +2043,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf) init_waitqueue_head(&hid->wait); - INIT_WORK(&hid->reset_work, hid_reset); + INIT_WORK(&hid->reset_work, hid_reset, hid); setup_timer(&hid->io_retry, hid_retry_timeout, (unsigned long) hid); spin_lock_init(&hid->inlock); diff --git a/trunk/drivers/usb/input/keyspan_remote.c b/trunk/drivers/usb/input/keyspan_remote.c index 98bd323369c7..50aa8108a50b 100644 --- a/trunk/drivers/usb/input/keyspan_remote.c +++ b/trunk/drivers/usb/input/keyspan_remote.c @@ -456,7 +456,7 @@ static int keyspan_probe(struct usb_interface *interface, const struct usb_devic remote->in_endpoint = endpoint; remote->toggle = -1; /* Set to -1 so we will always not match the toggle from the first remote message. */ - remote->in_buffer = usb_buffer_alloc(udev, RECV_SIZE, GFP_ATOMIC, &remote->in_dma); + remote->in_buffer = usb_buffer_alloc(udev, RECV_SIZE, SLAB_ATOMIC, &remote->in_dma); if (!remote->in_buffer) { retval = -ENOMEM; goto fail1; diff --git a/trunk/drivers/usb/input/mtouchusb.c b/trunk/drivers/usb/input/mtouchusb.c index 92c4e07da4c8..79a85d46cb13 100644 --- a/trunk/drivers/usb/input/mtouchusb.c +++ b/trunk/drivers/usb/input/mtouchusb.c @@ -164,7 +164,7 @@ static int mtouchusb_alloc_buffers(struct usb_device *udev, struct mtouch_usb *m dbg("%s - called", __FUNCTION__); mtouch->data = usb_buffer_alloc(udev, MTOUCHUSB_REPORT_DATA_SIZE, - GFP_ATOMIC, &mtouch->data_dma); + SLAB_ATOMIC, &mtouch->data_dma); if (!mtouch->data) return -1; diff --git a/trunk/drivers/usb/input/powermate.c b/trunk/drivers/usb/input/powermate.c index fea97e5437f8..0bf91778c40d 100644 --- a/trunk/drivers/usb/input/powermate.c +++ b/trunk/drivers/usb/input/powermate.c @@ -277,12 +277,12 @@ static int powermate_input_event(struct input_dev *dev, unsigned int type, unsig static int powermate_alloc_buffers(struct usb_device *udev, struct powermate_device *pm) { pm->data = usb_buffer_alloc(udev, POWERMATE_PAYLOAD_SIZE_MAX, - GFP_ATOMIC, &pm->data_dma); + SLAB_ATOMIC, &pm->data_dma); if (!pm->data) return -1; pm->configcr = usb_buffer_alloc(udev, sizeof(*(pm->configcr)), - GFP_ATOMIC, &pm->configcr_dma); + SLAB_ATOMIC, &pm->configcr_dma); if (!pm->configcr) return -1; diff --git a/trunk/drivers/usb/input/touchkitusb.c b/trunk/drivers/usb/input/touchkitusb.c index 2a314b065922..05c0d1ca39ab 100644 --- a/trunk/drivers/usb/input/touchkitusb.c +++ b/trunk/drivers/usb/input/touchkitusb.c @@ -248,7 +248,7 @@ static int touchkit_alloc_buffers(struct usb_device *udev, struct touchkit_usb *touchkit) { touchkit->data = usb_buffer_alloc(udev, 
TOUCHKIT_REPORT_DATA_SIZE, - GFP_ATOMIC, &touchkit->data_dma); + SLAB_ATOMIC, &touchkit->data_dma); if (!touchkit->data) return -1; diff --git a/trunk/drivers/usb/input/usbkbd.c b/trunk/drivers/usb/input/usbkbd.c index 8505824848f6..dac88640eab6 100644 --- a/trunk/drivers/usb/input/usbkbd.c +++ b/trunk/drivers/usb/input/usbkbd.c @@ -122,7 +122,7 @@ static void usb_kbd_irq(struct urb *urb) memcpy(kbd->old, kbd->new, 8); resubmit: - i = usb_submit_urb (urb, GFP_ATOMIC); + i = usb_submit_urb (urb, SLAB_ATOMIC); if (i) err ("can't resubmit intr, %s-%s/input0, status %d", kbd->usbdev->bus->bus_name, @@ -196,11 +196,11 @@ static int usb_kbd_alloc_mem(struct usb_device *dev, struct usb_kbd *kbd) return -1; if (!(kbd->led = usb_alloc_urb(0, GFP_KERNEL))) return -1; - if (!(kbd->new = usb_buffer_alloc(dev, 8, GFP_ATOMIC, &kbd->new_dma))) + if (!(kbd->new = usb_buffer_alloc(dev, 8, SLAB_ATOMIC, &kbd->new_dma))) return -1; - if (!(kbd->cr = usb_buffer_alloc(dev, sizeof(struct usb_ctrlrequest), GFP_ATOMIC, &kbd->cr_dma))) + if (!(kbd->cr = usb_buffer_alloc(dev, sizeof(struct usb_ctrlrequest), SLAB_ATOMIC, &kbd->cr_dma))) return -1; - if (!(kbd->leds = usb_buffer_alloc(dev, 1, GFP_ATOMIC, &kbd->leds_dma))) + if (!(kbd->leds = usb_buffer_alloc(dev, 1, SLAB_ATOMIC, &kbd->leds_dma))) return -1; return 0; diff --git a/trunk/drivers/usb/input/usbmouse.c b/trunk/drivers/usb/input/usbmouse.c index 64a33e420cfb..68a55642c082 100644 --- a/trunk/drivers/usb/input/usbmouse.c +++ b/trunk/drivers/usb/input/usbmouse.c @@ -86,7 +86,7 @@ static void usb_mouse_irq(struct urb *urb) input_sync(dev); resubmit: - status = usb_submit_urb (urb, GFP_ATOMIC); + status = usb_submit_urb (urb, SLAB_ATOMIC); if (status) err ("can't resubmit intr, %s-%s/input0, status %d", mouse->usbdev->bus->bus_name, @@ -137,7 +137,7 @@ static int usb_mouse_probe(struct usb_interface *intf, const struct usb_device_i if (!mouse || !input_dev) goto fail1; - mouse->data = usb_buffer_alloc(dev, 8, GFP_ATOMIC, &mouse->data_dma); + mouse->data = usb_buffer_alloc(dev, 8, SLAB_ATOMIC, &mouse->data_dma); if (!mouse->data) goto fail1; diff --git a/trunk/drivers/usb/input/usbtouchscreen.c b/trunk/drivers/usb/input/usbtouchscreen.c index 7f3c57da9bc0..49704d4ed0e2 100644 --- a/trunk/drivers/usb/input/usbtouchscreen.c +++ b/trunk/drivers/usb/input/usbtouchscreen.c @@ -680,7 +680,7 @@ static int usbtouch_probe(struct usb_interface *intf, type->process_pkt = usbtouch_process_pkt; usbtouch->data = usb_buffer_alloc(udev, type->rept_size, - GFP_KERNEL, &usbtouch->data_dma); + SLAB_KERNEL, &usbtouch->data_dma); if (!usbtouch->data) goto out_free; diff --git a/trunk/drivers/usb/input/xpad.c b/trunk/drivers/usb/input/xpad.c index e4bc76ebc835..df97e5c803f9 100644 --- a/trunk/drivers/usb/input/xpad.c +++ b/trunk/drivers/usb/input/xpad.c @@ -325,7 +325,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id goto fail1; xpad->idata = usb_buffer_alloc(udev, XPAD_PKT_LEN, - GFP_ATOMIC, &xpad->idata_dma); + SLAB_ATOMIC, &xpad->idata_dma); if (!xpad->idata) goto fail1; diff --git a/trunk/drivers/usb/input/yealink.c b/trunk/drivers/usb/input/yealink.c index caff8e6d7448..2268ca311ade 100644 --- a/trunk/drivers/usb/input/yealink.c +++ b/trunk/drivers/usb/input/yealink.c @@ -874,17 +874,17 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id) /* allocate usb buffers */ yld->irq_data = usb_buffer_alloc(udev, USB_PKT_LEN, - GFP_ATOMIC, &yld->irq_dma); + SLAB_ATOMIC, &yld->irq_dma); if (yld->irq_data == NULL) return 
usb_cleanup(yld, -ENOMEM); yld->ctl_data = usb_buffer_alloc(udev, USB_PKT_LEN, - GFP_ATOMIC, &yld->ctl_dma); + SLAB_ATOMIC, &yld->ctl_dma); if (!yld->ctl_data) return usb_cleanup(yld, -ENOMEM); yld->ctl_req = usb_buffer_alloc(udev, sizeof(*(yld->ctl_req)), - GFP_ATOMIC, &yld->ctl_req_dma); + SLAB_ATOMIC, &yld->ctl_req_dma); if (yld->ctl_req == NULL) return usb_cleanup(yld, -ENOMEM); diff --git a/trunk/drivers/usb/misc/appledisplay.c b/trunk/drivers/usb/misc/appledisplay.c index 02cbb7fff24f..ba30ca6a14aa 100644 --- a/trunk/drivers/usb/misc/appledisplay.c +++ b/trunk/drivers/usb/misc/appledisplay.c @@ -76,7 +76,7 @@ struct appledisplay { char *urbdata; /* interrupt URB data buffer */ char *msgdata; /* control message data buffer */ - struct delayed_work work; + struct work_struct work; int button_pressed; spinlock_t lock; }; @@ -117,7 +117,7 @@ static void appledisplay_complete(struct urb *urb) case ACD_BTN_BRIGHT_UP: case ACD_BTN_BRIGHT_DOWN: pdata->button_pressed = 1; - queue_delayed_work(wq, &pdata->work, 0); + queue_work(wq, &pdata->work); break; case ACD_BTN_NONE: default: @@ -184,10 +184,9 @@ static struct backlight_properties appledisplay_bl_data = { .max_brightness = 0xFF }; -static void appledisplay_work(struct work_struct *work) +static void appledisplay_work(void *private) { - struct appledisplay *pdata = - container_of(work, struct appledisplay, work.work); + struct appledisplay *pdata = private; int retval; up(&pdata->bd->sem); @@ -239,7 +238,7 @@ static int appledisplay_probe(struct usb_interface *iface, pdata->udev = udev; spin_lock_init(&pdata->lock); - INIT_DELAYED_WORK(&pdata->work, appledisplay_work); + INIT_WORK(&pdata->work, appledisplay_work, pdata); /* Allocate buffer for control messages */ pdata->msgdata = kmalloc(ACD_MSG_BUFFER_LEN, GFP_KERNEL); diff --git a/trunk/drivers/usb/misc/ftdi-elan.c b/trunk/drivers/usb/misc/ftdi-elan.c index 18b1925032a8..cb0ba3107d7f 100644 --- a/trunk/drivers/usb/misc/ftdi-elan.c +++ b/trunk/drivers/usb/misc/ftdi-elan.c @@ -156,9 +156,9 @@ struct usb_ftdi { struct usb_device *udev; struct usb_interface *interface; struct usb_class_driver *class; - struct delayed_work status_work; - struct delayed_work command_work; - struct delayed_work respond_work; + struct work_struct status_work; + struct work_struct command_work; + struct work_struct respond_work; struct u132_platform_data platform_data; struct resource resources[0]; struct platform_device platform_dev; @@ -210,14 +210,23 @@ static void ftdi_elan_init_kref(struct usb_ftdi *ftdi) static void ftdi_status_requeue_work(struct usb_ftdi *ftdi, unsigned int delta) { - if (!queue_delayed_work(status_queue, &ftdi->status_work, delta)) - kref_put(&ftdi->kref, ftdi_elan_delete); + if (delta > 0) { + if (queue_delayed_work(status_queue, &ftdi->status_work, delta)) + return; + } else if (queue_work(status_queue, &ftdi->status_work)) + return; + kref_put(&ftdi->kref, ftdi_elan_delete); + return; } static void ftdi_status_queue_work(struct usb_ftdi *ftdi, unsigned int delta) { - if (queue_delayed_work(status_queue, &ftdi->status_work, delta)) - kref_get(&ftdi->kref); + if (delta > 0) { + if (queue_delayed_work(status_queue, &ftdi->status_work, delta)) + kref_get(&ftdi->kref); + } else if (queue_work(status_queue, &ftdi->status_work)) + kref_get(&ftdi->kref); + return; } static void ftdi_status_cancel_work(struct usb_ftdi *ftdi) @@ -228,14 +237,25 @@ static void ftdi_status_cancel_work(struct usb_ftdi *ftdi) static void ftdi_command_requeue_work(struct usb_ftdi *ftdi, unsigned int delta) { - if 
(!queue_delayed_work(command_queue, &ftdi->command_work, delta)) - kref_put(&ftdi->kref, ftdi_elan_delete); + if (delta > 0) { + if (queue_delayed_work(command_queue, &ftdi->command_work, + delta)) + return; + } else if (queue_work(command_queue, &ftdi->command_work)) + return; + kref_put(&ftdi->kref, ftdi_elan_delete); + return; } static void ftdi_command_queue_work(struct usb_ftdi *ftdi, unsigned int delta) { - if (queue_delayed_work(command_queue, &ftdi->command_work, delta)) - kref_get(&ftdi->kref); + if (delta > 0) { + if (queue_delayed_work(command_queue, &ftdi->command_work, + delta)) + kref_get(&ftdi->kref); + } else if (queue_work(command_queue, &ftdi->command_work)) + kref_get(&ftdi->kref); + return; } static void ftdi_command_cancel_work(struct usb_ftdi *ftdi) @@ -247,14 +267,25 @@ static void ftdi_command_cancel_work(struct usb_ftdi *ftdi) static void ftdi_response_requeue_work(struct usb_ftdi *ftdi, unsigned int delta) { - if (!queue_delayed_work(respond_queue, &ftdi->respond_work, delta)) - kref_put(&ftdi->kref, ftdi_elan_delete); + if (delta > 0) { + if (queue_delayed_work(respond_queue, &ftdi->respond_work, + delta)) + return; + } else if (queue_work(respond_queue, &ftdi->respond_work)) + return; + kref_put(&ftdi->kref, ftdi_elan_delete); + return; } static void ftdi_respond_queue_work(struct usb_ftdi *ftdi, unsigned int delta) { - if (queue_delayed_work(respond_queue, &ftdi->respond_work, delta)) - kref_get(&ftdi->kref); + if (delta > 0) { + if (queue_delayed_work(respond_queue, &ftdi->respond_work, + delta)) + kref_get(&ftdi->kref); + } else if (queue_work(respond_queue, &ftdi->respond_work)) + kref_get(&ftdi->kref); + return; } static void ftdi_response_cancel_work(struct usb_ftdi *ftdi) @@ -444,11 +475,9 @@ static void ftdi_elan_kick_command_queue(struct usb_ftdi *ftdi) return; } -static void ftdi_elan_command_work(struct work_struct *work) +static void ftdi_elan_command_work(void *data) { - struct usb_ftdi *ftdi = - container_of(work, struct usb_ftdi, command_work.work); - + struct usb_ftdi *ftdi = data; if (ftdi->disconnected > 0) { ftdi_elan_put_kref(ftdi); return; @@ -471,10 +500,9 @@ static void ftdi_elan_kick_respond_queue(struct usb_ftdi *ftdi) return; } -static void ftdi_elan_respond_work(struct work_struct *work) +static void ftdi_elan_respond_work(void *data) { - struct usb_ftdi *ftdi = - container_of(work, struct usb_ftdi, respond_work.work); + struct usb_ftdi *ftdi = data; if (ftdi->disconnected > 0) { ftdi_elan_put_kref(ftdi); return; @@ -506,10 +534,9 @@ static void ftdi_elan_respond_work(struct work_struct *work) * after the FTDI has been synchronized * */ -static void ftdi_elan_status_work(struct work_struct *work) +static void ftdi_elan_status_work(void *data) { - struct usb_ftdi *ftdi = - container_of(work, struct usb_ftdi, status_work.work); + struct usb_ftdi *ftdi = data; int work_delay_in_msec = 0; if (ftdi->disconnected > 0) { ftdi_elan_put_kref(ftdi); @@ -2650,9 +2677,12 @@ static int ftdi_elan_probe(struct usb_interface *interface, ftdi->class = NULL; dev_info(&ftdi->udev->dev, "USB FDTI=%p ELAN interface %d now a" "ctivated\n", ftdi, iface_desc->desc.bInterfaceNumber); - INIT_DELAYED_WORK(&ftdi->status_work, ftdi_elan_status_work); - INIT_DELAYED_WORK(&ftdi->command_work, ftdi_elan_command_work); - INIT_DELAYED_WORK(&ftdi->respond_work, ftdi_elan_respond_work); + INIT_WORK(&ftdi->status_work, ftdi_elan_status_work, + (void *)ftdi); + INIT_WORK(&ftdi->command_work, ftdi_elan_command_work, + (void *)ftdi); + INIT_WORK(&ftdi->respond_work, 
ftdi_elan_respond_work, + (void *)ftdi); ftdi_status_queue_work(ftdi, msecs_to_jiffies(3 *1000)); return 0; } else { diff --git a/trunk/drivers/usb/misc/phidgetkit.c b/trunk/drivers/usb/misc/phidgetkit.c index 371bf2b1197d..9110793f81d3 100644 --- a/trunk/drivers/usb/misc/phidgetkit.c +++ b/trunk/drivers/usb/misc/phidgetkit.c @@ -81,8 +81,8 @@ struct interfacekit { unsigned char *data; dma_addr_t data_dma; - struct delayed_work do_notify; - struct delayed_work do_resubmit; + struct work_struct do_notify; + struct work_struct do_resubmit; unsigned long input_events; unsigned long sensor_events; }; @@ -374,20 +374,19 @@ static void interfacekit_irq(struct urb *urb) } if (kit->input_events || kit->sensor_events) - schedule_delayed_work(&kit->do_notify, 0); + schedule_work(&kit->do_notify); resubmit: - status = usb_submit_urb(urb, GFP_ATOMIC); + status = usb_submit_urb(urb, SLAB_ATOMIC); if (status) err("can't resubmit intr, %s-%s/interfacekit0, status %d", kit->udev->bus->bus_name, kit->udev->devpath, status); } -static void do_notify(struct work_struct *work) +static void do_notify(void *data) { - struct interfacekit *kit = - container_of(work, struct interfacekit, do_notify.work); + struct interfacekit *kit = data; int i; char sysfs_file[8]; @@ -406,11 +405,9 @@ static void do_notify(struct work_struct *work) } } -static void do_resubmit(struct work_struct *work) +static void do_resubmit(void *data) { - struct interfacekit *kit = - container_of(work, struct interfacekit, do_resubmit.work); - set_outputs(kit); + set_outputs(data); } #define show_set_output(value) \ @@ -568,7 +565,7 @@ static int interfacekit_probe(struct usb_interface *intf, const struct usb_devic kit->dev_no = -1; kit->ifkit = ifkit; - kit->data = usb_buffer_alloc(dev, URB_INT_SIZE, GFP_ATOMIC, &kit->data_dma); + kit->data = usb_buffer_alloc(dev, URB_INT_SIZE, SLAB_ATOMIC, &kit->data_dma); if (!kit->data) goto out; @@ -578,8 +575,8 @@ static int interfacekit_probe(struct usb_interface *intf, const struct usb_devic kit->udev = usb_get_dev(dev); kit->intf = intf; - INIT_DELAYED_WORK(&kit->do_notify, do_notify); - INIT_DELAYED_WORK(&kit->do_resubmit, do_resubmit); + INIT_WORK(&kit->do_notify, do_notify, kit); + INIT_WORK(&kit->do_resubmit, do_resubmit, kit); usb_fill_int_urb(kit->irq, kit->udev, pipe, kit->data, maxp > URB_INT_SIZE ? 
URB_INT_SIZE : maxp, interfacekit_irq, kit, endpoint->bInterval); diff --git a/trunk/drivers/usb/misc/phidgetmotorcontrol.c b/trunk/drivers/usb/misc/phidgetmotorcontrol.c index 5727e1ea2f91..c3469b0a67c2 100644 --- a/trunk/drivers/usb/misc/phidgetmotorcontrol.c +++ b/trunk/drivers/usb/misc/phidgetmotorcontrol.c @@ -41,7 +41,7 @@ struct motorcontrol { unsigned char *data; dma_addr_t data_dma; - struct delayed_work do_notify; + struct work_struct do_notify; unsigned long input_events; unsigned long speed_events; unsigned long exceed_events; @@ -148,10 +148,10 @@ static void motorcontrol_irq(struct urb *urb) set_bit(1, &mc->exceed_events); if (mc->input_events || mc->exceed_events || mc->speed_events) - schedule_delayed_work(&mc->do_notify, 0); + schedule_work(&mc->do_notify); resubmit: - status = usb_submit_urb(urb, GFP_ATOMIC); + status = usb_submit_urb(urb, SLAB_ATOMIC); if (status) dev_err(&mc->intf->dev, "can't resubmit intr, %s-%s/motorcontrol0, status %d", @@ -159,10 +159,9 @@ static void motorcontrol_irq(struct urb *urb) mc->udev->devpath, status); } -static void do_notify(struct work_struct *work) +static void do_notify(void *data) { - struct motorcontrol *mc = - container_of(work, struct motorcontrol, do_notify.work); + struct motorcontrol *mc = data; int i; char sysfs_file[8]; @@ -338,7 +337,7 @@ static int motorcontrol_probe(struct usb_interface *intf, const struct usb_devic goto out; mc->dev_no = -1; - mc->data = usb_buffer_alloc(dev, URB_INT_SIZE, GFP_ATOMIC, &mc->data_dma); + mc->data = usb_buffer_alloc(dev, URB_INT_SIZE, SLAB_ATOMIC, &mc->data_dma); if (!mc->data) goto out; @@ -349,7 +348,7 @@ static int motorcontrol_probe(struct usb_interface *intf, const struct usb_devic mc->udev = usb_get_dev(dev); mc->intf = intf; mc->acceleration[0] = mc->acceleration[1] = 10; - INIT_DELAYED_WORK(&mc->do_notify, do_notify); + INIT_WORK(&mc->do_notify, do_notify, mc); usb_fill_int_urb(mc->irq, mc->udev, pipe, mc->data, maxp > URB_INT_SIZE ? 
URB_INT_SIZE : maxp, motorcontrol_irq, mc, endpoint->bInterval); diff --git a/trunk/drivers/usb/misc/usbtest.c b/trunk/drivers/usb/misc/usbtest.c index fb321864a92d..194065dbb51f 100644 --- a/trunk/drivers/usb/misc/usbtest.c +++ b/trunk/drivers/usb/misc/usbtest.c @@ -213,7 +213,7 @@ static struct urb *simple_alloc_urb ( if (bytes < 0) return NULL; - urb = usb_alloc_urb (0, GFP_KERNEL); + urb = usb_alloc_urb (0, SLAB_KERNEL); if (!urb) return urb; usb_fill_bulk_urb (urb, udev, pipe, NULL, bytes, simple_callback, NULL); @@ -223,7 +223,7 @@ static struct urb *simple_alloc_urb ( urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP; if (usb_pipein (pipe)) urb->transfer_flags |= URB_SHORT_NOT_OK; - urb->transfer_buffer = usb_buffer_alloc (udev, bytes, GFP_KERNEL, + urb->transfer_buffer = usb_buffer_alloc (udev, bytes, SLAB_KERNEL, &urb->transfer_dma); if (!urb->transfer_buffer) { usb_free_urb (urb); @@ -315,7 +315,7 @@ static int simple_io ( init_completion (&completion); if (usb_pipeout (urb->pipe)) simple_fill_buf (urb); - if ((retval = usb_submit_urb (urb, GFP_KERNEL)) != 0) + if ((retval = usb_submit_urb (urb, SLAB_KERNEL)) != 0) break; /* NOTE: no timeouts; can't be broken out of by interrupt */ @@ -374,7 +374,7 @@ alloc_sglist (int nents, int max, int vary) unsigned i; unsigned size = max; - sg = kmalloc (nents * sizeof *sg, GFP_KERNEL); + sg = kmalloc (nents * sizeof *sg, SLAB_KERNEL); if (!sg) return NULL; @@ -382,7 +382,7 @@ alloc_sglist (int nents, int max, int vary) char *buf; unsigned j; - buf = kzalloc (size, GFP_KERNEL); + buf = kzalloc (size, SLAB_KERNEL); if (!buf) { free_sglist (sg, i); return NULL; @@ -428,7 +428,7 @@ static int perform_sglist ( (udev->speed == USB_SPEED_HIGH) ? (INTERRUPT_RATE << 3) : INTERRUPT_RATE, - sg, nents, 0, GFP_KERNEL); + sg, nents, 0, SLAB_KERNEL); if (retval) break; @@ -819,7 +819,7 @@ static void ctrl_complete (struct urb *urb) /* resubmit if we need to, else mark this as done */ if ((status == 0) && (ctx->pending < ctx->count)) { - if ((status = usb_submit_urb (urb, GFP_ATOMIC)) != 0) { + if ((status = usb_submit_urb (urb, SLAB_ATOMIC)) != 0) { dbg ("can't resubmit ctrl %02x.%02x, err %d", reqp->bRequestType, reqp->bRequest, status); urb->dev = NULL; @@ -855,7 +855,7 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param) * as with bulk/intr sglists, sglen is the queue depth; it also * controls which subtests run (more tests than sglen) or rerun. 
*/ - urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL); + urb = kcalloc(param->sglen, sizeof(struct urb *), SLAB_KERNEL); if (!urb) return -ENOMEM; for (i = 0; i < param->sglen; i++) { @@ -981,7 +981,7 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param) if (!u) goto cleanup; - reqp = usb_buffer_alloc (udev, sizeof *reqp, GFP_KERNEL, + reqp = usb_buffer_alloc (udev, sizeof *reqp, SLAB_KERNEL, &u->setup_dma); if (!reqp) goto cleanup; @@ -999,7 +999,7 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param) context.urb = urb; spin_lock_irq (&context.lock); for (i = 0; i < param->sglen; i++) { - context.status = usb_submit_urb (urb [i], GFP_ATOMIC); + context.status = usb_submit_urb (urb [i], SLAB_ATOMIC); if (context.status != 0) { dbg ("can't submit urb[%d], status %d", i, context.status); @@ -1041,7 +1041,7 @@ static void unlink1_callback (struct urb *urb) // we "know" -EPIPE (stall) never happens if (!status) - status = usb_submit_urb (urb, GFP_ATOMIC); + status = usb_submit_urb (urb, SLAB_ATOMIC); if (status) { urb->status = status; complete ((struct completion *) urb->context); @@ -1067,7 +1067,7 @@ static int unlink1 (struct usbtest_dev *dev, int pipe, int size, int async) * FIXME want additional tests for when endpoint is STALLing * due to errors, or is just NAKing requests. */ - if ((retval = usb_submit_urb (urb, GFP_KERNEL)) != 0) { + if ((retval = usb_submit_urb (urb, SLAB_KERNEL)) != 0) { dev_dbg (&dev->intf->dev, "submit fail %d\n", retval); return retval; } @@ -1251,7 +1251,7 @@ static int ctrl_out (struct usbtest_dev *dev, if (length < 1 || length > 0xffff || vary >= length) return -EINVAL; - buf = kmalloc(length, GFP_KERNEL); + buf = kmalloc(length, SLAB_KERNEL); if (!buf) return -ENOMEM; @@ -1403,7 +1403,7 @@ static struct urb *iso_alloc_urb ( maxp *= 1 + (0x3 & (le16_to_cpu(desc->wMaxPacketSize) >> 11)); packets = (bytes + maxp - 1) / maxp; - urb = usb_alloc_urb (packets, GFP_KERNEL); + urb = usb_alloc_urb (packets, SLAB_KERNEL); if (!urb) return urb; urb->dev = udev; @@ -1411,7 +1411,7 @@ static struct urb *iso_alloc_urb ( urb->number_of_packets = packets; urb->transfer_buffer_length = bytes; - urb->transfer_buffer = usb_buffer_alloc (udev, bytes, GFP_KERNEL, + urb->transfer_buffer = usb_buffer_alloc (udev, bytes, SLAB_KERNEL, &urb->transfer_dma); if (!urb->transfer_buffer) { usb_free_urb (urb); @@ -1481,7 +1481,7 @@ test_iso_queue (struct usbtest_dev *dev, struct usbtest_param *param, spin_lock_irq (&context.lock); for (i = 0; i < param->sglen; i++) { ++context.pending; - status = usb_submit_urb (urbs [i], GFP_ATOMIC); + status = usb_submit_urb (urbs [i], SLAB_ATOMIC); if (status < 0) { ERROR (dev, "submit iso[%d], error %d\n", i, status); if (i == 0) { @@ -1900,7 +1900,7 @@ usbtest_probe (struct usb_interface *intf, const struct usb_device_id *id) } #endif - dev = kzalloc(sizeof(*dev), GFP_KERNEL); + dev = kzalloc(sizeof(*dev), SLAB_KERNEL); if (!dev) return -ENOMEM; info = (struct usbtest_info *) id->driver_info; @@ -1910,7 +1910,7 @@ usbtest_probe (struct usb_interface *intf, const struct usb_device_id *id) dev->intf = intf; /* cacheline-aligned scratch for i/o */ - if ((dev->buf = kmalloc (TBUF_SIZE, GFP_KERNEL)) == NULL) { + if ((dev->buf = kmalloc (TBUF_SIZE, SLAB_KERNEL)) == NULL) { kfree (dev); return -ENOMEM; } diff --git a/trunk/drivers/usb/mon/mon_text.c b/trunk/drivers/usb/mon/mon_text.c index 05cf2c9a8f84..7a2346c53284 100644 --- a/trunk/drivers/usb/mon/mon_text.c +++ b/trunk/drivers/usb/mon/mon_text.c @@ -50,7 
+50,7 @@ struct mon_event_text { #define SLAB_NAME_SZ 30 struct mon_reader_text { - struct kmem_cache *e_slab; + kmem_cache_t *e_slab; int nevents; struct list_head e_list; struct mon_reader r; /* In C, parent class can be placed anywhere */ @@ -63,7 +63,7 @@ struct mon_reader_text { char slab_name[SLAB_NAME_SZ]; }; -static void mon_text_ctor(void *, struct kmem_cache *, unsigned long); +static void mon_text_ctor(void *, kmem_cache_t *, unsigned long); /* * mon_text_submit @@ -147,7 +147,7 @@ static void mon_text_event(struct mon_reader_text *rp, struct urb *urb, stamp = mon_get_timestamp(); if (rp->nevents >= EVENT_MAX || - (ep = kmem_cache_alloc(rp->e_slab, GFP_ATOMIC)) == NULL) { + (ep = kmem_cache_alloc(rp->e_slab, SLAB_ATOMIC)) == NULL) { rp->r.m_bus->cnt_text_lost++; return; } @@ -188,7 +188,7 @@ static void mon_text_error(void *data, struct urb *urb, int error) struct mon_event_text *ep; if (rp->nevents >= EVENT_MAX || - (ep = kmem_cache_alloc(rp->e_slab, GFP_ATOMIC)) == NULL) { + (ep = kmem_cache_alloc(rp->e_slab, SLAB_ATOMIC)) == NULL) { rp->r.m_bus->cnt_text_lost++; return; } @@ -450,7 +450,7 @@ const struct file_operations mon_fops_text = { /* * Slab interface: constructor. */ -static void mon_text_ctor(void *mem, struct kmem_cache *slab, unsigned long sflags) +static void mon_text_ctor(void *mem, kmem_cache_t *slab, unsigned long sflags) { /* * Nothing to initialize. No, really! diff --git a/trunk/drivers/usb/net/catc.c b/trunk/drivers/usb/net/catc.c index 4852012735f6..907b820a5faf 100644 --- a/trunk/drivers/usb/net/catc.c +++ b/trunk/drivers/usb/net/catc.c @@ -345,7 +345,7 @@ static void catc_irq_done(struct urb *urb) } } resubmit: - status = usb_submit_urb (urb, GFP_ATOMIC); + status = usb_submit_urb (urb, SLAB_ATOMIC); if (status) err ("can't resubmit intr, %s-%s, status %d", catc->usbdev->bus->bus_name, diff --git a/trunk/drivers/usb/net/kaweth.c b/trunk/drivers/usb/net/kaweth.c index fa78326d0bf0..7c906a43e497 100644 --- a/trunk/drivers/usb/net/kaweth.c +++ b/trunk/drivers/usb/net/kaweth.c @@ -222,7 +222,7 @@ struct kaweth_device int suspend_lowmem_ctrl; int linkstate; int opened; - struct delayed_work lowmem_work; + struct work_struct lowmem_work; struct usb_device *dev; struct net_device *net; @@ -530,10 +530,9 @@ static void int_callback(struct urb *u) kaweth_resubmit_int_urb(kaweth, GFP_ATOMIC); } -static void kaweth_resubmit_tl(struct work_struct *work) +static void kaweth_resubmit_tl(void *d) { - struct kaweth_device *kaweth = - container_of(work, struct kaweth_device, lowmem_work.work); + struct kaweth_device *kaweth = (struct kaweth_device *)d; if (IS_BLOCKED(kaweth->status)) return; @@ -1127,7 +1126,7 @@ static int kaweth_probe( /* kaweth is zeroed as part of alloc_netdev */ - INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl); + INIT_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl, (void *)kaweth); SET_MODULE_OWNER(netdev); diff --git a/trunk/drivers/usb/net/net1080.c b/trunk/drivers/usb/net/net1080.c index 493635954513..a77410562e12 100644 --- a/trunk/drivers/usb/net/net1080.c +++ b/trunk/drivers/usb/net/net1080.c @@ -383,7 +383,7 @@ static void nc_ensure_sync(struct usbnet *dev) int status; /* Send a flush */ - urb = usb_alloc_urb(0, GFP_ATOMIC); + urb = usb_alloc_urb(0, SLAB_ATOMIC); if (!urb) return; diff --git a/trunk/drivers/usb/net/pegasus.c b/trunk/drivers/usb/net/pegasus.c index d48c024cff59..69eb0db399df 100644 --- a/trunk/drivers/usb/net/pegasus.c +++ b/trunk/drivers/usb/net/pegasus.c @@ -856,7 +856,7 @@ static void intr_callback(struct urb 
*urb) pegasus->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4]; } - status = usb_submit_urb(urb, GFP_ATOMIC); + status = usb_submit_urb(urb, SLAB_ATOMIC); if (status == -ENODEV) netif_device_detach(pegasus->net); if (status && netif_msg_timer(pegasus)) @@ -1281,9 +1281,9 @@ static inline void setup_pegasus_II(pegasus_t * pegasus) static struct workqueue_struct *pegasus_workqueue = NULL; #define CARRIER_CHECK_DELAY (2 * HZ) -static void check_carrier(struct work_struct *work) +static void check_carrier(void *data) { - pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work); + pegasus_t *pegasus = data; set_carrier(pegasus->net); if (!(pegasus->flags & PEGASUS_UNPLUG)) { queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check, @@ -1319,7 +1319,7 @@ static int pegasus_probe(struct usb_interface *intf, tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus); - INIT_DELAYED_WORK(&pegasus->carrier_check, check_carrier); + INIT_WORK(&pegasus->carrier_check, check_carrier, pegasus); pegasus->intf = intf; pegasus->usb = dev; diff --git a/trunk/drivers/usb/net/pegasus.h b/trunk/drivers/usb/net/pegasus.h index 98f6898cae1f..006438069b66 100644 --- a/trunk/drivers/usb/net/pegasus.h +++ b/trunk/drivers/usb/net/pegasus.h @@ -95,7 +95,7 @@ typedef struct pegasus { int dev_index; int intr_interval; struct tasklet_struct rx_tl; - struct delayed_work carrier_check; + struct work_struct carrier_check; struct urb *ctrl_urb, *rx_urb, *tx_urb, *intr_urb; struct sk_buff *rx_pool[RX_SKBS]; struct sk_buff *rx_skb; diff --git a/trunk/drivers/usb/net/rndis_host.c b/trunk/drivers/usb/net/rndis_host.c index 99f26b3e502f..c2a28d88ef3c 100644 --- a/trunk/drivers/usb/net/rndis_host.c +++ b/trunk/drivers/usb/net/rndis_host.c @@ -469,7 +469,7 @@ static void rndis_unbind(struct usbnet *dev, struct usb_interface *intf) struct rndis_halt *halt; /* try to clear any rndis state/activity (no i/o from stack!) */ - halt = kcalloc(1, sizeof *halt, GFP_KERNEL); + halt = kcalloc(1, sizeof *halt, SLAB_KERNEL); if (halt) { halt->msg_type = RNDIS_MSG_HALT; halt->msg_len = ccpu2(sizeof *halt); diff --git a/trunk/drivers/usb/net/rtl8150.c b/trunk/drivers/usb/net/rtl8150.c index c54235f73cb6..72171f94ded4 100644 --- a/trunk/drivers/usb/net/rtl8150.c +++ b/trunk/drivers/usb/net/rtl8150.c @@ -587,7 +587,7 @@ static void intr_callback(struct urb *urb) } resubmit: - status = usb_submit_urb (urb, GFP_ATOMIC); + status = usb_submit_urb (urb, SLAB_ATOMIC); if (status == -ENODEV) netif_device_detach(dev->netdev); else if (status) diff --git a/trunk/drivers/usb/net/usbnet.c b/trunk/drivers/usb/net/usbnet.c index 6e39e9988259..7672e11c94c4 100644 --- a/trunk/drivers/usb/net/usbnet.c +++ b/trunk/drivers/usb/net/usbnet.c @@ -179,9 +179,9 @@ static int init_status (struct usbnet *dev, struct usb_interface *intf) period = max ((int) dev->status->desc.bInterval, (dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3); - buf = kmalloc (maxp, GFP_KERNEL); + buf = kmalloc (maxp, SLAB_KERNEL); if (buf) { - dev->interrupt = usb_alloc_urb (0, GFP_KERNEL); + dev->interrupt = usb_alloc_urb (0, SLAB_KERNEL); if (!dev->interrupt) { kfree (buf); return -ENOMEM; @@ -782,10 +782,9 @@ static struct ethtool_ops usbnet_ethtool_ops = { * especially now that control transfers can be queued. 
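Note: the hunks above (mon_text.c and several filesystems later in this patch) replace "struct kmem_cache" with the older kmem_cache_t name. A short sketch of the relationship, assuming the pre-2.6.20 typedef in <linux/slab.h>; the constructor callback keeps the same three-argument shape either way, only the spelling of the cache type changes (the function name below is illustrative):

	struct kmem_cache;			/* opaque cache descriptor */
	typedef struct kmem_cache kmem_cache_t;	/* legacy alias used in these hunks */

	static void example_init_once(void *obj, kmem_cache_t *cachep,
				      unsigned long flags)
	{
		/* one-time initialisation of the object at obj goes here */
	}
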
*/ static void -kevent (struct work_struct *work) +kevent (void *data) { - struct usbnet *dev = - container_of(work, struct usbnet, kevent); + struct usbnet *dev = data; int status; /* usb_clear_halt() needs a thread context */ @@ -1147,7 +1146,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) skb_queue_head_init (&dev->done); dev->bh.func = usbnet_bh; dev->bh.data = (unsigned long) dev; - INIT_WORK (&dev->kevent, kevent); + INIT_WORK (&dev->kevent, kevent, dev); dev->delay.function = usbnet_bh; dev->delay.data = (unsigned long) dev; init_timer (&dev->delay); diff --git a/trunk/drivers/usb/serial/aircable.c b/trunk/drivers/usb/serial/aircable.c index 86bcf63b6ba5..b1b5707bc99a 100644 --- a/trunk/drivers/usb/serial/aircable.c +++ b/trunk/drivers/usb/serial/aircable.c @@ -92,7 +92,6 @@ struct aircable_private { struct circ_buf *rx_buf; /* read buffer */ int rx_flags; /* for throttilng */ struct work_struct rx_work; /* work cue for the receiving line */ - struct usb_serial_port *port; /* USB port with which associated */ }; /* Private methods */ @@ -252,11 +251,10 @@ static void aircable_send(struct usb_serial_port *port) schedule_work(&port->work); } -static void aircable_read(struct work_struct *work) +static void aircable_read(void *params) { - struct aircable_private *priv = - container_of(work, struct aircable_private, rx_work); - struct usb_serial_port *port = priv->port; + struct usb_serial_port *port = params; + struct aircable_private *priv = usb_get_serial_port_data(port); struct tty_struct *tty; unsigned char *data; int count; @@ -351,8 +349,7 @@ static int aircable_attach (struct usb_serial *serial) } priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED); - priv->port = port; - INIT_WORK(&priv->rx_work, aircable_read); + INIT_WORK(&priv->rx_work, aircable_read, port); usb_set_serial_port_data(serial->port[0], priv); @@ -519,7 +516,7 @@ static void aircable_read_bulk_callback(struct urb *urb) package_length - shift); } } - aircable_read(&priv->rx_work); + aircable_read(port); } /* Schedule the next read _if_ we are still open */ diff --git a/trunk/drivers/usb/serial/digi_acceleport.c b/trunk/drivers/usb/serial/digi_acceleport.c index 83d0e21145b0..5e3ac281a2f8 100644 --- a/trunk/drivers/usb/serial/digi_acceleport.c +++ b/trunk/drivers/usb/serial/digi_acceleport.c @@ -430,14 +430,13 @@ struct digi_port { int dp_in_close; /* close in progress */ wait_queue_head_t dp_close_wait; /* wait queue for close */ struct work_struct dp_wakeup_work; - struct usb_serial_port *dp_port; }; /* Local Function Declarations */ static void digi_wakeup_write( struct usb_serial_port *port ); -static void digi_wakeup_write_lock(struct work_struct *work); +static void digi_wakeup_write_lock(void *); static int digi_write_oob_command( struct usb_serial_port *port, unsigned char *buf, int count, int interruptible ); static int digi_write_inb_command( struct usb_serial_port *port, @@ -599,12 +598,11 @@ static inline long cond_wait_interruptible_timeout_irqrestore( * on writes. 
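Note: the work-queue conversions running through kaweth, pegasus, usbnet, aircable and digi_acceleport all follow one pattern: the older API passed an opaque data pointer at INIT_WORK() time, while the newer one hands the callback its work_struct and the container is recovered with container_of(). A hedged sketch of both conventions; the struct and function names are illustrative, not from the patch:

	#include <linux/workqueue.h>

	struct example_dev {
		struct work_struct kevent;
		/* ... driver state ... */
	};

	/* Old convention: the callback receives the third INIT_WORK() argument. */
	static void example_kevent_old(void *data)
	{
		struct example_dev *dev = data;
		/* ... handle deferred work for dev ... */
	}

	/* New convention: the callback receives the work_struct itself. */
	static void example_kevent_new(struct work_struct *work)
	{
		struct example_dev *dev =
			container_of(work, struct example_dev, kevent);
		/* ... handle deferred work for dev ... */
	}

	/*
	 * Old API:  INIT_WORK(&dev->kevent, example_kevent_old, dev);
	 * New API:  INIT_WORK(&dev->kevent, example_kevent_new);
	 */
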
*/ -static void digi_wakeup_write_lock(struct work_struct *work) +static void digi_wakeup_write_lock(void *arg) { - struct digi_port *priv = - container_of(work, struct digi_port, dp_wakeup_work); - struct usb_serial_port *port = priv->dp_port; + struct usb_serial_port *port = arg; unsigned long flags; + struct digi_port *priv = usb_get_serial_port_data(port); spin_lock_irqsave( &priv->dp_port_lock, flags ); @@ -1704,8 +1702,8 @@ dbg( "digi_startup: TOP" ); init_waitqueue_head( &priv->dp_flush_wait ); priv->dp_in_close = 0; init_waitqueue_head( &priv->dp_close_wait ); - INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock); - priv->dp_port = serial->port[i]; + INIT_WORK(&priv->dp_wakeup_work, + digi_wakeup_write_lock, serial->port[i]); /* initialize write wait queue for this port */ init_waitqueue_head( &serial->port[i]->write_wait ); diff --git a/trunk/drivers/usb/serial/ftdi_sio.c b/trunk/drivers/usb/serial/ftdi_sio.c index 72e4d48f51e9..89ce2775be15 100644 --- a/trunk/drivers/usb/serial/ftdi_sio.c +++ b/trunk/drivers/usb/serial/ftdi_sio.c @@ -559,8 +559,7 @@ struct ftdi_private { char prev_status, diff_status; /* Used for TIOCMIWAIT */ __u8 rx_flags; /* receive state flags (throttling) */ spinlock_t rx_lock; /* spinlock for receive state */ - struct delayed_work rx_work; - struct usb_serial_port *port; + struct work_struct rx_work; int rx_processed; unsigned long rx_bytes; @@ -594,7 +593,7 @@ static int ftdi_write_room (struct usb_serial_port *port); static int ftdi_chars_in_buffer (struct usb_serial_port *port); static void ftdi_write_bulk_callback (struct urb *urb); static void ftdi_read_bulk_callback (struct urb *urb); -static void ftdi_process_read (struct work_struct *work); +static void ftdi_process_read (void *param); static void ftdi_set_termios (struct usb_serial_port *port, struct termios * old); static int ftdi_tiocmget (struct usb_serial_port *port, struct file *file); static int ftdi_tiocmset (struct usb_serial_port *port, struct file * file, unsigned int set, unsigned int clear); @@ -1202,8 +1201,7 @@ static int ftdi_sio_attach (struct usb_serial *serial) port->read_urb->transfer_buffer_length = BUFSZ; } - INIT_DELAYED_WORK(&priv->rx_work, ftdi_process_read); - priv->port = port; + INIT_WORK(&priv->rx_work, ftdi_process_read, port); /* Free port's existing write urb and transfer buffer. 
*/ if (port->write_urb) { @@ -1642,18 +1640,17 @@ static void ftdi_read_bulk_callback (struct urb *urb) priv->rx_bytes += countread; spin_unlock_irqrestore(&priv->rx_lock, flags); - ftdi_process_read(&priv->rx_work.work); + ftdi_process_read(port); } /* ftdi_read_bulk_callback */ -static void ftdi_process_read (struct work_struct *work) +static void ftdi_process_read (void *param) { /* ftdi_process_read */ - struct ftdi_private *priv = - container_of(work, struct ftdi_private, rx_work.work); - struct usb_serial_port *port = priv->port; + struct usb_serial_port *port = (struct usb_serial_port*)param; struct urb *urb; struct tty_struct *tty; + struct ftdi_private *priv; char error_flag; unsigned char *data; @@ -2182,7 +2179,7 @@ static void ftdi_unthrottle (struct usb_serial_port *port) spin_unlock_irqrestore(&priv->rx_lock, flags); if (actually_throttled) - schedule_delayed_work(&priv->rx_work, 0); + schedule_work(&priv->rx_work); } static int __init ftdi_init (void) diff --git a/trunk/drivers/usb/serial/keyspan_pda.c b/trunk/drivers/usb/serial/keyspan_pda.c index e09a0bfe6231..909005107ea2 100644 --- a/trunk/drivers/usb/serial/keyspan_pda.c +++ b/trunk/drivers/usb/serial/keyspan_pda.c @@ -120,8 +120,6 @@ struct keyspan_pda_private { int tx_throttled; struct work_struct wakeup_work; struct work_struct unthrottle_work; - struct usb_serial *serial; - struct usb_serial_port *port; }; @@ -177,11 +175,9 @@ static struct usb_device_id id_table_fake_xircom [] = { }; #endif -static void keyspan_pda_wakeup_write(struct work_struct *work) +static void keyspan_pda_wakeup_write( struct usb_serial_port *port ) { - struct keyspan_pda_private *priv = - container_of(work, struct keyspan_pda_private, wakeup_work); - struct usb_serial_port *port = priv->port; + struct tty_struct *tty = port->tty; /* wake up port processes */ @@ -191,11 +187,8 @@ static void keyspan_pda_wakeup_write(struct work_struct *work) tty_wakeup(tty); } -static void keyspan_pda_request_unthrottle(struct work_struct *work) +static void keyspan_pda_request_unthrottle( struct usb_serial *serial ) { - struct keyspan_pda_private *priv = - container_of(work, struct keyspan_pda_private, unthrottle_work); - struct usb_serial *serial = priv->serial; int result; dbg(" request_unthrottle"); @@ -772,10 +765,11 @@ static int keyspan_pda_startup (struct usb_serial *serial) return (1); /* error */ usb_set_serial_port_data(serial->port[0], priv); init_waitqueue_head(&serial->port[0]->write_wait); - INIT_WORK(&priv->wakeup_work, keyspan_pda_wakeup_write); - INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle); - priv->serial = serial; - priv->port = serial->port[0]; + INIT_WORK(&priv->wakeup_work, (void *)keyspan_pda_wakeup_write, + (void *)(serial->port[0])); + INIT_WORK(&priv->unthrottle_work, + (void *)keyspan_pda_request_unthrottle, + (void *)(serial)); return (0); } diff --git a/trunk/drivers/usb/serial/mos7720.c b/trunk/drivers/usb/serial/mos7720.c index 70f93b18292f..82cd15b894b0 100644 --- a/trunk/drivers/usb/serial/mos7720.c +++ b/trunk/drivers/usb/serial/mos7720.c @@ -363,7 +363,7 @@ static int mos7720_open(struct usb_serial_port *port, struct file * filp) /* Initialising the write urb pool */ for (j = 0; j < NUM_URBS; ++j) { - urb = usb_alloc_urb(0,GFP_ATOMIC); + urb = usb_alloc_urb(0,SLAB_ATOMIC); mos7720_port->write_urb_pool[j] = urb; if (urb == NULL) { diff --git a/trunk/drivers/usb/serial/mos7840.c b/trunk/drivers/usb/serial/mos7840.c index 5432c6340086..02c89e10b2cf 100644 --- a/trunk/drivers/usb/serial/mos7840.c +++ 
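Note: in the ftdi_sio receive path the same conversion also toggles between plain and delayed work. A rough sketch under the same naming caveat (only rx_work comes from the hunk); a delayed_work scheduled with a zero-jiffy delay is the newer replacement for the immediate schedule_work() call:

	#include <linux/workqueue.h>

	struct example_priv {
		struct delayed_work rx_work;	/* older code used struct work_struct */
	};

	static void example_unthrottle(struct example_priv *priv)
	{
		/* Newer form: schedule the embedded work with no delay ... */
		schedule_delayed_work(&priv->rx_work, 0);
		/* ... replacing the older immediate form:
		 *	schedule_work(&priv->rx_work);
		 */
	}
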
b/trunk/drivers/usb/serial/mos7840.c @@ -826,7 +826,7 @@ static int mos7840_open(struct usb_serial_port *port, struct file *filp) /* Initialising the write urb pool */ for (j = 0; j < NUM_URBS; ++j) { - urb = usb_alloc_urb(0, GFP_ATOMIC); + urb = usb_alloc_urb(0, SLAB_ATOMIC); mos7840_port->write_urb_pool[j] = urb; if (urb == NULL) { @@ -2786,7 +2786,7 @@ static int mos7840_startup(struct usb_serial *serial) i + 1, status); } - mos7840_port->control_urb = usb_alloc_urb(0, GFP_ATOMIC); + mos7840_port->control_urb = usb_alloc_urb(0, SLAB_ATOMIC); mos7840_port->ctrl_buf = kmalloc(16, GFP_KERNEL); } diff --git a/trunk/drivers/usb/serial/usb-serial.c b/trunk/drivers/usb/serial/usb-serial.c index 3d5072f14b8d..c1257d5292f5 100644 --- a/trunk/drivers/usb/serial/usb-serial.c +++ b/trunk/drivers/usb/serial/usb-serial.c @@ -533,10 +533,9 @@ void usb_serial_port_softint(struct usb_serial_port *port) schedule_work(&port->work); } -static void usb_serial_port_work(struct work_struct *work) +static void usb_serial_port_work(void *private) { - struct usb_serial_port *port = - container_of(work, struct usb_serial_port, work); + struct usb_serial_port *port = private; struct tty_struct *tty; dbg("%s - port %d", __FUNCTION__, port->number); @@ -800,7 +799,7 @@ int usb_serial_probe(struct usb_interface *interface, port->serial = serial; spin_lock_init(&port->lock); mutex_init(&port->mutex); - INIT_WORK(&port->work, usb_serial_port_work); + INIT_WORK(&port->work, usb_serial_port_work, port); serial->port[i] = port; } diff --git a/trunk/drivers/usb/serial/whiteheat.c b/trunk/drivers/usb/serial/whiteheat.c index 154c7d290597..4d1cd7aeccd3 100644 --- a/trunk/drivers/usb/serial/whiteheat.c +++ b/trunk/drivers/usb/serial/whiteheat.c @@ -227,7 +227,6 @@ struct whiteheat_private { struct list_head rx_urbs_submitted; struct list_head rx_urb_q; struct work_struct rx_work; - struct usb_serial_port *port; struct list_head tx_urbs_free; struct list_head tx_urbs_submitted; }; @@ -242,7 +241,7 @@ static void command_port_read_callback(struct urb *urb); static int start_port_read(struct usb_serial_port *port); static struct whiteheat_urb_wrap *urb_to_wrap(struct urb *urb, struct list_head *head); static struct list_head *list_first(struct list_head *head); -static void rx_data_softint(struct work_struct *work); +static void rx_data_softint(void *private); static int firm_send_command(struct usb_serial_port *port, __u8 command, __u8 *data, __u8 datasize); static int firm_open(struct usb_serial_port *port); @@ -425,8 +424,7 @@ static int whiteheat_attach (struct usb_serial *serial) spin_lock_init(&info->lock); info->flags = 0; info->mcr = 0; - INIT_WORK(&info->rx_work, rx_data_softint); - info->port = port; + INIT_WORK(&info->rx_work, rx_data_softint, port); INIT_LIST_HEAD(&info->rx_urbs_free); INIT_LIST_HEAD(&info->rx_urbs_submitted); @@ -951,7 +949,7 @@ static void whiteheat_unthrottle (struct usb_serial_port *port) spin_unlock_irqrestore(&info->lock, flags); if (actually_throttled) - rx_data_softint(&info->rx_work); + rx_data_softint(port); return; } @@ -1402,11 +1400,10 @@ static struct list_head *list_first(struct list_head *head) } -static void rx_data_softint(struct work_struct *work) +static void rx_data_softint(void *private) { - struct whiteheat_private *info = - container_of(work, struct whiteheat_private, rx_work); - struct usb_serial_port *port = info->port; + struct usb_serial_port *port = (struct usb_serial_port *)private; + struct whiteheat_private *info = usb_get_serial_port_data(port); struct tty_struct *tty 
= port->tty; struct whiteheat_urb_wrap *wrap; struct urb *urb; diff --git a/trunk/drivers/usb/storage/onetouch.c b/trunk/drivers/usb/storage/onetouch.c index e565d3d2ab29..3a158d58441f 100644 --- a/trunk/drivers/usb/storage/onetouch.c +++ b/trunk/drivers/usb/storage/onetouch.c @@ -76,7 +76,7 @@ static void usb_onetouch_irq(struct urb *urb) input_sync(dev); resubmit: - status = usb_submit_urb (urb, GFP_ATOMIC); + status = usb_submit_urb (urb, SLAB_ATOMIC); if (status) err ("can't resubmit intr, %s-%s/input0, status %d", onetouch->udev->bus->bus_name, @@ -154,7 +154,7 @@ int onetouch_connect_input(struct us_data *ss) goto fail1; onetouch->data = usb_buffer_alloc(udev, ONETOUCH_PKT_LEN, - GFP_ATOMIC, &onetouch->data_dma); + SLAB_ATOMIC, &onetouch->data_dma); if (!onetouch->data) goto fail1; diff --git a/trunk/drivers/usb/storage/transport.c b/trunk/drivers/usb/storage/transport.c index 323293a3e61f..47644b5b6155 100644 --- a/trunk/drivers/usb/storage/transport.c +++ b/trunk/drivers/usb/storage/transport.c @@ -427,7 +427,7 @@ static int usb_stor_bulk_transfer_sglist(struct us_data *us, unsigned int pipe, US_DEBUGP("%s: xfer %u bytes, %d entries\n", __FUNCTION__, length, num_sg); result = usb_sg_init(&us->current_sg, us->pusb_dev, pipe, 0, - sg, num_sg, length, GFP_NOIO); + sg, num_sg, length, SLAB_NOIO); if (result) { US_DEBUGP("usb_sg_init returned %d\n", result); return USB_STOR_XFER_ERROR; diff --git a/trunk/drivers/usb/storage/usb.c b/trunk/drivers/usb/storage/usb.c index 70644506651f..b401084b3d22 100644 --- a/trunk/drivers/usb/storage/usb.c +++ b/trunk/drivers/usb/storage/usb.c @@ -49,7 +49,7 @@ #include #include -#include +#include #include #include #include diff --git a/trunk/drivers/video/console/fbcon.c b/trunk/drivers/video/console/fbcon.c index 31f476a64790..302174b8e477 100644 --- a/trunk/drivers/video/console/fbcon.c +++ b/trunk/drivers/video/console/fbcon.c @@ -383,9 +383,9 @@ static void fbcon_update_softback(struct vc_data *vc) softback_top = 0; } -static void fb_flashcursor(struct work_struct *work) +static void fb_flashcursor(void *private) { - struct fb_info *info = container_of(work, struct fb_info, queue); + struct fb_info *info = private; struct fbcon_ops *ops = info->fbcon_par; struct display *p; struct vc_data *vc = NULL; @@ -442,7 +442,7 @@ static void fbcon_add_cursor_timer(struct fb_info *info) if ((!info->queue.func || info->queue.func == fb_flashcursor) && !(ops->flags & FBCON_FLAGS_CURSOR_TIMER)) { if (!info->queue.func) - INIT_WORK(&info->queue, fb_flashcursor); + INIT_WORK(&info->queue, fb_flashcursor, info); init_timer(&ops->cursor_timer); ops->cursor_timer.function = cursor_timer_handler; diff --git a/trunk/drivers/video/geode/gxfb_core.c b/trunk/drivers/video/geode/gxfb_core.c index a454dcb8e215..0d3643fc6293 100644 --- a/trunk/drivers/video/geode/gxfb_core.c +++ b/trunk/drivers/video/geode/gxfb_core.c @@ -380,7 +380,7 @@ static void gxfb_remove(struct pci_dev *pdev) } static struct pci_device_id gxfb_id_table[] = { - { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_GX_VIDEO, + { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_VIDEO, PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY << 16, 0xff0000, 0 }, { 0, } diff --git a/trunk/drivers/video/pxafb.c b/trunk/drivers/video/pxafb.c index 38eb0b69c2d7..8a8ae55a7403 100644 --- a/trunk/drivers/video/pxafb.c +++ b/trunk/drivers/video/pxafb.c @@ -964,10 +964,9 @@ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state) * Our LCD controller task (which is called when we blank or unblank) * via keventd. 
*/ -static void pxafb_task(struct work_struct *work) +static void pxafb_task(void *dummy) { - struct pxafb_info *fbi = - container_of(work, struct pxafb_info, task); + struct pxafb_info *fbi = dummy; u_int state = xchg(&fbi->task_state, -1); set_ctrlr_state(fbi, state); @@ -1160,7 +1159,7 @@ static struct pxafb_info * __init pxafb_init_fbinfo(struct device *dev) } init_waitqueue_head(&fbi->ctrlr_wait); - INIT_WORK(&fbi->task, pxafb_task); + INIT_WORK(&fbi->task, pxafb_task, fbi); init_MUTEX(&fbi->ctrlr_sem); return fbi; diff --git a/trunk/drivers/w1/Makefile b/trunk/drivers/w1/Makefile index 6bb0b54965f2..93845a2c7c21 100644 --- a/trunk/drivers/w1/Makefile +++ b/trunk/drivers/w1/Makefile @@ -2,6 +2,10 @@ # Makefile for the Dallas's 1-wire bus. # +ifeq ($(CONFIG_W1_DS2433_CRC), y) +EXTRA_CFLAGS += -DCONFIG_W1_F23_CRC +endif + obj-$(CONFIG_W1) += wire.o wire-objs := w1.o w1_int.o w1_family.o w1_netlink.o w1_io.o diff --git a/trunk/drivers/w1/slaves/Makefile b/trunk/drivers/w1/slaves/Makefile index 725dcfdfddb4..70e21e2d70c3 100644 --- a/trunk/drivers/w1/slaves/Makefile +++ b/trunk/drivers/w1/slaves/Makefile @@ -2,6 +2,10 @@ # Makefile for the Dallas's 1-wire slaves. # +ifeq ($(CONFIG_W1_SLAVE_DS2433_CRC), y) +EXTRA_CFLAGS += -DCONFIG_W1_F23_CRC +endif + obj-$(CONFIG_W1_SLAVE_THERM) += w1_therm.o obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o diff --git a/trunk/drivers/w1/slaves/w1_ds2433.c b/trunk/drivers/w1/slaves/w1_ds2433.c index 8ea17a53eed8..2ac238f1480e 100644 --- a/trunk/drivers/w1/slaves/w1_ds2433.c +++ b/trunk/drivers/w1/slaves/w1_ds2433.c @@ -13,7 +13,7 @@ #include #include #include -#ifdef CONFIG_W1_SLAVE_DS2433_CRC +#ifdef CONFIG_W1_F23_CRC #include #define CRC16_INIT 0 @@ -62,7 +62,7 @@ static inline size_t w1_f23_fix_count(loff_t off, size_t count, size_t size) return count; } -#ifdef CONFIG_W1_SLAVE_DS2433_CRC +#ifdef CONFIG_W1_F23_CRC static int w1_f23_refresh_block(struct w1_slave *sl, struct w1_f23_data *data, int block) { @@ -89,13 +89,13 @@ static int w1_f23_refresh_block(struct w1_slave *sl, struct w1_f23_data *data, return 0; } -#endif /* CONFIG_W1_SLAVE_DS2433_CRC */ +#endif /* CONFIG_W1_F23_CRC */ static ssize_t w1_f23_read_bin(struct kobject *kobj, char *buf, loff_t off, size_t count) { struct w1_slave *sl = kobj_to_w1_slave(kobj); -#ifdef CONFIG_W1_SLAVE_DS2433_CRC +#ifdef CONFIG_W1_F23_CRC struct w1_f23_data *data = sl->family_data; int i, min_page, max_page; #else @@ -107,7 +107,7 @@ static ssize_t w1_f23_read_bin(struct kobject *kobj, char *buf, loff_t off, mutex_lock(&sl->master->mutex); -#ifdef CONFIG_W1_SLAVE_DS2433_CRC +#ifdef CONFIG_W1_F23_CRC min_page = (off >> W1_PAGE_BITS); max_page = (off + count - 1) >> W1_PAGE_BITS; @@ -119,7 +119,7 @@ static ssize_t w1_f23_read_bin(struct kobject *kobj, char *buf, loff_t off, } memcpy(buf, &data->memory[off], count); -#else /* CONFIG_W1_SLAVE_DS2433_CRC */ +#else /* CONFIG_W1_F23_CRC */ /* read directly from the EEPROM */ if (w1_reset_select_slave(sl)) { @@ -133,7 +133,7 @@ static ssize_t w1_f23_read_bin(struct kobject *kobj, char *buf, loff_t off, w1_write_block(sl->master, wrbuf, 3); w1_read_block(sl->master, buf, count); -#endif /* CONFIG_W1_SLAVE_DS2433_CRC */ +#endif /* CONFIG_W1_F23_CRC */ out_up: mutex_unlock(&sl->master->mutex); @@ -208,7 +208,7 @@ static ssize_t w1_f23_write_bin(struct kobject *kobj, char *buf, loff_t off, if ((count = w1_f23_fix_count(off, count, W1_EEPROM_SIZE)) == 0) return 0; -#ifdef CONFIG_W1_SLAVE_DS2433_CRC +#ifdef CONFIG_W1_F23_CRC /* can only 
write full blocks in cached mode */ if ((off & W1_PAGE_MASK) || (count & W1_PAGE_MASK)) { dev_err(&sl->dev, "invalid offset/count off=%d cnt=%zd\n", @@ -223,7 +223,7 @@ static ssize_t w1_f23_write_bin(struct kobject *kobj, char *buf, loff_t off, return -EINVAL; } } -#endif /* CONFIG_W1_SLAVE_DS2433_CRC */ +#endif /* CONFIG_W1_F23_CRC */ mutex_lock(&sl->master->mutex); @@ -262,7 +262,7 @@ static struct bin_attribute w1_f23_bin_attr = { static int w1_f23_add_slave(struct w1_slave *sl) { int err; -#ifdef CONFIG_W1_SLAVE_DS2433_CRC +#ifdef CONFIG_W1_F23_CRC struct w1_f23_data *data; data = kmalloc(sizeof(struct w1_f23_data), GFP_KERNEL); @@ -271,24 +271,24 @@ static int w1_f23_add_slave(struct w1_slave *sl) memset(data, 0, sizeof(struct w1_f23_data)); sl->family_data = data; -#endif /* CONFIG_W1_SLAVE_DS2433_CRC */ +#endif /* CONFIG_W1_F23_CRC */ err = sysfs_create_bin_file(&sl->dev.kobj, &w1_f23_bin_attr); -#ifdef CONFIG_W1_SLAVE_DS2433_CRC +#ifdef CONFIG_W1_F23_CRC if (err) kfree(data); -#endif /* CONFIG_W1_SLAVE_DS2433_CRC */ +#endif /* CONFIG_W1_F23_CRC */ return err; } static void w1_f23_remove_slave(struct w1_slave *sl) { -#ifdef CONFIG_W1_SLAVE_DS2433_CRC +#ifdef CONFIG_W1_F23_CRC kfree(sl->family_data); sl->family_data = NULL; -#endif /* CONFIG_W1_SLAVE_DS2433_CRC */ +#endif /* CONFIG_W1_F23_CRC */ sysfs_remove_bin_file(&sl->dev.kobj, &w1_f23_bin_attr); } diff --git a/trunk/drivers/w1/w1.c b/trunk/drivers/w1/w1.c index 63c07243993c..de3e9791f80d 100644 --- a/trunk/drivers/w1/w1.c +++ b/trunk/drivers/w1/w1.c @@ -31,7 +31,6 @@ #include #include #include -#include #include diff --git a/trunk/fs/9p/mux.c b/trunk/fs/9p/mux.c index 944273c3dbff..90a79c784549 100644 --- a/trunk/fs/9p/mux.c +++ b/trunk/fs/9p/mux.c @@ -110,8 +110,8 @@ struct v9fs_mux_rpc { }; static int v9fs_poll_proc(void *); -static void v9fs_read_work(struct work_struct *work); -static void v9fs_write_work(struct work_struct *work); +static void v9fs_read_work(void *); +static void v9fs_write_work(void *); static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address, poll_table * p); static u16 v9fs_mux_get_tag(struct v9fs_mux_data *); @@ -297,8 +297,8 @@ struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize, m->rbuf = NULL; m->wpos = m->wsize = 0; m->wbuf = NULL; - INIT_WORK(&m->rq, v9fs_read_work); - INIT_WORK(&m->wq, v9fs_write_work); + INIT_WORK(&m->rq, v9fs_read_work, m); + INIT_WORK(&m->wq, v9fs_write_work, m); m->wsched = 0; memset(&m->poll_waddr, 0, sizeof(m->poll_waddr)); m->poll_task = NULL; @@ -458,13 +458,13 @@ static int v9fs_poll_proc(void *a) /** * v9fs_write_work - called when a transport can send some data */ -static void v9fs_write_work(struct work_struct *work) +static void v9fs_write_work(void *a) { int n, err; struct v9fs_mux_data *m; struct v9fs_req *req; - m = container_of(work, struct v9fs_mux_data, wq); + m = a; if (m->err < 0) { clear_bit(Wworksched, &m->wsched); @@ -564,7 +564,7 @@ static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req) /** * v9fs_read_work - called when there is some data to be read from a transport */ -static void v9fs_read_work(struct work_struct *work) +static void v9fs_read_work(void *a) { int n, err; struct v9fs_mux_data *m; @@ -572,7 +572,7 @@ static void v9fs_read_work(struct work_struct *work) struct v9fs_fcall *rcall; char *rbuf; - m = container_of(work, struct v9fs_mux_data, rq); + m = a; if (m->err < 0) return; diff --git a/trunk/fs/9p/vfs_inode.c b/trunk/fs/9p/vfs_inode.c index 18f26cdfd882..5241c600ce28 100644 
--- a/trunk/fs/9p/vfs_inode.c +++ b/trunk/fs/9p/vfs_inode.c @@ -256,7 +256,7 @@ static int v9fs_create(struct v9fs_session_info *v9ses, u32 pfid, char *name, u32 perm, u8 mode, char *extension, u32 *fidp, struct v9fs_qid *qid, u32 *iounit) { - int fid; + u32 fid; int err; struct v9fs_fcall *fcall; @@ -310,7 +310,7 @@ static struct v9fs_fid* v9fs_clone_walk(struct v9fs_session_info *v9ses, u32 fid, struct dentry *dentry) { int err; - int nfid; + u32 nfid; struct v9fs_fid *ret; struct v9fs_fcall *fcall; diff --git a/trunk/fs/adfs/super.c b/trunk/fs/adfs/super.c index 5023351a7afe..9ade139086fc 100644 --- a/trunk/fs/adfs/super.c +++ b/trunk/fs/adfs/super.c @@ -36,7 +36,7 @@ void __adfs_error(struct super_block *sb, const char *function, const char *fmt, va_list args; va_start(args, fmt); - vsnprintf(error_buf, sizeof(error_buf), fmt, args); + vsprintf(error_buf, fmt, args); va_end(args); printk(KERN_CRIT "ADFS-fs error (device %s)%s%s: %s\n", @@ -212,12 +212,12 @@ static int adfs_statfs(struct dentry *dentry, struct kstatfs *buf) return 0; } -static struct kmem_cache *adfs_inode_cachep; +static kmem_cache_t *adfs_inode_cachep; static struct inode *adfs_alloc_inode(struct super_block *sb) { struct adfs_inode_info *ei; - ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, GFP_KERNEL); + ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, SLAB_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -228,7 +228,7 @@ static void adfs_destroy_inode(struct inode *inode) kmem_cache_free(adfs_inode_cachep, ADFS_I(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct adfs_inode_info *ei = (struct adfs_inode_info *) foo; diff --git a/trunk/fs/affs/amigaffs.c b/trunk/fs/affs/amigaffs.c index f4de4b98004f..ccd624ef4272 100644 --- a/trunk/fs/affs/amigaffs.c +++ b/trunk/fs/affs/amigaffs.c @@ -445,7 +445,7 @@ affs_error(struct super_block *sb, const char *function, const char *fmt, ...) va_list args; va_start(args,fmt); - vsnprintf(ErrorBuffer,sizeof(ErrorBuffer),fmt,args); + vsprintf(ErrorBuffer,fmt,args); va_end(args); printk(KERN_CRIT "AFFS error (device %s): %s(): %s\n", sb->s_id, @@ -461,7 +461,7 @@ affs_warning(struct super_block *sb, const char *function, const char *fmt, ...) 
va_list args; va_start(args,fmt); - vsnprintf(ErrorBuffer,sizeof(ErrorBuffer),fmt,args); + vsprintf(ErrorBuffer,fmt,args); va_end(args); printk(KERN_WARNING "AFFS warning (device %s): %s(): %s\n", sb->s_id, diff --git a/trunk/fs/affs/bitmap.c b/trunk/fs/affs/bitmap.c index b330009fe42d..b0b953683c1a 100644 --- a/trunk/fs/affs/bitmap.c +++ b/trunk/fs/affs/bitmap.c @@ -289,11 +289,12 @@ int affs_init_bitmap(struct super_block *sb, int *flags) sbi->s_bmap_count = (sbi->s_partition_size - sbi->s_reserved + sbi->s_bmap_bits - 1) / sbi->s_bmap_bits; size = sbi->s_bmap_count * sizeof(*bm); - bm = sbi->s_bitmap = kzalloc(size, GFP_KERNEL); + bm = sbi->s_bitmap = kmalloc(size, GFP_KERNEL); if (!sbi->s_bitmap) { printk(KERN_ERR "AFFS: Bitmap allocation failed\n"); return -ENOMEM; } + memset(sbi->s_bitmap, 0, size); bmap_blk = (__be32 *)sbi->s_root_bh->b_data; blk = sb->s_blocksize / 4 - 49; diff --git a/trunk/fs/affs/super.c b/trunk/fs/affs/super.c index 3de93e799949..5ea72c3a16c3 100644 --- a/trunk/fs/affs/super.c +++ b/trunk/fs/affs/super.c @@ -66,12 +66,12 @@ affs_write_super(struct super_block *sb) pr_debug("AFFS: write_super() at %lu, clean=%d\n", get_seconds(), clean); } -static struct kmem_cache * affs_inode_cachep; +static kmem_cache_t * affs_inode_cachep; static struct inode *affs_alloc_inode(struct super_block *sb) { struct affs_inode_info *ei; - ei = (struct affs_inode_info *)kmem_cache_alloc(affs_inode_cachep, GFP_KERNEL); + ei = (struct affs_inode_info *)kmem_cache_alloc(affs_inode_cachep, SLAB_KERNEL); if (!ei) return NULL; ei->vfs_inode.i_version = 1; @@ -83,7 +83,7 @@ static void affs_destroy_inode(struct inode *inode) kmem_cache_free(affs_inode_cachep, AFFS_I(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct affs_inode_info *ei = (struct affs_inode_info *) foo; diff --git a/trunk/fs/afs/kafsasyncd.c b/trunk/fs/afs/kafsasyncd.c index 615df2407cb2..f09a794f248e 100644 --- a/trunk/fs/afs/kafsasyncd.c +++ b/trunk/fs/afs/kafsasyncd.c @@ -20,7 +20,6 @@ #include #include #include -#include #include "cell.h" #include "server.h" #include "volume.h" diff --git a/trunk/fs/afs/kafstimod.c b/trunk/fs/afs/kafstimod.c index 694344e4d3c7..65bc05ab8182 100644 --- a/trunk/fs/afs/kafstimod.c +++ b/trunk/fs/afs/kafstimod.c @@ -13,7 +13,6 @@ #include #include #include -#include #include "cell.h" #include "volume.h" #include "kafstimod.h" diff --git a/trunk/fs/afs/server.c b/trunk/fs/afs/server.c index 44aff81dc6a7..22afaae1a4ce 100644 --- a/trunk/fs/afs/server.c +++ b/trunk/fs/afs/server.c @@ -55,12 +55,13 @@ int afs_server_lookup(struct afs_cell *cell, const struct in_addr *addr, _enter("%p,%08x,", cell, ntohl(addr->s_addr)); /* allocate and initialise a server record */ - server = kzalloc(sizeof(struct afs_server), GFP_KERNEL); + server = kmalloc(sizeof(struct afs_server), GFP_KERNEL); if (!server) { _leave(" = -ENOMEM"); return -ENOMEM; } + memset(server, 0, sizeof(struct afs_server)); atomic_set(&server->usage, 1); INIT_LIST_HEAD(&server->link); diff --git a/trunk/fs/afs/super.c b/trunk/fs/afs/super.c index 18d9b77ba40f..67d1f5c819ec 100644 --- a/trunk/fs/afs/super.c +++ b/trunk/fs/afs/super.c @@ -35,7 +35,7 @@ struct afs_mount_params { struct afs_volume *volume; }; -static void afs_i_init_once(void *foo, struct kmem_cache *cachep, +static void afs_i_init_once(void *foo, kmem_cache_t *cachep, unsigned long flags); static int afs_get_sb(struct file_system_type *fs_type, 
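Note: two smaller patterns recur in the adfs/affs/afs hunks above: kzalloc() versus an open-coded kmalloc()+memset(), and the bounded vsnprintf() versus the unbounded vsprintf(). A brief sketch of the equivalences being toggled; the buffer size and function names are illustrative:

	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static char error_buf[128];

	static void example_error(const char *fmt, ...)
	{
		va_list args;

		va_start(args, fmt);
		/* Bounded: cannot write past the end of error_buf. */
		vsnprintf(error_buf, sizeof(error_buf), fmt, args);
		/* Older, unbounded equivalent:  vsprintf(error_buf, fmt, args); */
		va_end(args);
	}

	static void *example_zalloc(size_t size)
	{
		/* kzalloc(size, GFP_KERNEL) is shorthand for: */
		void *p = kmalloc(size, GFP_KERNEL);

		if (p)
			memset(p, 0, size);
		return p;
	}
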
@@ -65,7 +65,7 @@ static struct super_operations afs_super_ops = { .put_super = afs_put_super, }; -static struct kmem_cache *afs_inode_cachep; +static kmem_cache_t *afs_inode_cachep; static atomic_t afs_count_active_inodes; /*****************************************************************************/ @@ -242,12 +242,14 @@ static int afs_fill_super(struct super_block *sb, void *data, int silent) kenter(""); /* allocate a superblock info record */ - as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL); + as = kmalloc(sizeof(struct afs_super_info), GFP_KERNEL); if (!as) { _leave(" = -ENOMEM"); return -ENOMEM; } + memset(as, 0, sizeof(struct afs_super_info)); + afs_get_volume(params->volume); as->volume = params->volume; @@ -382,7 +384,7 @@ static void afs_put_super(struct super_block *sb) /* * initialise an inode cache slab element prior to any use */ -static void afs_i_init_once(void *_vnode, struct kmem_cache *cachep, +static void afs_i_init_once(void *_vnode, kmem_cache_t *cachep, unsigned long flags) { struct afs_vnode *vnode = (struct afs_vnode *) _vnode; @@ -410,7 +412,7 @@ static struct inode *afs_alloc_inode(struct super_block *sb) struct afs_vnode *vnode; vnode = (struct afs_vnode *) - kmem_cache_alloc(afs_inode_cachep, GFP_KERNEL); + kmem_cache_alloc(afs_inode_cachep, SLAB_KERNEL); if (!vnode) return NULL; diff --git a/trunk/fs/aio.c b/trunk/fs/aio.c index d3a6ec2c9627..277a5f2d18ad 100644 --- a/trunk/fs/aio.c +++ b/trunk/fs/aio.c @@ -47,19 +47,19 @@ unsigned long aio_nr; /* current system wide number of aio requests */ unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ /*----end sysctl variables---*/ -static struct kmem_cache *kiocb_cachep; -static struct kmem_cache *kioctx_cachep; +static kmem_cache_t *kiocb_cachep; +static kmem_cache_t *kioctx_cachep; static struct workqueue_struct *aio_wq; /* Used for rare fput completion. */ -static void aio_fput_routine(struct work_struct *); -static DECLARE_WORK(fput_work, aio_fput_routine); +static void aio_fput_routine(void *); +static DECLARE_WORK(fput_work, aio_fput_routine, NULL); static DEFINE_SPINLOCK(fput_lock); static LIST_HEAD(fput_head); -static void aio_kick_handler(struct work_struct *); +static void aio_kick_handler(void *); static void aio_queue_work(struct kioctx *); /* aio_setup @@ -227,7 +227,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) INIT_LIST_HEAD(&ctx->active_reqs); INIT_LIST_HEAD(&ctx->run_list); - INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler); + INIT_WORK(&ctx->wq, aio_kick_handler, ctx); if (aio_setup_ring(ctx) < 0) goto out_freectx; @@ -469,7 +469,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req) wake_up(&ctx->wait); } -static void aio_fput_routine(struct work_struct *data) +static void aio_fput_routine(void *data) { spin_lock_irq(&fput_lock); while (likely(!list_empty(&fput_head))) { @@ -666,6 +666,17 @@ static ssize_t aio_run_iocb(struct kiocb *iocb) ssize_t (*retry)(struct kiocb *); ssize_t ret; + if (iocb->ki_retried++ > 1024*1024) { + printk("Maximal retry count. Bytes done %Zd\n", + iocb->ki_nbytes - iocb->ki_left); + return -EAGAIN; + } + + if (!(iocb->ki_retried & 0xff)) { + pr_debug("%ld retry: %zd of %zd\n", iocb->ki_retried, + iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes); + } + if (!(retry = iocb->ki_retry)) { printk("aio_run_iocb: iocb->ki_retry = NULL\n"); return 0; @@ -846,9 +857,9 @@ static inline void aio_run_all_iocbs(struct kioctx *ctx) * space. * Run on aiod's context. 
*/ -static void aio_kick_handler(struct work_struct *work) +static void aio_kick_handler(void *data) { - struct kioctx *ctx = container_of(work, struct kioctx, wq.work); + struct kioctx *ctx = data; mm_segment_t oldfs = get_fs(); int requeue; @@ -863,7 +874,7 @@ static void aio_kick_handler(struct work_struct *work) * we're in a worker thread already, don't use queue_delayed_work, */ if (requeue) - queue_delayed_work(aio_wq, &ctx->wq, 0); + queue_work(aio_wq, &ctx->wq); } @@ -994,6 +1005,9 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2) kunmap_atomic(ring, KM_IRQ1); pr_debug("added to ring %p at [%lu]\n", iocb, tail); + + pr_debug("%ld retries: %zd of %zd\n", iocb->ki_retried, + iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes); put_rq: /* everything turned out well, dispose of the aiocb. */ ret = __aio_put_req(ctx, iocb); @@ -1399,6 +1413,7 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb) kiocb->ki_iovec->iov_len = kiocb->ki_left; kiocb->ki_nr_segs = 1; kiocb->ki_cur_seg = 0; + kiocb->ki_nbytes = kiocb->ki_left; return 0; } @@ -1576,6 +1591,7 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, req->ki_opcode = iocb->aio_lio_opcode; init_waitqueue_func_entry(&req->ki_wait, aio_wake_function); INIT_LIST_HEAD(&req->ki_wait.task_list); + req->ki_retried = 0; ret = aio_setup_iocb(req); diff --git a/trunk/fs/autofs/inode.c b/trunk/fs/autofs/inode.c index f968d1342808..38ede5c9d6fd 100644 --- a/trunk/fs/autofs/inode.c +++ b/trunk/fs/autofs/inode.c @@ -28,11 +28,10 @@ void autofs_kill_sb(struct super_block *sb) /* * In the event of a failure in get_sb_nodev the superblock * info is not present so nothing else has been setup, so - * just call kill_anon_super when we are called from - * deactivate_super. + * just exit when we are called from deactivate_super. */ if (!sbi) - goto out_kill_sb; + return; if ( !sbi->catatonic ) autofs_catatonic_mode(sbi); /* Free wait queues, close pipe */ @@ -45,7 +44,6 @@ void autofs_kill_sb(struct super_block *sb) kfree(sb->s_fs_info); -out_kill_sb: DPRINTK(("autofs: shutting down\n")); kill_anon_super(sb); } @@ -211,6 +209,7 @@ int autofs_fill_super(struct super_block *s, void *data, int silent) fail_free: kfree(sbi); s->s_fs_info = NULL; + kill_anon_super(s); fail_unlock: return -EINVAL; } diff --git a/trunk/fs/autofs4/inode.c b/trunk/fs/autofs4/inode.c index 9c48250fd726..ce7c0f1dd529 100644 --- a/trunk/fs/autofs4/inode.c +++ b/trunk/fs/autofs4/inode.c @@ -152,11 +152,10 @@ void autofs4_kill_sb(struct super_block *sb) /* * In the event of a failure in get_sb_nodev the superblock * info is not present so nothing else has been setup, so - * just call kill_anon_super when we are called from - * deactivate_super. + * just exit when we are called from deactivate_super. 
*/ if (!sbi) - goto out_kill_sb; + return; sb->s_fs_info = NULL; @@ -168,7 +167,6 @@ void autofs4_kill_sb(struct super_block *sb) kfree(sbi); -out_kill_sb: DPRINTK("shutting down"); kill_anon_super(sb); } @@ -428,6 +426,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) fail_free: kfree(sbi); s->s_fs_info = NULL; + kill_anon_super(s); fail_unlock: return -EINVAL; } diff --git a/trunk/fs/befs/linuxvfs.c b/trunk/fs/befs/linuxvfs.c index bce402eee554..07f7144f0e2e 100644 --- a/trunk/fs/befs/linuxvfs.c +++ b/trunk/fs/befs/linuxvfs.c @@ -61,7 +61,7 @@ static const struct super_operations befs_sops = { }; /* slab cache for befs_inode_info objects */ -static struct kmem_cache *befs_inode_cachep; +static kmem_cache_t *befs_inode_cachep; static const struct file_operations befs_dir_operations = { .read = generic_read_dir, @@ -277,7 +277,7 @@ befs_alloc_inode(struct super_block *sb) { struct befs_inode_info *bi; bi = (struct befs_inode_info *)kmem_cache_alloc(befs_inode_cachep, - GFP_KERNEL); + SLAB_KERNEL); if (!bi) return NULL; return &bi->vfs_inode; @@ -289,7 +289,7 @@ befs_destroy_inode(struct inode *inode) kmem_cache_free(befs_inode_cachep, BEFS_I(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct befs_inode_info *bi = (struct befs_inode_info *) foo; diff --git a/trunk/fs/bfs/inode.c b/trunk/fs/bfs/inode.c index eac175ed9f44..ed27ffb3459e 100644 --- a/trunk/fs/bfs/inode.c +++ b/trunk/fs/bfs/inode.c @@ -228,12 +228,12 @@ static void bfs_write_super(struct super_block *s) unlock_kernel(); } -static struct kmem_cache * bfs_inode_cachep; +static kmem_cache_t * bfs_inode_cachep; static struct inode *bfs_alloc_inode(struct super_block *sb) { struct bfs_inode_info *bi; - bi = kmem_cache_alloc(bfs_inode_cachep, GFP_KERNEL); + bi = kmem_cache_alloc(bfs_inode_cachep, SLAB_KERNEL); if (!bi) return NULL; return &bi->vfs_inode; @@ -244,7 +244,7 @@ static void bfs_destroy_inode(struct inode *inode) kmem_cache_free(bfs_inode_cachep, BFS_I(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct bfs_inode_info *bi = foo; diff --git a/trunk/fs/binfmt_elf.c b/trunk/fs/binfmt_elf.c index be5869d34999..cc72bb43061d 100644 --- a/trunk/fs/binfmt_elf.c +++ b/trunk/fs/binfmt_elf.c @@ -47,6 +47,10 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs); static int load_elf_library(struct file *); static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int); +#ifndef elf_addr_t +#define elf_addr_t unsigned long +#endif + /* * If we don't support core dumping, then supply a NULL so we * don't even try. 
@@ -239,9 +243,8 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, if (interp_aout) { argv = sp + 2; envp = argv + argc + 1; - if (__put_user((elf_addr_t)(unsigned long)argv, sp++) || - __put_user((elf_addr_t)(unsigned long)envp, sp++)) - return -EFAULT; + __put_user((elf_addr_t)(unsigned long)argv, sp++); + __put_user((elf_addr_t)(unsigned long)envp, sp++); } else { argv = sp; envp = argv + argc + 1; @@ -251,8 +254,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, p = current->mm->arg_end = current->mm->arg_start; while (argc-- > 0) { size_t len; - if (__put_user((elf_addr_t)p, argv++)) - return -EFAULT; + __put_user((elf_addr_t)p, argv++); len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES); if (!len || len > PAGE_SIZE*MAX_ARG_PAGES) return 0; @@ -263,8 +265,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, current->mm->arg_end = current->mm->env_start = p; while (envc-- > 0) { size_t len; - if (__put_user((elf_addr_t)p, envp++)) - return -EFAULT; + __put_user((elf_addr_t)p, envp++); len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES); if (!len || len > PAGE_SIZE*MAX_ARG_PAGES) return 0; @@ -544,7 +545,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) unsigned long reloc_func_desc = 0; char passed_fileno[6]; struct files_struct *files; - int executable_stack = EXSTACK_DEFAULT; + int have_pt_gnu_stack, executable_stack = EXSTACK_DEFAULT; unsigned long def_flags = 0; struct { struct elfhdr elf_ex; @@ -707,6 +708,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) executable_stack = EXSTACK_DISABLE_X; break; } + have_pt_gnu_stack = (i < loc->elf_ex.e_phnum); /* Some simple consistency checks for the interpreter */ if (elf_interpreter) { @@ -854,13 +856,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) * default mmap base, as well as whatever program they * might try to exec. This is because the brk will * follow the loader, and is not movable. */ - if (current->flags & PF_RANDOMIZE) - load_bias = randomize_range(0x10000, - ELF_ET_DYN_BASE, - 0); - else - load_bias = ELF_ET_DYN_BASE; - load_bias = ELF_PAGESTART(load_bias - vaddr); + load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr); } error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, diff --git a/trunk/fs/binfmt_elf_fdpic.c b/trunk/fs/binfmt_elf_fdpic.c index ed9a61c6beb3..f86d5c9ce5eb 100644 --- a/trunk/fs/binfmt_elf_fdpic.c +++ b/trunk/fs/binfmt_elf_fdpic.c @@ -40,6 +40,9 @@ #include typedef char *elf_caddr_t; +#ifndef elf_addr_t +#define elf_addr_t unsigned long +#endif #if 0 #define kdebug(fmt, ...) printk("FDPIC "fmt"\n" ,##__VA_ARGS__ ) diff --git a/trunk/fs/bio.c b/trunk/fs/bio.c index 7ec737eda72b..aa4d09bd4e71 100644 --- a/trunk/fs/bio.c +++ b/trunk/fs/bio.c @@ -30,7 +30,7 @@ #define BIO_POOL_SIZE 256 -static struct kmem_cache *bio_slab __read_mostly; +static kmem_cache_t *bio_slab __read_mostly; #define BIOVEC_NR_POOLS 6 @@ -44,7 +44,7 @@ mempool_t *bio_split_pool __read_mostly; struct biovec_slab { int nr_vecs; char *name; - struct kmem_cache *slab; + kmem_cache_t *slab; }; /* @@ -940,16 +940,16 @@ static void bio_release_pages(struct bio *bio) * run one bio_put() against the BIO. 
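Note: the binfmt_elf hunks above (and the fs/compat.c and fs/compat_ioctl.c ones further down) toggle whether the return value of __put_user()/__get_user() is checked. A small sketch of the checked style with a hypothetical user-visible layout; __put_user() returns 0 on success and -EFAULT on a faulting store, so the checked form propagates the error instead of silently continuing:

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <asm/uaccess.h>	/* <linux/uaccess.h> on newer kernels */

	struct example_layout {		/* hypothetical user-space struct */
		u32 first;
		u32 second;
	};

	static int example_fill(struct example_layout __user *up, u32 a, u32 b)
	{
		if (__put_user(a, &up->first) ||
		    __put_user(b, &up->second))
			return -EFAULT;
		return 0;
	}
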
*/ -static void bio_dirty_fn(struct work_struct *work); +static void bio_dirty_fn(void *data); -static DECLARE_WORK(bio_dirty_work, bio_dirty_fn); +static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL); static DEFINE_SPINLOCK(bio_dirty_lock); static struct bio *bio_dirty_list; /* * This runs in process context */ -static void bio_dirty_fn(struct work_struct *work) +static void bio_dirty_fn(void *data) { unsigned long flags; struct bio *bio; diff --git a/trunk/fs/block_dev.c b/trunk/fs/block_dev.c index 13816b4d76f6..36c0e7af9d0f 100644 --- a/trunk/fs/block_dev.c +++ b/trunk/fs/block_dev.c @@ -235,11 +235,11 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync) */ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock); -static struct kmem_cache * bdev_cachep __read_mostly; +static kmem_cache_t * bdev_cachep __read_mostly; static struct inode *bdev_alloc_inode(struct super_block *sb) { - struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL); + struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, SLAB_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -253,7 +253,7 @@ static void bdev_destroy_inode(struct inode *inode) kmem_cache_free(bdev_cachep, bdi); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct bdev_inode *ei = (struct bdev_inode *) foo; struct block_device *bdev = &ei->bdev; diff --git a/trunk/fs/buffer.c b/trunk/fs/buffer.c index 517860f2d75b..35527dca1dbc 100644 --- a/trunk/fs/buffer.c +++ b/trunk/fs/buffer.c @@ -2908,7 +2908,7 @@ asmlinkage long sys_bdflush(int func, long data) /* * Buffer-head allocation */ -static struct kmem_cache *bh_cachep; +static kmem_cache_t *bh_cachep; /* * Once the number of bh's in the machine exceeds this level, we start @@ -2961,7 +2961,7 @@ void free_buffer_head(struct buffer_head *bh) EXPORT_SYMBOL(free_buffer_head); static void -init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags) +init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags) { if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR) { @@ -2972,6 +2972,7 @@ init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags) } } +#ifdef CONFIG_HOTPLUG_CPU static void buffer_exit_cpu(int cpu) { int i; @@ -2993,6 +2994,7 @@ static int buffer_cpu_notify(struct notifier_block *self, buffer_exit_cpu((unsigned long)hcpu); return NOTIFY_OK; } +#endif /* CONFIG_HOTPLUG_CPU */ void __init buffer_init(void) { diff --git a/trunk/fs/cifs/cifsfs.c b/trunk/fs/cifs/cifsfs.c index 71bc87a37fc1..84976cdbe713 100644 --- a/trunk/fs/cifs/cifsfs.c +++ b/trunk/fs/cifs/cifsfs.c @@ -34,7 +34,6 @@ #include #include #include -#include #include "cifsfs.h" #include "cifspdu.h" #define DECLARE_GLOBALS_HERE @@ -82,7 +81,7 @@ extern mempool_t *cifs_sm_req_poolp; extern mempool_t *cifs_req_poolp; extern mempool_t *cifs_mid_poolp; -extern struct kmem_cache *cifs_oplock_cachep; +extern kmem_cache_t *cifs_oplock_cachep; static int cifs_read_super(struct super_block *sb, void *data, @@ -233,11 +232,11 @@ static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd) return generic_permission(inode, mask, NULL); } -static struct kmem_cache *cifs_inode_cachep; -static struct kmem_cache *cifs_req_cachep; -static struct kmem_cache *cifs_mid_cachep; -struct kmem_cache *cifs_oplock_cachep; -static struct kmem_cache *cifs_sm_req_cachep; +static kmem_cache_t 
*cifs_inode_cachep; +static kmem_cache_t *cifs_req_cachep; +static kmem_cache_t *cifs_mid_cachep; +kmem_cache_t *cifs_oplock_cachep; +static kmem_cache_t *cifs_sm_req_cachep; mempool_t *cifs_sm_req_poolp; mempool_t *cifs_req_poolp; mempool_t *cifs_mid_poolp; @@ -246,7 +245,7 @@ static struct inode * cifs_alloc_inode(struct super_block *sb) { struct cifsInodeInfo *cifs_inode; - cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL); + cifs_inode = kmem_cache_alloc(cifs_inode_cachep, SLAB_KERNEL); if (!cifs_inode) return NULL; cifs_inode->cifsAttrs = 0x20; /* default */ @@ -669,7 +668,7 @@ const struct file_operations cifs_dir_ops = { }; static void -cifs_init_once(void *inode, struct kmem_cache * cachep, unsigned long flags) +cifs_init_once(void *inode, kmem_cache_t * cachep, unsigned long flags) { struct cifsInodeInfo *cifsi = inode; diff --git a/trunk/fs/cifs/connect.c b/trunk/fs/cifs/connect.c index 2caca06b4bae..71f77914ce93 100644 --- a/trunk/fs/cifs/connect.c +++ b/trunk/fs/cifs/connect.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include "cifspdu.h" diff --git a/trunk/fs/cifs/misc.c b/trunk/fs/cifs/misc.c index aedf683f011f..bbc9cd34b6ea 100644 --- a/trunk/fs/cifs/misc.c +++ b/trunk/fs/cifs/misc.c @@ -153,7 +153,7 @@ cifs_buf_get(void) albeit slightly larger than necessary and maxbuffersize defaults to this and can not be bigger */ ret_buf = - (struct smb_hdr *) mempool_alloc(cifs_req_poolp, GFP_KERNEL | GFP_NOFS); + (struct smb_hdr *) mempool_alloc(cifs_req_poolp, SLAB_KERNEL | SLAB_NOFS); /* clear the first few header bytes */ /* for most paths, more is cleared in header_assemble */ @@ -192,7 +192,7 @@ cifs_small_buf_get(void) albeit slightly larger than necessary and maxbuffersize defaults to this and can not be bigger */ ret_buf = - (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp, GFP_KERNEL | GFP_NOFS); + (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp, SLAB_KERNEL | SLAB_NOFS); if (ret_buf) { /* No need to clear memory here, cleared in header assemble */ /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/ diff --git a/trunk/fs/cifs/transport.c b/trunk/fs/cifs/transport.c index f80007eaebf4..48d47b46b1fb 100644 --- a/trunk/fs/cifs/transport.c +++ b/trunk/fs/cifs/transport.c @@ -34,7 +34,7 @@ #include "cifs_debug.h" extern mempool_t *cifs_mid_poolp; -extern struct kmem_cache *cifs_oplock_cachep; +extern kmem_cache_t *cifs_oplock_cachep; static struct mid_q_entry * AllocMidQEntry(const struct smb_hdr *smb_buffer, struct cifsSesInfo *ses) @@ -51,7 +51,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct cifsSesInfo *ses) } temp = (struct mid_q_entry *) mempool_alloc(cifs_mid_poolp, - GFP_KERNEL | GFP_NOFS); + SLAB_KERNEL | SLAB_NOFS); if (temp == NULL) return temp; else { @@ -118,7 +118,7 @@ AllocOplockQEntry(struct inode * pinode, __u16 fid, struct cifsTconInfo * tcon) return NULL; } temp = (struct oplock_q_entry *) kmem_cache_alloc(cifs_oplock_cachep, - GFP_KERNEL); + SLAB_KERNEL); if (temp == NULL) return temp; else { diff --git a/trunk/fs/coda/inode.c b/trunk/fs/coda/inode.c index b64659fa82d0..88d123321164 100644 --- a/trunk/fs/coda/inode.c +++ b/trunk/fs/coda/inode.c @@ -38,12 +38,12 @@ static void coda_clear_inode(struct inode *); static void coda_put_super(struct super_block *); static int coda_statfs(struct dentry *dentry, struct kstatfs *buf); -static struct kmem_cache * coda_inode_cachep; +static kmem_cache_t * coda_inode_cachep; static struct inode *coda_alloc_inode(struct super_block *sb) { struct coda_inode_info 
*ei; - ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL); + ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, SLAB_KERNEL); if (!ei) return NULL; memset(&ei->c_fid, 0, sizeof(struct CodaFid)); @@ -58,7 +58,7 @@ static void coda_destroy_inode(struct inode *inode) kmem_cache_free(coda_inode_cachep, ITOC(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct coda_inode_info *ei = (struct coda_inode_info *) foo; diff --git a/trunk/fs/compat.c b/trunk/fs/compat.c index a7e3f162fb15..06dad665b88f 100644 --- a/trunk/fs/compat.c +++ b/trunk/fs/compat.c @@ -871,7 +871,7 @@ asmlinkage long compat_sys_mount(char __user * dev_name, char __user * dir_name, retval = -EINVAL; - if (type_page && data_page) { + if (type_page) { if (!strcmp((char *)type_page, SMBFS_NAME)) { do_smb_super_data_conv((void *)data_page); } else if (!strcmp((char *)type_page, NCPFS_NAME)) { @@ -1144,9 +1144,7 @@ asmlinkage long compat_sys_getdents64(unsigned int fd, lastdirent = buf.previous; if (lastdirent) { typeof(lastdirent->d_off) d_off = file->f_pos; - error = -EFAULT; - if (__put_user_unaligned(d_off, &lastdirent->d_off)) - goto out_putf; + __put_user_unaligned(d_off, &lastdirent->d_off); error = count - buf.count; } @@ -1613,14 +1611,14 @@ int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, nr &= ~1UL; while (nr) { unsigned long h, l; - if (__get_user(l, ufdset) || __get_user(h, ufdset+1)) - return -EFAULT; + __get_user(l, ufdset); + __get_user(h, ufdset+1); ufdset += 2; *fdset++ = h << 32 | l; nr -= 2; } - if (odd && __get_user(*fdset, ufdset)) - return -EFAULT; + if (odd) + __get_user(*fdset, ufdset); } else { /* Tricky, must clear full unsigned long in the * kernel fdset at the end, this makes sure that @@ -1632,14 +1630,14 @@ int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, } static -int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, - unsigned long *fdset) +void compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, + unsigned long *fdset) { unsigned long odd; nr = ROUND_UP(nr, __COMPAT_NFDBITS); if (!ufdset) - return 0; + return; odd = nr & 1UL; nr &= ~1UL; @@ -1647,14 +1645,13 @@ int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, unsigned long h, l; l = *fdset++; h = l >> 32; - if (__put_user(l, ufdset) || __put_user(h, ufdset+1)) - return -EFAULT; + __put_user(l, ufdset); + __put_user(h, ufdset+1); ufdset += 2; nr -= 2; } - if (odd && __put_user(*fdset, ufdset)) - return -EFAULT; - return 0; + if (odd) + __put_user(*fdset, ufdset); } @@ -1729,10 +1726,10 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp, ret = 0; } - if (compat_set_fd_set(n, inp, fds.res_in) || - compat_set_fd_set(n, outp, fds.res_out) || - compat_set_fd_set(n, exp, fds.res_ex)) - ret = -EFAULT; + compat_set_fd_set(n, inp, fds.res_in); + compat_set_fd_set(n, outp, fds.res_out); + compat_set_fd_set(n, exp, fds.res_ex); + out: kfree(bits); out_nofds: diff --git a/trunk/fs/compat_ioctl.c b/trunk/fs/compat_ioctl.c index 6e3177685556..a91f2628c981 100644 --- a/trunk/fs/compat_ioctl.c +++ b/trunk/fs/compat_ioctl.c @@ -211,10 +211,8 @@ static int do_video_stillpicture(unsigned int fd, unsigned int cmd, unsigned lon up_native = compat_alloc_user_space(sizeof(struct video_still_picture)); - err = put_user(compat_ptr(fp), &up_native->iFrame); - err |= put_user(size, 
&up_native->size); - if (err) - return -EFAULT; + put_user(compat_ptr(fp), &up_native->iFrame); + put_user(size, &up_native->size); err = sys_ioctl(fd, cmd, (unsigned long) up_native); @@ -238,10 +236,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned err |= get_user(length, &up->length); up_native = compat_alloc_user_space(sizeof(struct video_spu_palette)); - err = put_user(compat_ptr(palp), &up_native->palette); - err |= put_user(length, &up_native->length); - if (err) - return -EFAULT; + put_user(compat_ptr(palp), &up_native->palette); + put_user(length, &up_native->length); err = sys_ioctl(fd, cmd, (unsigned long) up_native); @@ -2047,19 +2043,16 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg) struct serial_struct ss; mm_segment_t oldseg = get_fs(); __u32 udata; - unsigned int base; if (cmd == TIOCSSERIAL) { if (!access_ok(VERIFY_READ, ss32, sizeof(SS32))) return -EFAULT; if (__copy_from_user(&ss, ss32, offsetof(SS32, iomem_base))) return -EFAULT; - if (__get_user(udata, &ss32->iomem_base)) - return -EFAULT; + __get_user(udata, &ss32->iomem_base); ss.iomem_base = compat_ptr(udata); - if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) || - __get_user(ss.port_high, &ss32->port_high)) - return -EFAULT; + __get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift); + __get_user(ss.port_high, &ss32->port_high); ss.iomap_base = 0UL; } set_fs(KERNEL_DS); @@ -2070,12 +2063,12 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg) return -EFAULT; if (__copy_to_user(ss32,&ss,offsetof(SS32,iomem_base))) return -EFAULT; - base = (unsigned long)ss.iomem_base >> 32 ? - 0xffffffff : (unsigned)(unsigned long)ss.iomem_base; - if (__put_user(base, &ss32->iomem_base) || - __put_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) || - __put_user(ss.port_high, &ss32->port_high)) - return -EFAULT; + __put_user((unsigned long)ss.iomem_base >> 32 ? 
+ 0xffffffff : (unsigned)(unsigned long)ss.iomem_base, + &ss32->iomem_base); + __put_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift); + __put_user(ss.port_high, &ss32->port_high); + } return err; } diff --git a/trunk/fs/configfs/configfs_internal.h b/trunk/fs/configfs/configfs_internal.h index f92cd303d2c9..3f4ff7a242b9 100644 --- a/trunk/fs/configfs/configfs_internal.h +++ b/trunk/fs/configfs/configfs_internal.h @@ -49,7 +49,7 @@ struct configfs_dirent { #define CONFIGFS_NOT_PINNED (CONFIGFS_ITEM_ATTR) extern struct vfsmount * configfs_mount; -extern struct kmem_cache *configfs_dir_cachep; +extern kmem_cache_t *configfs_dir_cachep; extern int configfs_is_root(struct config_item *item); diff --git a/trunk/fs/configfs/mount.c b/trunk/fs/configfs/mount.c index ed678529ebb2..68bd5c93ca52 100644 --- a/trunk/fs/configfs/mount.c +++ b/trunk/fs/configfs/mount.c @@ -38,7 +38,7 @@ struct vfsmount * configfs_mount = NULL; struct super_block * configfs_sb = NULL; -struct kmem_cache *configfs_dir_cachep; +kmem_cache_t *configfs_dir_cachep; static int configfs_mnt_count = 0; static struct super_operations configfs_ops = { diff --git a/trunk/fs/cramfs/inode.c b/trunk/fs/cramfs/inode.c index 0509cedd415c..a624c3ec8189 100644 --- a/trunk/fs/cramfs/inode.c +++ b/trunk/fs/cramfs/inode.c @@ -481,8 +481,6 @@ static int cramfs_readpage(struct file *file, struct page * page) pgdata = kmap(page); if (compr_len == 0) ; /* hole */ - else if (compr_len > (PAGE_CACHE_SIZE << 1)) - printk(KERN_ERR "cramfs: bad compressed blocksize %u\n", compr_len); else { mutex_lock(&read_mutex); bytes_filled = cramfs_uncompress_block(pgdata, diff --git a/trunk/fs/dcache.c b/trunk/fs/dcache.c index d68631f18df1..fd4a428998ef 100644 --- a/trunk/fs/dcache.c +++ b/trunk/fs/dcache.c @@ -43,7 +43,7 @@ static __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock); EXPORT_SYMBOL(dcache_lock); -static struct kmem_cache *dentry_cache __read_mostly; +static kmem_cache_t *dentry_cache __read_mostly; #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname)) @@ -68,19 +68,15 @@ struct dentry_stat_t dentry_stat = { .age_limit = 45, }; -static void __d_free(struct dentry *dentry) +static void d_callback(struct rcu_head *head) { + struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu); + if (dname_external(dentry)) kfree(dentry->d_name.name); kmem_cache_free(dentry_cache, dentry); } -static void d_callback(struct rcu_head *head) -{ - struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu); - __d_free(dentry); -} - /* * no dcache_lock, please. The caller must decrement dentry_stat.nr_dentry * inside dcache_lock. 
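The dcache hunk above goes back to freeing every dentry through call_rcu(): d_callback() recovers the enclosing dentry from its embedded rcu_head with container_of() and then releases it. Below is a minimal userspace re-creation of that recover-from-embedded-member pattern; the types are simplified stand-ins, not the kernel's struct dentry or struct rcu_head.

/* Illustrative only: container_of() recovering the enclosing object
 * from a pointer to one of its members, as d_callback() does above. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head_like { void (*func)(struct rcu_head_like *); };

struct dentry_like {
        char name[32];
        struct rcu_head_like d_rcu;     /* embedded callback handle */
};

static void d_callback_like(struct rcu_head_like *head)
{
        /* Recover the enclosing object from the embedded member. */
        struct dentry_like *d = container_of(head, struct dentry_like, d_rcu);
        printf("freeing %s\n", d->name);
        free(d);
}

int main(void)
{
        struct dentry_like *d = calloc(1, sizeof(*d));
        if (!d)
                return 1;
        snprintf(d->name, sizeof(d->name), "example");
        d->d_rcu.func = d_callback_like;
        /* call_rcu() would defer this until a grace period has passed;
         * here the callback is simply invoked directly. */
        d->d_rcu.func(&d->d_rcu);
        return 0;
}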
@@ -89,11 +85,7 @@ static void d_free(struct dentry *dentry) { if (dentry->d_op && dentry->d_op->d_release) dentry->d_op->d_release(dentry); - /* if dentry was never inserted into hash, immediate free is OK */ - if (dentry->d_hash.pprev == NULL) - __d_free(dentry); - else - call_rcu(&dentry->d_u.d_rcu, d_callback); + call_rcu(&dentry->d_u.d_rcu, d_callback); } /* @@ -2080,10 +2072,10 @@ static void __init dcache_init(unsigned long mempages) } /* SLAB cache for __getname() consumers */ -struct kmem_cache *names_cachep __read_mostly; +kmem_cache_t *names_cachep __read_mostly; /* SLAB cache for file structures */ -struct kmem_cache *filp_cachep __read_mostly; +kmem_cache_t *filp_cachep __read_mostly; EXPORT_SYMBOL(d_genocide); diff --git a/trunk/fs/dcookies.c b/trunk/fs/dcookies.c index 21af1629f9bc..0c4b0674854b 100644 --- a/trunk/fs/dcookies.c +++ b/trunk/fs/dcookies.c @@ -37,7 +37,7 @@ struct dcookie_struct { static LIST_HEAD(dcookie_users); static DEFINE_MUTEX(dcookie_mutex); -static struct kmem_cache *dcookie_cache __read_mostly; +static kmem_cache_t *dcookie_cache __read_mostly; static struct list_head *dcookie_hashtable __read_mostly; static size_t hash_size __read_mostly; diff --git a/trunk/fs/dlm/memory.c b/trunk/fs/dlm/memory.c index 5352b03ff5aa..989b608fd836 100644 --- a/trunk/fs/dlm/memory.c +++ b/trunk/fs/dlm/memory.c @@ -15,7 +15,7 @@ #include "config.h" #include "memory.h" -static struct kmem_cache *lkb_cache; +static kmem_cache_t *lkb_cache; int dlm_memory_init(void) diff --git a/trunk/fs/dnotify.c b/trunk/fs/dnotify.c index 1f26a2b9eee1..2b0442db67e0 100644 --- a/trunk/fs/dnotify.c +++ b/trunk/fs/dnotify.c @@ -23,7 +23,7 @@ int dir_notify_enable __read_mostly = 1; -static struct kmem_cache *dn_cache __read_mostly; +static kmem_cache_t *dn_cache __read_mostly; static void redo_inode_mask(struct inode *inode) { @@ -77,7 +77,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) inode = filp->f_dentry->d_inode; if (!S_ISDIR(inode->i_mode)) return -ENOTDIR; - dn = kmem_cache_alloc(dn_cache, GFP_KERNEL); + dn = kmem_cache_alloc(dn_cache, SLAB_KERNEL); if (dn == NULL) return -ENOMEM; spin_lock(&inode->i_lock); diff --git a/trunk/fs/dquot.c b/trunk/fs/dquot.c index f9cd5e23ebdf..9af789567e51 100644 --- a/trunk/fs/dquot.c +++ b/trunk/fs/dquot.c @@ -131,7 +131,7 @@ static struct quota_format_type *quota_formats; /* List of registered formats */ static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES; /* SLAB cache for dquot structures */ -static struct kmem_cache *dquot_cachep; +static kmem_cache_t *dquot_cachep; int register_quota_format(struct quota_format_type *fmt) { @@ -600,7 +600,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type) { struct dquot *dquot; - dquot = kmem_cache_alloc(dquot_cachep, GFP_NOFS); + dquot = kmem_cache_alloc(dquot_cachep, SLAB_NOFS); if(!dquot) return NODQUOT; diff --git a/trunk/fs/ecryptfs/crypto.c b/trunk/fs/ecryptfs/crypto.c index 7196f50fe152..f63a7755fe86 100644 --- a/trunk/fs/ecryptfs/crypto.c +++ b/trunk/fs/ecryptfs/crypto.c @@ -628,7 +628,7 @@ int ecryptfs_decrypt_page(struct file *file, struct page *page) num_extents_per_page = PAGE_CACHE_SIZE / crypt_stat->extent_size; base_extent = (page->index * num_extents_per_page); lower_page_virt = kmem_cache_alloc(ecryptfs_lower_page_cache, - GFP_KERNEL); + SLAB_KERNEL); if (!lower_page_virt) { rc = -ENOMEM; ecryptfs_printk(KERN_ERR, "Error getting page for encrypted " @@ -1334,7 +1334,7 @@ int ecryptfs_write_headers(struct dentry *ecryptfs_dentry, 
goto out; } /* Released in this function */ - page_virt = kmem_cache_alloc(ecryptfs_header_cache_0, GFP_USER); + page_virt = kmem_cache_alloc(ecryptfs_header_cache_0, SLAB_USER); if (!page_virt) { ecryptfs_printk(KERN_ERR, "Out of memory\n"); rc = -ENOMEM; @@ -1493,7 +1493,7 @@ int ecryptfs_read_headers(struct dentry *ecryptfs_dentry, &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat; /* Read the first page from the underlying file */ - page_virt = kmem_cache_alloc(ecryptfs_header_cache_1, GFP_USER); + page_virt = kmem_cache_alloc(ecryptfs_header_cache_1, SLAB_USER); if (!page_virt) { rc = -ENOMEM; ecryptfs_printk(KERN_ERR, "Unable to allocate page_virt\n"); diff --git a/trunk/fs/ecryptfs/file.c b/trunk/fs/ecryptfs/file.c index 42099e779a56..a92ef05eff8f 100644 --- a/trunk/fs/ecryptfs/file.c +++ b/trunk/fs/ecryptfs/file.c @@ -250,7 +250,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file) int lower_flags; /* Released in ecryptfs_release or end of function if failure */ - file_info = kmem_cache_alloc(ecryptfs_file_info_cache, GFP_KERNEL); + file_info = kmem_cache_alloc(ecryptfs_file_info_cache, SLAB_KERNEL); ecryptfs_set_file_private(file, file_info); if (!file_info) { ecryptfs_printk(KERN_ERR, diff --git a/trunk/fs/ecryptfs/inode.c b/trunk/fs/ecryptfs/inode.c index 8a1945a84c36..dfcc68484f47 100644 --- a/trunk/fs/ecryptfs/inode.c +++ b/trunk/fs/ecryptfs/inode.c @@ -369,7 +369,7 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry, BUG_ON(!atomic_read(&lower_dentry->d_count)); ecryptfs_set_dentry_private(dentry, kmem_cache_alloc(ecryptfs_dentry_info_cache, - GFP_KERNEL)); + SLAB_KERNEL)); if (!ecryptfs_dentry_to_private(dentry)) { rc = -ENOMEM; ecryptfs_printk(KERN_ERR, "Out of memory whilst attempting " @@ -404,7 +404,7 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry, /* Released in this function */ page_virt = (char *)kmem_cache_alloc(ecryptfs_header_cache_2, - GFP_USER); + SLAB_USER); if (!page_virt) { rc = -ENOMEM; ecryptfs_printk(KERN_ERR, @@ -795,7 +795,7 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length) /* Released at out_free: label */ ecryptfs_set_file_private(&fake_ecryptfs_file, kmem_cache_alloc(ecryptfs_file_info_cache, - GFP_KERNEL)); + SLAB_KERNEL)); if (unlikely(!ecryptfs_file_to_private(&fake_ecryptfs_file))) { rc = -ENOMEM; goto out; diff --git a/trunk/fs/ecryptfs/keystore.c b/trunk/fs/ecryptfs/keystore.c index 745c0f1bfbbd..c3746f56d162 100644 --- a/trunk/fs/ecryptfs/keystore.c +++ b/trunk/fs/ecryptfs/keystore.c @@ -207,7 +207,7 @@ parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat, /* Released: wipe_auth_tok_list called in ecryptfs_parse_packet_set or * at end of function upon failure */ auth_tok_list_item = - kmem_cache_alloc(ecryptfs_auth_tok_list_item_cache, GFP_KERNEL); + kmem_cache_alloc(ecryptfs_auth_tok_list_item_cache, SLAB_KERNEL); if (!auth_tok_list_item) { ecryptfs_printk(KERN_ERR, "Unable to allocate memory\n"); rc = -ENOMEM; diff --git a/trunk/fs/ecryptfs/main.c b/trunk/fs/ecryptfs/main.c index 3ede12b25933..a78d87d14baf 100644 --- a/trunk/fs/ecryptfs/main.c +++ b/trunk/fs/ecryptfs/main.c @@ -378,7 +378,7 @@ ecryptfs_fill_super(struct super_block *sb, void *raw_data, int silent) /* Released in ecryptfs_put_super() */ ecryptfs_set_superblock_private(sb, kmem_cache_alloc(ecryptfs_sb_info_cache, - GFP_KERNEL)); + SLAB_KERNEL)); if (!ecryptfs_superblock_to_private(sb)) { ecryptfs_printk(KERN_WARNING, "Out of memory\n"); rc = -ENOMEM; @@ -402,7 
+402,7 @@ ecryptfs_fill_super(struct super_block *sb, void *raw_data, int silent) /* through deactivate_super(sb) from get_sb_nodev() */ ecryptfs_set_dentry_private(sb->s_root, kmem_cache_alloc(ecryptfs_dentry_info_cache, - GFP_KERNEL)); + SLAB_KERNEL)); if (!ecryptfs_dentry_to_private(sb->s_root)) { ecryptfs_printk(KERN_ERR, "dentry_info_cache alloc failed\n"); @@ -546,7 +546,7 @@ inode_info_init_once(void *vptr, struct kmem_cache *cachep, unsigned long flags) } static struct ecryptfs_cache_info { - struct kmem_cache **cache; + kmem_cache_t **cache; const char *name; size_t size; void (*ctor)(void*, struct kmem_cache *, unsigned long); @@ -691,7 +691,7 @@ static ssize_t version_show(struct ecryptfs_obj *obj, char *buff) static struct ecryptfs_attribute sysfs_attr_version = __ATTR_RO(version); -static struct ecryptfs_version_str_map_elem { +struct ecryptfs_version_str_map_elem { u32 flag; char *str; } ecryptfs_version_str_map[] = { diff --git a/trunk/fs/ecryptfs/super.c b/trunk/fs/ecryptfs/super.c index eaa5daaf106e..825757ae4867 100644 --- a/trunk/fs/ecryptfs/super.c +++ b/trunk/fs/ecryptfs/super.c @@ -50,7 +50,7 @@ static struct inode *ecryptfs_alloc_inode(struct super_block *sb) struct inode *inode = NULL; ecryptfs_inode = kmem_cache_alloc(ecryptfs_inode_info_cache, - GFP_KERNEL); + SLAB_KERNEL); if (unlikely(!ecryptfs_inode)) goto out; ecryptfs_init_crypt_stat(&ecryptfs_inode->crypt_stat); diff --git a/trunk/fs/efs/super.c b/trunk/fs/efs/super.c index dfebf21289f4..b3f50651eb6b 100644 --- a/trunk/fs/efs/super.c +++ b/trunk/fs/efs/super.c @@ -52,12 +52,12 @@ static struct pt_types sgi_pt_types[] = { }; -static struct kmem_cache * efs_inode_cachep; +static kmem_cache_t * efs_inode_cachep; static struct inode *efs_alloc_inode(struct super_block *sb) { struct efs_inode_info *ei; - ei = (struct efs_inode_info *)kmem_cache_alloc(efs_inode_cachep, GFP_KERNEL); + ei = (struct efs_inode_info *)kmem_cache_alloc(efs_inode_cachep, SLAB_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -68,7 +68,7 @@ static void efs_destroy_inode(struct inode *inode) kmem_cache_free(efs_inode_cachep, INODE_INFO(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct efs_inode_info *ei = (struct efs_inode_info *) foo; diff --git a/trunk/fs/eventpoll.c b/trunk/fs/eventpoll.c index 88a6f8d0b88e..ae228ec54e94 100644 --- a/trunk/fs/eventpoll.c +++ b/trunk/fs/eventpoll.c @@ -283,10 +283,10 @@ static struct mutex epmutex; static struct poll_safewake psw; /* Slab cache used to allocate "struct epitem" */ -static struct kmem_cache *epi_cache __read_mostly; +static kmem_cache_t *epi_cache __read_mostly; /* Slab cache used to allocate "struct eppoll_entry" */ -static struct kmem_cache *pwq_cache __read_mostly; +static kmem_cache_t *pwq_cache __read_mostly; /* Virtual fs used to allocate inodes for eventpoll files */ static struct vfsmount *eventpoll_mnt __read_mostly; @@ -961,7 +961,7 @@ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead, struct epitem *epi = ep_item_from_epqueue(pt); struct eppoll_entry *pwq; - if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) { + if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, SLAB_KERNEL))) { init_waitqueue_func_entry(&pwq->wait, ep_poll_callback); pwq->whead = whead; pwq->base = epi; @@ -1004,7 +1004,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, struct ep_pqueue epq; 
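The cifs, coda, ecryptfs, efs and eventpoll hunks above only swap spellings: kmem_cache_t was a typedef for struct kmem_cache, and in this era SLAB_KERNEL/SLAB_NOFS/SLAB_USER were aliases for the matching GFP_ flags, so the allocations behave identically. For illustration, here is a toy userspace object cache with the same alloc/free shape; it is not the kernel slab allocator, and the sizes and names are made up.

/* Toy object cache: reuse freed objects before asking malloc() again,
 * mirroring the kmem_cache_alloc()/kmem_cache_free() pairing above. */
#include <stdio.h>
#include <stdlib.h>

struct obj_cache {
        size_t obj_size;
        void  *free_list;       /* singly linked list of returned objects */
};

static void *cache_alloc(struct obj_cache *c)
{
        if (c->free_list) {             /* reuse a previously freed object */
                void *p = c->free_list;
                c->free_list = *(void **)p;
                return p;
        }
        return malloc(c->obj_size);
}

static void cache_free(struct obj_cache *c, void *p)
{
        *(void **)p = c->free_list;     /* push back onto the free list */
        c->free_list = p;
}

int main(void)
{
        struct obj_cache cache = { .obj_size = 64, .free_list = NULL };
        void *a = cache_alloc(&cache);
        cache_free(&cache, a);
        void *b = cache_alloc(&cache);  /* recycles the same object */
        printf("recycled: %s\n", a == b ? "yes" : "no");
        free(b);
        return 0;
}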
error = -ENOMEM; - if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL))) + if (!(epi = kmem_cache_alloc(epi_cache, SLAB_KERNEL))) goto eexit_1; /* Item initialization follow here ... */ diff --git a/trunk/fs/exec.c b/trunk/fs/exec.c index add0e03c3ea9..d993ea1a81ae 100644 --- a/trunk/fs/exec.c +++ b/trunk/fs/exec.c @@ -404,7 +404,7 @@ int setup_arg_pages(struct linux_binprm *bprm, bprm->loader += stack_base; bprm->exec += stack_base; - mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); if (!mpnt) return -ENOMEM; @@ -1515,8 +1515,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) ispipe = 1; } else file = filp_open(corename, - O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, - 0600); + O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE, 0600); if (IS_ERR(file)) goto fail_unlock; inode = file->f_dentry->d_inode; diff --git a/trunk/fs/ext2/ioctl.c b/trunk/fs/ext2/ioctl.c index e3cf8c81507f..1dfba77eab10 100644 --- a/trunk/fs/ext2/ioctl.c +++ b/trunk/fs/ext2/ioctl.c @@ -44,7 +44,6 @@ int ext2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd, if (!S_ISDIR(inode->i_mode)) flags &= ~EXT2_DIRSYNC_FL; - mutex_lock(&inode->i_mutex); oldflags = ei->i_flags; /* @@ -54,16 +53,13 @@ int ext2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd, * This test looks nicer. Thanks to Pauline Middelink */ if ((flags ^ oldflags) & (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL)) { - if (!capable(CAP_LINUX_IMMUTABLE)) { - mutex_unlock(&inode->i_mutex); + if (!capable(CAP_LINUX_IMMUTABLE)) return -EPERM; - } } flags = flags & EXT2_FL_USER_MODIFIABLE; flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE; ei->i_flags = flags; - mutex_unlock(&inode->i_mutex); ext2_set_inode_flags(inode); inode->i_ctime = CURRENT_TIME_SEC; diff --git a/trunk/fs/ext2/super.c b/trunk/fs/ext2/super.c index 255cef5f7420..d8b9abd95d07 100644 --- a/trunk/fs/ext2/super.c +++ b/trunk/fs/ext2/super.c @@ -135,12 +135,12 @@ static void ext2_put_super (struct super_block * sb) return; } -static struct kmem_cache * ext2_inode_cachep; +static kmem_cache_t * ext2_inode_cachep; static struct inode *ext2_alloc_inode(struct super_block *sb) { struct ext2_inode_info *ei; - ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL); + ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, SLAB_KERNEL); if (!ei) return NULL; #ifdef CONFIG_EXT2_FS_POSIX_ACL @@ -156,7 +156,7 @@ static void ext2_destroy_inode(struct inode *inode) kmem_cache_free(ext2_inode_cachep, EXT2_I(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct ext2_inode_info *ei = (struct ext2_inode_info *) foo; @@ -1090,10 +1090,8 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf) { struct super_block *sb = dentry->d_sb; struct ext2_sb_info *sbi = EXT2_SB(sb); - struct ext2_super_block *es = sbi->s_es; unsigned long overhead; int i; - u64 fsid; if (test_opt (sb, MINIX_DF)) overhead = 0; @@ -1106,7 +1104,7 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf) * All of the blocks before first_data_block are * overhead */ - overhead = le32_to_cpu(es->s_first_data_block); + overhead = le32_to_cpu(sbi->s_es->s_first_data_block); /* * Add the overhead attributed to the superblock and @@ -1127,18 +1125,14 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf) buf->f_type = EXT2_SUPER_MAGIC; buf->f_bsize = 
sb->s_blocksize; - buf->f_blocks = le32_to_cpu(es->s_blocks_count) - overhead; + buf->f_blocks = le32_to_cpu(sbi->s_es->s_blocks_count) - overhead; buf->f_bfree = ext2_count_free_blocks(sb); - buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count); - if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count)) + buf->f_bavail = buf->f_bfree - le32_to_cpu(sbi->s_es->s_r_blocks_count); + if (buf->f_bfree < le32_to_cpu(sbi->s_es->s_r_blocks_count)) buf->f_bavail = 0; - buf->f_files = le32_to_cpu(es->s_inodes_count); - buf->f_ffree = ext2_count_free_inodes(sb); + buf->f_files = le32_to_cpu(sbi->s_es->s_inodes_count); + buf->f_ffree = ext2_count_free_inodes (sb); buf->f_namelen = EXT2_NAME_LEN; - fsid = le64_to_cpup((void *)es->s_uuid) ^ - le64_to_cpup((void *)es->s_uuid + sizeof(u64)); - buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL; - buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL; return 0; } diff --git a/trunk/fs/ext2/xattr.c b/trunk/fs/ext2/xattr.c index 247efd0b51d6..af52a7f8b291 100644 --- a/trunk/fs/ext2/xattr.c +++ b/trunk/fs/ext2/xattr.c @@ -342,9 +342,12 @@ static void ext2_xattr_update_super_block(struct super_block *sb) if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR)) return; - EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR); + lock_super(sb); + EXT2_SB(sb)->s_es->s_feature_compat |= + cpu_to_le32(EXT2_FEATURE_COMPAT_EXT_ATTR); sb->s_dirt = 1; mark_buffer_dirty(EXT2_SB(sb)->s_sbh); + unlock_super(sb); } /* diff --git a/trunk/fs/ext3/Makefile b/trunk/fs/ext3/Makefile index e77766a8b3f0..704cd44a40c2 100644 --- a/trunk/fs/ext3/Makefile +++ b/trunk/fs/ext3/Makefile @@ -5,7 +5,7 @@ obj-$(CONFIG_EXT3_FS) += ext3.o ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \ - ioctl.o namei.o super.o symlink.o hash.o resize.o ext3_jbd.o + ioctl.o namei.o super.o symlink.o hash.o resize.o ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o diff --git a/trunk/fs/ext3/balloc.c b/trunk/fs/ext3/balloc.c index 22161740ba29..b41a7d7e20f0 100644 --- a/trunk/fs/ext3/balloc.c +++ b/trunk/fs/ext3/balloc.c @@ -144,7 +144,7 @@ static void __rsv_window_dump(struct rb_root *root, int verbose, printk("Block Allocation Reservation Windows Map (%s):\n", fn); while (n) { - rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node); + rsv = list_entry(n, struct ext3_reserve_window_node, rsv_node); if (verbose) printk("reservation window 0x%p " "start: %lu, end: %lu\n", @@ -730,7 +730,7 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh, here = 0; p = ((char *)bh->b_data) + (here >> 3); - r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3)); + r = memscan(p, 0, (maxblocks - here + 7) >> 3); next = (r - ((char *)bh->b_data)) << 3; if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh)) @@ -949,7 +949,7 @@ static int find_next_reservable_window( prev = rsv; next = rb_next(&rsv->rsv_node); - rsv = rb_entry(next,struct ext3_reserve_window_node,rsv_node); + rsv = list_entry(next,struct ext3_reserve_window_node,rsv_node); /* * Reached the last reservation, we can just append to the @@ -1148,7 +1148,7 @@ static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv, * check if the first free block is within the * free space we just reserved */ - if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end) + if (start_block >= my_rsv->rsv_start && start_block < my_rsv->rsv_end) return 0; /* success */ /* * if the first free bit we found is out of the reservable 
space @@ -1193,7 +1193,7 @@ static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv, if (!next) my_rsv->rsv_end += size; else { - next_rsv = rb_entry(next, struct ext3_reserve_window_node, rsv_node); + next_rsv = list_entry(next, struct ext3_reserve_window_node, rsv_node); if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size) my_rsv->rsv_end += size; @@ -1271,7 +1271,7 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, } /* * grp_goal is a group relative block number (if there is a goal) - * 0 <= grp_goal < EXT3_BLOCKS_PER_GROUP(sb) + * 0 < grp_goal < EXT3_BLOCKS_PER_GROUP(sb) * first block is a filesystem wide block number * first block is the block number of the first block in this group */ @@ -1307,14 +1307,10 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, if (!goal_in_my_reservation(&my_rsv->rsv_window, grp_goal, group, sb)) grp_goal = -1; - } else if (grp_goal >= 0) { - int curr = my_rsv->rsv_end - - (grp_goal + group_first_block) + 1; - - if (curr < *count) - try_to_extend_reservation(my_rsv, sb, - *count - curr); - } + } else if (grp_goal > 0 && + (my_rsv->rsv_end-grp_goal+1) < *count) + try_to_extend_reservation(my_rsv, sb, + *count-my_rsv->rsv_end + grp_goal - 1); if ((my_rsv->rsv_start > group_last_block) || (my_rsv->rsv_end < group_first_block)) { @@ -1515,8 +1511,10 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode, if (group_no >= ngroups) group_no = 0; gdp = ext3_get_group_desc(sb, group_no, &gdp_bh); - if (!gdp) - goto io_error; + if (!gdp) { + *errp = -EIO; + goto out; + } free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); /* * skip this group if the number of @@ -1550,7 +1548,6 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode, */ if (my_rsv) { my_rsv = NULL; - windowsz = 0; group_no = goal_group; goto retry_alloc; } diff --git a/trunk/fs/ext3/dir.c b/trunk/fs/ext3/dir.c index 5a9313ecd4ef..d0b54f30b914 100644 --- a/trunk/fs/ext3/dir.c +++ b/trunk/fs/ext3/dir.c @@ -154,9 +154,6 @@ static int ext3_readdir(struct file * filp, ext3_error (sb, "ext3_readdir", "directory #%lu contains a hole at offset %lu", inode->i_ino, (unsigned long)filp->f_pos); - /* corrupt size? 
Maybe no more blocks to read */ - if (filp->f_pos > inode->i_blocks << 9) - break; filp->f_pos += sb->s_blocksize - offset; continue; } diff --git a/trunk/fs/ext3/ext3_jbd.c b/trunk/fs/ext3/ext3_jbd.c deleted file mode 100644 index e1f91fd26a93..000000000000 --- a/trunk/fs/ext3/ext3_jbd.c +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Interface between ext3 and JBD - */ - -#include - -int __ext3_journal_get_undo_access(const char *where, handle_t *handle, - struct buffer_head *bh) -{ - int err = journal_get_undo_access(handle, bh); - if (err) - ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err); - return err; -} - -int __ext3_journal_get_write_access(const char *where, handle_t *handle, - struct buffer_head *bh) -{ - int err = journal_get_write_access(handle, bh); - if (err) - ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err); - return err; -} - -int __ext3_journal_forget(const char *where, handle_t *handle, - struct buffer_head *bh) -{ - int err = journal_forget(handle, bh); - if (err) - ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err); - return err; -} - -int __ext3_journal_revoke(const char *where, handle_t *handle, - unsigned long blocknr, struct buffer_head *bh) -{ - int err = journal_revoke(handle, blocknr, bh); - if (err) - ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err); - return err; -} - -int __ext3_journal_get_create_access(const char *where, - handle_t *handle, struct buffer_head *bh) -{ - int err = journal_get_create_access(handle, bh); - if (err) - ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err); - return err; -} - -int __ext3_journal_dirty_metadata(const char *where, - handle_t *handle, struct buffer_head *bh) -{ - int err = journal_dirty_metadata(handle, bh); - if (err) - ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err); - return err; -} diff --git a/trunk/fs/ext3/inode.c b/trunk/fs/ext3/inode.c index beaf25f5112f..03ba5bcab186 100644 --- a/trunk/fs/ext3/inode.c +++ b/trunk/fs/ext3/inode.c @@ -1148,102 +1148,37 @@ static int do_journal_get_write_access(handle_t *handle, return ext3_journal_get_write_access(handle, bh); } -/* - * The idea of this helper function is following: - * if prepare_write has allocated some blocks, but not all of them, the - * transaction must include the content of the newly allocated blocks. - * This content is expected to be set to zeroes by block_prepare_write(). 
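The deleted ext3_jbd.c above held thin wrappers: each __ext3_journal_* helper forwards to the corresponding journal_* call and, on error, reports the failing call site through ext3_journal_abort_handle() before propagating the error. A plain-C sketch of that wrap-and-report-on-error pattern follows; the stub functions stand in for the real JBD API and are not part of the patch.

/* Sketch of the wrapper style removed with ext3_jbd.c: call, check,
 * report the location of the failure, return the error unchanged. */
#include <stdio.h>

static int journal_get_write_access_stub(int handle, int bh)
{
        (void)handle; (void)bh;
        return -5;                      /* pretend -EIO for the demo */
}

static void abort_handle(const char *where, const char *caller, int err)
{
        fprintf(stderr, "%s: aborting handle from %s, err %d\n",
                caller, where, err);
}

static int wrapped_get_write_access(const char *where, int handle, int bh)
{
        int err = journal_get_write_access_stub(handle, bh);
        if (err)
                abort_handle(where, __func__, err);
        return err;
}

int main(void)
{
        return wrapped_get_write_access("caller_site", 1, 2) ? 1 : 0;
}

The point of the wrappers is that every journalling failure is logged with the caller's location and the handle is aborted in one place, rather than each call site repeating that boilerplate.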
- * 2006/10/14 SAW - */ -static int ext3_prepare_failure(struct file *file, struct page *page, - unsigned from, unsigned to) -{ - struct address_space *mapping; - struct buffer_head *bh, *head, *next; - unsigned block_start, block_end; - unsigned blocksize; - int ret; - handle_t *handle = ext3_journal_current_handle(); - - mapping = page->mapping; - if (ext3_should_writeback_data(mapping->host)) { - /* optimization: no constraints about data */ -skip: - return ext3_journal_stop(handle); - } - - head = page_buffers(page); - blocksize = head->b_size; - for ( bh = head, block_start = 0; - bh != head || !block_start; - block_start = block_end, bh = next) - { - next = bh->b_this_page; - block_end = block_start + blocksize; - if (block_end <= from) - continue; - if (block_start >= to) { - block_start = to; - break; - } - if (!buffer_mapped(bh)) - /* prepare_write failed on this bh */ - break; - if (ext3_should_journal_data(mapping->host)) { - ret = do_journal_get_write_access(handle, bh); - if (ret) { - ext3_journal_stop(handle); - return ret; - } - } - /* - * block_start here becomes the first block where the current iteration - * of prepare_write failed. - */ - } - if (block_start <= from) - goto skip; - - /* commit allocated and zeroed buffers */ - return mapping->a_ops->commit_write(file, page, from, block_start); -} - static int ext3_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to) { struct inode *inode = page->mapping->host; - int ret, ret2; - int needed_blocks = ext3_writepage_trans_blocks(inode); + int ret, needed_blocks = ext3_writepage_trans_blocks(inode); handle_t *handle; int retries = 0; retry: handle = ext3_journal_start(inode, needed_blocks); - if (IS_ERR(handle)) - return PTR_ERR(handle); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + goto out; + } if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode)) ret = nobh_prepare_write(page, from, to, ext3_get_block); else ret = block_prepare_write(page, from, to, ext3_get_block); if (ret) - goto failure; + goto prepare_write_failed; if (ext3_should_journal_data(inode)) { ret = walk_page_buffers(handle, page_buffers(page), from, to, NULL, do_journal_get_write_access); - if (ret) - /* fatal error, just put the handle and return */ - journal_stop(handle); } - return ret; - -failure: - ret2 = ext3_prepare_failure(file, page, from, to); - if (ret2 < 0) - return ret2; +prepare_write_failed: + if (ret) + ext3_journal_stop(handle); if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries)) goto retry; - /* retry number exceeded, or other error like -EDQUOT */ +out: return ret; } diff --git a/trunk/fs/ext3/namei.c b/trunk/fs/ext3/namei.c index 60d2f9dbdb00..906731a20f1a 100644 --- a/trunk/fs/ext3/namei.c +++ b/trunk/fs/ext3/namei.c @@ -552,15 +552,6 @@ static int htree_dirblock_to_tree(struct file *dir_file, dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(0)); for (; de < top; de = ext3_next_entry(de)) { - if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh, - (block<i_sb)) - +((char *)de - bh->b_data))) { - /* On error, skip the f_pos to the next block. 
*/ - dir_file->f_pos = (dir_file->f_pos | - (dir->i_sb->s_blocksize - 1)) + 1; - brelse (bh); - return count; - } ext3fs_dirhash(de->name, de->name_len, hinfo); if ((hinfo->hash < start_hash) || ((hinfo->hash == start_hash) && diff --git a/trunk/fs/ext3/super.c b/trunk/fs/ext3/super.c index 580b8a6ca979..afc2d4f42d77 100644 --- a/trunk/fs/ext3/super.c +++ b/trunk/fs/ext3/super.c @@ -436,7 +436,7 @@ static void ext3_put_super (struct super_block * sb) return; } -static struct kmem_cache *ext3_inode_cachep; +static kmem_cache_t *ext3_inode_cachep; /* * Called inside transaction, so use GFP_NOFS @@ -445,7 +445,7 @@ static struct inode *ext3_alloc_inode(struct super_block *sb) { struct ext3_inode_info *ei; - ei = kmem_cache_alloc(ext3_inode_cachep, GFP_NOFS); + ei = kmem_cache_alloc(ext3_inode_cachep, SLAB_NOFS); if (!ei) return NULL; #ifdef CONFIG_EXT3_FS_POSIX_ACL @@ -462,7 +462,7 @@ static void ext3_destroy_inode(struct inode *inode) kmem_cache_free(ext3_inode_cachep, EXT3_I(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct ext3_inode_info *ei = (struct ext3_inode_info *) foo; @@ -1264,12 +1264,6 @@ static void ext3_orphan_cleanup (struct super_block * sb, return; } - if (bdev_read_only(sb->s_bdev)) { - printk(KERN_ERR "EXT3-fs: write access " - "unavailable, skipping orphan cleanup.\n"); - return; - } - if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) { if (es->s_last_orphan) jbd_debug(1, "Errors on filesystem, " @@ -2393,7 +2387,6 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf) struct ext3_super_block *es = sbi->s_es; ext3_fsblk_t overhead; int i; - u64 fsid; if (test_opt (sb, MINIX_DF)) overhead = 0; @@ -2440,10 +2433,6 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf) buf->f_files = le32_to_cpu(es->s_inodes_count); buf->f_ffree = percpu_counter_sum(&sbi->s_freeinodes_counter); buf->f_namelen = EXT3_NAME_LEN; - fsid = le64_to_cpup((void *)es->s_uuid) ^ - le64_to_cpup((void *)es->s_uuid + sizeof(u64)); - buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL; - buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL; return 0; } diff --git a/trunk/fs/ext3/xattr.c b/trunk/fs/ext3/xattr.c index 99857a400f4b..f86f2482f01d 100644 --- a/trunk/fs/ext3/xattr.c +++ b/trunk/fs/ext3/xattr.c @@ -459,11 +459,14 @@ static void ext3_xattr_update_super_block(handle_t *handle, if (EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR)) return; + lock_super(sb); if (ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh) == 0) { - EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR); + EXT3_SB(sb)->s_es->s_feature_compat |= + cpu_to_le32(EXT3_FEATURE_COMPAT_EXT_ATTR); sb->s_dirt = 1; ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); } + unlock_super(sb); } /* diff --git a/trunk/fs/ext4/Makefile b/trunk/fs/ext4/Makefile index ae6e7e502ac9..a6acb96ebeb9 100644 --- a/trunk/fs/ext4/Makefile +++ b/trunk/fs/ext4/Makefile @@ -5,8 +5,7 @@ obj-$(CONFIG_EXT4DEV_FS) += ext4dev.o ext4dev-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \ - ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \ - ext4_jbd2.o + ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o ext4dev-$(CONFIG_EXT4DEV_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o ext4dev-$(CONFIG_EXT4DEV_FS_POSIX_ACL) += acl.o diff --git a/trunk/fs/ext4/balloc.c b/trunk/fs/ext4/balloc.c index c4dd1103ccf1..5d45582f9517 100644 --- a/trunk/fs/ext4/balloc.c +++ 
b/trunk/fs/ext4/balloc.c @@ -165,7 +165,7 @@ static void __rsv_window_dump(struct rb_root *root, int verbose, printk("Block Allocation Reservation Windows Map (%s):\n", fn); while (n) { - rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node); + rsv = list_entry(n, struct ext4_reserve_window_node, rsv_node); if (verbose) printk("reservation window 0x%p " "start: %llu, end: %llu\n", @@ -747,7 +747,7 @@ find_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh, here = 0; p = ((char *)bh->b_data) + (here >> 3); - r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3)); + r = memscan(p, 0, (maxblocks - here + 7) >> 3); next = (r - ((char *)bh->b_data)) << 3; if (next < maxblocks && next >= start && ext4_test_allocatable(next, bh)) @@ -966,7 +966,7 @@ static int find_next_reservable_window( prev = rsv; next = rb_next(&rsv->rsv_node); - rsv = rb_entry(next,struct ext4_reserve_window_node,rsv_node); + rsv = list_entry(next,struct ext4_reserve_window_node,rsv_node); /* * Reached the last reservation, we can just append to the @@ -1165,7 +1165,7 @@ static int alloc_new_reservation(struct ext4_reserve_window_node *my_rsv, * check if the first free block is within the * free space we just reserved */ - if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end) + if (start_block >= my_rsv->rsv_start && start_block < my_rsv->rsv_end) return 0; /* success */ /* * if the first free bit we found is out of the reservable space @@ -1210,7 +1210,7 @@ static void try_to_extend_reservation(struct ext4_reserve_window_node *my_rsv, if (!next) my_rsv->rsv_end += size; else { - next_rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node); + next_rsv = list_entry(next, struct ext4_reserve_window_node, rsv_node); if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size) my_rsv->rsv_end += size; @@ -1288,7 +1288,7 @@ ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, } /* * grp_goal is a group relative block number (if there is a goal) - * 0 <= grp_goal < EXT4_BLOCKS_PER_GROUP(sb) + * 0 < grp_goal < EXT4_BLOCKS_PER_GROUP(sb) * first block is a filesystem wide block number * first block is the block number of the first block in this group */ @@ -1324,14 +1324,10 @@ ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, if (!goal_in_my_reservation(&my_rsv->rsv_window, grp_goal, group, sb)) grp_goal = -1; - } else if (grp_goal >= 0) { - int curr = my_rsv->rsv_end - - (grp_goal + group_first_block) + 1; - - if (curr < *count) - try_to_extend_reservation(my_rsv, sb, - *count - curr); - } + } else if (grp_goal > 0 && + (my_rsv->rsv_end-grp_goal+1) < *count) + try_to_extend_reservation(my_rsv, sb, + *count-my_rsv->rsv_end + grp_goal - 1); if ((my_rsv->rsv_start > group_last_block) || (my_rsv->rsv_end < group_first_block)) { @@ -1529,8 +1525,10 @@ ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode, if (group_no >= ngroups) group_no = 0; gdp = ext4_get_group_desc(sb, group_no, &gdp_bh); - if (!gdp) - goto io_error; + if (!gdp) { + *errp = -EIO; + goto out; + } free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); /* * skip this group if the number of @@ -1564,7 +1562,6 @@ ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode, */ if (my_rsv) { my_rsv = NULL; - windowsz = 0; group_no = goal_group; goto retry_alloc; } diff --git a/trunk/fs/ext4/dir.c b/trunk/fs/ext4/dir.c index f2ed3e7fb9f5..f8595787a70e 100644 --- a/trunk/fs/ext4/dir.c +++ b/trunk/fs/ext4/dir.c @@ -153,9 +153,6 @@ static int ext4_readdir(struct file * filp, 
ext4_error (sb, "ext4_readdir", "directory #%lu contains a hole at offset %lu", inode->i_ino, (unsigned long)filp->f_pos); - /* corrupt size? Maybe no more blocks to read */ - if (filp->f_pos > inode->i_blocks << 9) - break; filp->f_pos += sb->s_blocksize - offset; continue; } diff --git a/trunk/fs/ext4/ext4_jbd2.c b/trunk/fs/ext4/ext4_jbd2.c deleted file mode 100644 index d6afe4e27340..000000000000 --- a/trunk/fs/ext4/ext4_jbd2.c +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Interface between ext4 and JBD - */ - -#include - -int __ext4_journal_get_undo_access(const char *where, handle_t *handle, - struct buffer_head *bh) -{ - int err = jbd2_journal_get_undo_access(handle, bh); - if (err) - ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err); - return err; -} - -int __ext4_journal_get_write_access(const char *where, handle_t *handle, - struct buffer_head *bh) -{ - int err = jbd2_journal_get_write_access(handle, bh); - if (err) - ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err); - return err; -} - -int __ext4_journal_forget(const char *where, handle_t *handle, - struct buffer_head *bh) -{ - int err = jbd2_journal_forget(handle, bh); - if (err) - ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err); - return err; -} - -int __ext4_journal_revoke(const char *where, handle_t *handle, - ext4_fsblk_t blocknr, struct buffer_head *bh) -{ - int err = jbd2_journal_revoke(handle, blocknr, bh); - if (err) - ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err); - return err; -} - -int __ext4_journal_get_create_access(const char *where, - handle_t *handle, struct buffer_head *bh) -{ - int err = jbd2_journal_get_create_access(handle, bh); - if (err) - ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err); - return err; -} - -int __ext4_journal_dirty_metadata(const char *where, - handle_t *handle, struct buffer_head *bh) -{ - int err = jbd2_journal_dirty_metadata(handle, bh); - if (err) - ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err); - return err; -} diff --git a/trunk/fs/ext4/extents.c b/trunk/fs/ext4/extents.c index dc2724fa7622..2608dce18f3e 100644 --- a/trunk/fs/ext4/extents.c +++ b/trunk/fs/ext4/extents.c @@ -48,7 +48,7 @@ * ext_pblock: * combine low and high parts of physical block number into ext4_fsblk_t */ -static ext4_fsblk_t ext_pblock(struct ext4_extent *ex) +static inline ext4_fsblk_t ext_pblock(struct ext4_extent *ex) { ext4_fsblk_t block; @@ -61,7 +61,7 @@ static ext4_fsblk_t ext_pblock(struct ext4_extent *ex) * idx_pblock: * combine low and high parts of a leaf physical block number into ext4_fsblk_t */ -static ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix) +static inline ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix) { ext4_fsblk_t block; @@ -75,7 +75,7 @@ static ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix) * stores a large physical block number into an extent struct, * breaking it into parts */ -static void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb) +static inline void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb) { ex->ee_start = cpu_to_le32((unsigned long) (pb & 0xffffffff)); ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff); @@ -86,7 +86,7 @@ static void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb) * stores a large physical block number into an index struct, * breaking it into parts */ -static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb) +static inline void ext4_idx_store_pblock(struct ext4_extent_idx 
*ix, ext4_fsblk_t pb) { ix->ei_leaf = cpu_to_le32((unsigned long) (pb & 0xffffffff)); ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff); @@ -186,8 +186,7 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, depth = path->p_depth; /* try to predict block placement */ - ex = path[depth].p_ext; - if (ex) + if ((ex = path[depth].p_ext)) return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block)); /* it looks like index is empty; @@ -216,7 +215,7 @@ ext4_ext_new_block(handle_t *handle, struct inode *inode, return newblock; } -static int ext4_ext_space_block(struct inode *inode) +static inline int ext4_ext_space_block(struct inode *inode) { int size; @@ -229,7 +228,7 @@ static int ext4_ext_space_block(struct inode *inode) return size; } -static int ext4_ext_space_block_idx(struct inode *inode) +static inline int ext4_ext_space_block_idx(struct inode *inode) { int size; @@ -242,7 +241,7 @@ static int ext4_ext_space_block_idx(struct inode *inode) return size; } -static int ext4_ext_space_root(struct inode *inode) +static inline int ext4_ext_space_root(struct inode *inode) { int size; @@ -256,7 +255,7 @@ static int ext4_ext_space_root(struct inode *inode) return size; } -static int ext4_ext_space_root_idx(struct inode *inode) +static inline int ext4_ext_space_root_idx(struct inode *inode) { int size; @@ -477,12 +476,13 @@ ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path) /* account possible depth increase */ if (!path) { - path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2), + path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 2), GFP_NOFS); if (!path) return ERR_PTR(-ENOMEM); alloc = 1; } + memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1)); path[0].p_hdr = eh; /* walk through the tree */ @@ -543,8 +543,7 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, struct ext4_extent_idx *ix; int len, err; - err = ext4_ext_get_access(handle, inode, curp); - if (err) + if ((err = ext4_ext_get_access(handle, inode, curp))) return err; BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block)); @@ -642,9 +641,10 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, * We need this to handle errors and free blocks * upon them. 
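ext4_ext_store_pblock() above splits a 48-bit physical block number into a 32-bit low word (ee_start) and a 16-bit high word (ee_start_hi), writing the high part as (pb >> 31) >> 1 so the shift stays defined even when long is 32 bits. A quick userspace check of that split and its inverse combination; the on-disk structures and le32/le16 helpers are omitted here.

/* Verify the split used by ext4_ext_store_pblock() and the inverse
 * recombination: low 32 bits | (high 16 bits << 32). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t pb = 0x123456789abcULL;        /* a 48-bit block number */

        uint32_t lo = (uint32_t)(pb & 0xffffffff);
        uint16_t hi = (uint16_t)(((pb >> 31) >> 1) & 0xffff);

        uint64_t back = ((uint64_t)hi << 31 << 1) | lo;
        assert(back == pb);
        printf("lo=%#x hi=%#x recombined=%#llx\n",
               lo, hi, (unsigned long long)back);
        return 0;
}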
*/ - ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS); + ablocks = kmalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS); if (!ablocks) return -ENOMEM; + memset(ablocks, 0, sizeof(ext4_fsblk_t) * depth); /* allocate all needed blocks */ ext_debug("allocate %d blocks for indexes/leaf\n", depth - at); @@ -665,8 +665,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, } lock_buffer(bh); - err = ext4_journal_get_create_access(handle, bh); - if (err) + if ((err = ext4_journal_get_create_access(handle, bh))) goto cleanup; neh = ext_block_hdr(bh); @@ -703,21 +702,18 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, set_buffer_uptodate(bh); unlock_buffer(bh); - err = ext4_journal_dirty_metadata(handle, bh); - if (err) + if ((err = ext4_journal_dirty_metadata(handle, bh))) goto cleanup; brelse(bh); bh = NULL; /* correct old leaf */ if (m) { - err = ext4_ext_get_access(handle, inode, path + depth); - if (err) + if ((err = ext4_ext_get_access(handle, inode, path + depth))) goto cleanup; path[depth].p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m); - err = ext4_ext_dirty(handle, inode, path + depth); - if (err) + if ((err = ext4_ext_dirty(handle, inode, path + depth))) goto cleanup; } @@ -740,8 +736,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, } lock_buffer(bh); - err = ext4_journal_get_create_access(handle, bh); - if (err) + if ((err = ext4_journal_get_create_access(handle, bh))) goto cleanup; neh = ext_block_hdr(bh); @@ -785,8 +780,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, set_buffer_uptodate(bh); unlock_buffer(bh); - err = ext4_journal_dirty_metadata(handle, bh); - if (err) + if ((err = ext4_journal_dirty_metadata(handle, bh))) goto cleanup; brelse(bh); bh = NULL; @@ -806,6 +800,9 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, } /* insert new index */ + if (err) + goto cleanup; + err = ext4_ext_insert_index(handle, inode, path + at, le32_to_cpu(border), newblock); @@ -860,8 +857,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, } lock_buffer(bh); - err = ext4_journal_get_create_access(handle, bh); - if (err) { + if ((err = ext4_journal_get_create_access(handle, bh))) { unlock_buffer(bh); goto out; } @@ -881,13 +877,11 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, set_buffer_uptodate(bh); unlock_buffer(bh); - err = ext4_journal_dirty_metadata(handle, bh); - if (err) + if ((err = ext4_journal_dirty_metadata(handle, bh))) goto out; /* create index in new top-level index: num,max,pointer */ - err = ext4_ext_get_access(handle, inode, curp); - if (err) + if ((err = ext4_ext_get_access(handle, inode, curp))) goto out; curp->p_hdr->eh_magic = EXT4_EXT_MAGIC; @@ -1079,31 +1073,27 @@ int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, */ k = depth - 1; border = path[depth].p_ext->ee_block; - err = ext4_ext_get_access(handle, inode, path + k); - if (err) + if ((err = ext4_ext_get_access(handle, inode, path + k))) return err; path[k].p_idx->ei_block = border; - err = ext4_ext_dirty(handle, inode, path + k); - if (err) + if ((err = ext4_ext_dirty(handle, inode, path + k))) return err; while (k--) { /* change all left-side indexes */ if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) break; - err = ext4_ext_get_access(handle, inode, path + k); - if (err) + if ((err = ext4_ext_get_access(handle, inode, path + k))) break; path[k].p_idx->ei_block = border; - err = ext4_ext_dirty(handle, inode, path 
+ k); - if (err) + if ((err = ext4_ext_dirty(handle, inode, path + k))) break; } return err; } -static int +static int inline ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, struct ext4_extent *ex2) { @@ -1155,8 +1145,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, le16_to_cpu(newext->ee_len), le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len), ext_pblock(ex)); - err = ext4_ext_get_access(handle, inode, path + depth); - if (err) + if ((err = ext4_ext_get_access(handle, inode, path + depth))) return err; ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len) + le16_to_cpu(newext->ee_len)); @@ -1206,8 +1195,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, has_space: nearex = path[depth].p_ext; - err = ext4_ext_get_access(handle, inode, path + depth); - if (err) + if ((err = ext4_ext_get_access(handle, inode, path + depth))) goto cleanup; if (!nearex) { @@ -1395,7 +1383,7 @@ int ext4_ext_walk_space(struct inode *inode, unsigned long block, return err; } -static void +static inline void ext4_ext_put_in_cache(struct inode *inode, __u32 block, __u32 len, __u32 start, int type) { @@ -1413,7 +1401,7 @@ ext4_ext_put_in_cache(struct inode *inode, __u32 block, * calculate boundaries of the gap that the requested block fits into * and cache this gap */ -static void +static inline void ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, unsigned long block) { @@ -1454,7 +1442,7 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP); } -static int +static inline int ext4_ext_in_cache(struct inode *inode, unsigned long block, struct ext4_extent *ex) { @@ -1501,12 +1489,10 @@ int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, path--; leaf = idx_pblock(path->p_idx); BUG_ON(path->p_hdr->eh_entries == 0); - err = ext4_ext_get_access(handle, inode, path); - if (err) + if ((err = ext4_ext_get_access(handle, inode, path))) return err; path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1); - err = ext4_ext_dirty(handle, inode, path); - if (err) + if ((err = ext4_ext_dirty(handle, inode, path))) return err; ext_debug("index is empty, remove it, free block %llu\n", leaf); bh = sb_find_get_block(inode->i_sb, leaf); @@ -1523,7 +1509,7 @@ int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, * the caller should calculate credits under truncate_mutex and * pass the actual path. 
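Several hunks above replace kzalloc() with kmalloc() followed by memset(); the two forms are equivalent, since kzalloc(n, flags) is simply a zeroing wrapper around kmalloc(n, flags). A userspace analogue of that helper, for illustration only:

/* zalloc(): allocate and zero, the shape kzalloc() provides. */
#include <stdlib.h>
#include <string.h>

static void *zalloc(size_t n)
{
        void *p = malloc(n);
        if (p)
                memset(p, 0, n);        /* what kzalloc() does for you */
        return p;
}

int main(void)
{
        int *a = zalloc(8 * sizeof(*a));
        int ok = a && a[0] == 0 && a[7] == 0;
        free(a);
        return ok ? 0 : 1;
}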
*/ -int ext4_ext_calc_credits_for_insert(struct inode *inode, +int inline ext4_ext_calc_credits_for_insert(struct inode *inode, struct ext4_ext_path *path) { int depth, needed; @@ -1548,17 +1534,16 @@ int ext4_ext_calc_credits_for_insert(struct inode *inode, /* * tree can be full, so it would need to grow in depth: - * we need one credit to modify old root, credits for - * new root will be added in split accounting + * allocation + old root + new root */ - needed += 1; + needed += 2 + 1 + 1; /* * Index split can happen, we would need: * allocate intermediate indexes (bitmap + group) * + change two blocks at each level, but root (already included) */ - needed += (depth * 2) + (depth * 2); + needed = (depth * 2) + (depth * 2); /* any allocation modifies superblock */ needed += 1; @@ -1733,7 +1718,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, * ext4_ext_more_to_rm: * returns 1 if current index has to be freed (even partial) */ -static int +static int inline ext4_ext_more_to_rm(struct ext4_ext_path *path) { BUG_ON(path->p_idx == NULL); @@ -1771,11 +1756,12 @@ int ext4_ext_remove_space(struct inode *inode, unsigned long start) * We start scanning from right side, freeing all the blocks * after i_size and walking into the tree depth-wise. */ - path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL); + path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL); if (path == NULL) { ext4_journal_stop(handle); return -ENOMEM; } + memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1)); path[0].p_hdr = ext_inode_hdr(inode); if (ext4_ext_check_header(__FUNCTION__, inode, path[0].p_hdr)) { err = -EIO; @@ -1946,8 +1932,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, mutex_lock(&EXT4_I(inode)->truncate_mutex); /* check in cache */ - goal = ext4_ext_in_cache(inode, iblock, &newex); - if (goal) { + if ((goal = ext4_ext_in_cache(inode, iblock, &newex))) { if (goal == EXT4_EXT_CACHE_GAP) { if (!create) { /* block isn't allocated yet and @@ -1986,8 +1971,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, */ BUG_ON(path[depth].p_ext == NULL && depth != 0); - ex = path[depth].p_ext; - if (ex) { + if ((ex = path[depth].p_ext)) { unsigned long ee_block = le32_to_cpu(ex->ee_block); ext4_fsblk_t ee_start = ext_pblock(ex); unsigned short ee_len = le16_to_cpu(ex->ee_len); diff --git a/trunk/fs/ext4/inode.c b/trunk/fs/ext4/inode.c index 1d85d4ec9598..0a60ec5a16db 100644 --- a/trunk/fs/ext4/inode.c +++ b/trunk/fs/ext4/inode.c @@ -1147,102 +1147,37 @@ static int do_journal_get_write_access(handle_t *handle, return ext4_journal_get_write_access(handle, bh); } -/* - * The idea of this helper function is following: - * if prepare_write has allocated some blocks, but not all of them, the - * transaction must include the content of the newly allocated blocks. - * This content is expected to be set to zeroes by block_prepare_write(). 
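ext3_prepare_write() and ext4_prepare_write() above keep their retry loop: when the attempt fails with -ENOSPC and *_should_retry_alloc() indicates that a pending journal commit may release blocks, control jumps back to the retry label a bounded number of times. The stripped-down userspace sketch below shows only that control-flow shape; try_prepare() and MAX_RETRIES are stand-ins, not the filesystem code.

/* Bounded retry on -ENOSPC, mirroring the goto-retry pattern above. */
#include <errno.h>
#include <stdio.h>

#define MAX_RETRIES 3

static int try_prepare(int attempt)
{
        /* Pretend the first two attempts hit -ENOSPC. */
        return attempt < 2 ? -ENOSPC : 0;
}

int main(void)
{
        int retries = 0, ret;
retry:
        ret = try_prepare(retries);
        if (ret == -ENOSPC && retries++ < MAX_RETRIES)
                goto retry;
        printf("finished with ret=%d after %d retries\n", ret, retries);
        return ret ? 1 : 0;
}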
- * 2006/10/14 SAW - */ -static int ext4_prepare_failure(struct file *file, struct page *page, - unsigned from, unsigned to) -{ - struct address_space *mapping; - struct buffer_head *bh, *head, *next; - unsigned block_start, block_end; - unsigned blocksize; - int ret; - handle_t *handle = ext4_journal_current_handle(); - - mapping = page->mapping; - if (ext4_should_writeback_data(mapping->host)) { - /* optimization: no constraints about data */ -skip: - return ext4_journal_stop(handle); - } - - head = page_buffers(page); - blocksize = head->b_size; - for ( bh = head, block_start = 0; - bh != head || !block_start; - block_start = block_end, bh = next) - { - next = bh->b_this_page; - block_end = block_start + blocksize; - if (block_end <= from) - continue; - if (block_start >= to) { - block_start = to; - break; - } - if (!buffer_mapped(bh)) - /* prepare_write failed on this bh */ - break; - if (ext4_should_journal_data(mapping->host)) { - ret = do_journal_get_write_access(handle, bh); - if (ret) { - ext4_journal_stop(handle); - return ret; - } - } - /* - * block_start here becomes the first block where the current iteration - * of prepare_write failed. - */ - } - if (block_start <= from) - goto skip; - - /* commit allocated and zeroed buffers */ - return mapping->a_ops->commit_write(file, page, from, block_start); -} - static int ext4_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to) { struct inode *inode = page->mapping->host; - int ret, ret2; - int needed_blocks = ext4_writepage_trans_blocks(inode); + int ret, needed_blocks = ext4_writepage_trans_blocks(inode); handle_t *handle; int retries = 0; retry: handle = ext4_journal_start(inode, needed_blocks); - if (IS_ERR(handle)) - return PTR_ERR(handle); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + goto out; + } if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode)) ret = nobh_prepare_write(page, from, to, ext4_get_block); else ret = block_prepare_write(page, from, to, ext4_get_block); if (ret) - goto failure; + goto prepare_write_failed; if (ext4_should_journal_data(inode)) { ret = walk_page_buffers(handle, page_buffers(page), from, to, NULL, do_journal_get_write_access); - if (ret) - /* fatal error, just put the handle and return */ - journal_stop(handle); } - return ret; - -failure: - ret2 = ext4_prepare_failure(file, page, from, to); - if (ret2 < 0) - return ret2; +prepare_write_failed: + if (ret) + ext4_journal_stop(handle); if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry; - /* retry number exceeded, or other error like -EDQUOT */ +out: return ret; } diff --git a/trunk/fs/ext4/namei.c b/trunk/fs/ext4/namei.c index 859990eac504..8b1bd03d20f5 100644 --- a/trunk/fs/ext4/namei.c +++ b/trunk/fs/ext4/namei.c @@ -552,15 +552,6 @@ static int htree_dirblock_to_tree(struct file *dir_file, dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0)); for (; de < top; de = ext4_next_entry(de)) { - if (!ext4_check_dir_entry("htree_dirblock_to_tree", dir, de, bh, - (block<i_sb)) - +((char *)de - bh->b_data))) { - /* On error, skip the f_pos to the next block. 
*/ - dir_file->f_pos = (dir_file->f_pos | - (dir->i_sb->s_blocksize - 1)) + 1; - brelse (bh); - return count; - } ext4fs_dirhash(de->name, de->name_len, hinfo); if ((hinfo->hash < start_hash) || ((hinfo->hash == start_hash) && diff --git a/trunk/fs/ext4/super.c b/trunk/fs/ext4/super.c index 486a641ca71b..b4b022aa2bc2 100644 --- a/trunk/fs/ext4/super.c +++ b/trunk/fs/ext4/super.c @@ -486,7 +486,7 @@ static void ext4_put_super (struct super_block * sb) return; } -static struct kmem_cache *ext4_inode_cachep; +static kmem_cache_t *ext4_inode_cachep; /* * Called inside transaction, so use GFP_NOFS @@ -495,7 +495,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) { struct ext4_inode_info *ei; - ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS); + ei = kmem_cache_alloc(ext4_inode_cachep, SLAB_NOFS); if (!ei) return NULL; #ifdef CONFIG_EXT4DEV_FS_POSIX_ACL @@ -513,7 +513,7 @@ static void ext4_destroy_inode(struct inode *inode) kmem_cache_free(ext4_inode_cachep, EXT4_I(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct ext4_inode_info *ei = (struct ext4_inode_info *) foo; @@ -1321,12 +1321,6 @@ static void ext4_orphan_cleanup (struct super_block * sb, return; } - if (bdev_read_only(sb->s_bdev)) { - printk(KERN_ERR "EXT4-fs: write access " - "unavailable, skipping orphan cleanup.\n"); - return; - } - if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { if (es->s_last_orphan) jbd_debug(1, "Errors on filesystem, " @@ -2466,7 +2460,6 @@ static int ext4_statfs (struct dentry * dentry, struct kstatfs * buf) struct ext4_super_block *es = sbi->s_es; ext4_fsblk_t overhead; int i; - u64 fsid; if (test_opt (sb, MINIX_DF)) overhead = 0; @@ -2513,10 +2506,6 @@ static int ext4_statfs (struct dentry * dentry, struct kstatfs * buf) buf->f_files = le32_to_cpu(es->s_inodes_count); buf->f_ffree = percpu_counter_sum(&sbi->s_freeinodes_counter); buf->f_namelen = EXT4_NAME_LEN; - fsid = le64_to_cpup((void *)es->s_uuid) ^ - le64_to_cpup((void *)es->s_uuid + sizeof(u64)); - buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL; - buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL; return 0; } diff --git a/trunk/fs/ext4/xattr.c b/trunk/fs/ext4/xattr.c index dc969c357aa1..63233cd946a7 100644 --- a/trunk/fs/ext4/xattr.c +++ b/trunk/fs/ext4/xattr.c @@ -459,11 +459,14 @@ static void ext4_xattr_update_super_block(handle_t *handle, if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR)) return; + lock_super(sb); if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) { - EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR); + EXT4_SB(sb)->s_es->s_feature_compat |= + cpu_to_le32(EXT4_FEATURE_COMPAT_EXT_ATTR); sb->s_dirt = 1; ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh); } + unlock_super(sb); } /* diff --git a/trunk/fs/fat/cache.c b/trunk/fs/fat/cache.c index 05c2941c74f2..82cc4f59e3ba 100644 --- a/trunk/fs/fat/cache.c +++ b/trunk/fs/fat/cache.c @@ -34,9 +34,9 @@ static inline int fat_max_cache(struct inode *inode) return FAT_MAX_CACHE; } -static struct kmem_cache *fat_cache_cachep; +static kmem_cache_t *fat_cache_cachep; -static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) +static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) { struct fat_cache *cache = (struct fat_cache *)foo; @@ -63,7 +63,7 @@ void fat_cache_destroy(void) static inline struct fat_cache *fat_cache_alloc(struct inode *inode) { - return 
kmem_cache_alloc(fat_cache_cachep, GFP_KERNEL); + return kmem_cache_alloc(fat_cache_cachep, SLAB_KERNEL); } static inline void fat_cache_free(struct fat_cache *cache) diff --git a/trunk/fs/fat/inode.c b/trunk/fs/fat/inode.c index a9e4688582a2..78945b53b0f8 100644 --- a/trunk/fs/fat/inode.c +++ b/trunk/fs/fat/inode.c @@ -477,12 +477,12 @@ static void fat_put_super(struct super_block *sb) kfree(sbi); } -static struct kmem_cache *fat_inode_cachep; +static kmem_cache_t *fat_inode_cachep; static struct inode *fat_alloc_inode(struct super_block *sb) { struct msdos_inode_info *ei; - ei = kmem_cache_alloc(fat_inode_cachep, GFP_KERNEL); + ei = kmem_cache_alloc(fat_inode_cachep, SLAB_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -493,7 +493,7 @@ static void fat_destroy_inode(struct inode *inode) kmem_cache_free(fat_inode_cachep, MSDOS_I(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct msdos_inode_info *ei = (struct msdos_inode_info *)foo; diff --git a/trunk/fs/fcntl.c b/trunk/fs/fcntl.c index 4740d35e52cd..e4f26165f12a 100644 --- a/trunk/fs/fcntl.c +++ b/trunk/fs/fcntl.c @@ -553,7 +553,7 @@ int send_sigurg(struct fown_struct *fown) } static DEFINE_RWLOCK(fasync_lock); -static struct kmem_cache *fasync_cache __read_mostly; +static kmem_cache_t *fasync_cache __read_mostly; /* * fasync_helper() is used by some character device drivers (mainly mice) @@ -567,7 +567,7 @@ int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fap int result = 0; if (on) { - new = kmem_cache_alloc(fasync_cache, GFP_KERNEL); + new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL); if (!new) return -ENOMEM; } diff --git a/trunk/fs/file.c b/trunk/fs/file.c index 51aef675470f..8e81775c5dc8 100644 --- a/trunk/fs/file.c +++ b/trunk/fs/file.c @@ -21,6 +21,7 @@ struct fdtable_defer { spinlock_t lock; struct work_struct wq; + struct timer_list timer; struct fdtable *next; }; @@ -74,10 +75,24 @@ static void __free_fdtable(struct fdtable *fdt) kfree(fdt); } -static void free_fdtable_work(struct work_struct *work) +static void fdtable_timer(unsigned long data) +{ + struct fdtable_defer *fddef = (struct fdtable_defer *)data; + + spin_lock(&fddef->lock); + /* + * If someone already emptied the queue return. + */ + if (!fddef->next) + goto out; + if (!schedule_work(&fddef->wq)) + mod_timer(&fddef->timer, 5); +out: + spin_unlock(&fddef->lock); +} + +static void free_fdtable_work(struct fdtable_defer *f) { - struct fdtable_defer *f = - container_of(work, struct fdtable_defer, wq); struct fdtable *fdt; spin_lock_bh(&f->lock); @@ -127,8 +142,13 @@ static void free_fdtable_rcu(struct rcu_head *rcu) spin_lock(&fddef->lock); fdt->next = fddef->next; fddef->next = fdt; - /* vmallocs are handled from the workqueue context */ - schedule_work(&fddef->wq); + /* + * vmallocs are handled from the workqueue context. + * If the per-cpu workqueue is running, then we + * defer work scheduling through a timer. 
+ */ + if (!schedule_work(&fddef->wq)) + mod_timer(&fddef->timer, 5); spin_unlock(&fddef->lock); put_cpu_var(fdtable_defer_list); } @@ -331,7 +351,10 @@ static void __devinit fdtable_defer_list_init(int cpu) { struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu); spin_lock_init(&fddef->lock); - INIT_WORK(&fddef->wq, free_fdtable_work); + INIT_WORK(&fddef->wq, (void (*)(void *))free_fdtable_work, fddef); + init_timer(&fddef->timer); + fddef->timer.data = (unsigned long)fddef; + fddef->timer.function = fdtable_timer; fddef->next = NULL; } diff --git a/trunk/fs/freevxfs/vxfs_inode.c b/trunk/fs/freevxfs/vxfs_inode.c index 0b7ae897cb78..4786d51ad3bd 100644 --- a/trunk/fs/freevxfs/vxfs_inode.c +++ b/trunk/fs/freevxfs/vxfs_inode.c @@ -46,7 +46,7 @@ extern const struct address_space_operations vxfs_immed_aops; extern struct inode_operations vxfs_immed_symlink_iops; -struct kmem_cache *vxfs_inode_cachep; +kmem_cache_t *vxfs_inode_cachep; #ifdef DIAGNOSTIC @@ -103,7 +103,7 @@ vxfs_blkiget(struct super_block *sbp, u_long extent, ino_t ino) struct vxfs_inode_info *vip; struct vxfs_dinode *dip; - if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL))) + if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, SLAB_KERNEL))) goto fail; dip = (struct vxfs_dinode *)(bp->b_data + offset); memcpy(vip, dip, sizeof(*vip)); @@ -145,7 +145,7 @@ __vxfs_iget(ino_t ino, struct inode *ilistp) struct vxfs_dinode *dip; caddr_t kaddr = (char *)page_address(pp); - if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL))) + if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, SLAB_KERNEL))) goto fail; dip = (struct vxfs_dinode *)(kaddr + offset); memcpy(vip, dip, sizeof(*vip)); diff --git a/trunk/fs/fuse/dev.c b/trunk/fs/fuse/dev.c index 357764d85ff1..66571eafbb1e 100644 --- a/trunk/fs/fuse/dev.c +++ b/trunk/fs/fuse/dev.c @@ -19,7 +19,7 @@ MODULE_ALIAS_MISCDEV(FUSE_MINOR); -static struct kmem_cache *fuse_req_cachep; +static kmem_cache_t *fuse_req_cachep; static struct fuse_conn *fuse_get_conn(struct file *file) { @@ -41,7 +41,7 @@ static void fuse_request_init(struct fuse_req *req) struct fuse_req *fuse_request_alloc(void) { - struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL); + struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL); if (req) fuse_request_init(req); return req; diff --git a/trunk/fs/fuse/dir.c b/trunk/fs/fuse/dir.c index 1cabdb229adb..c71a6c092ad9 100644 --- a/trunk/fs/fuse/dir.c +++ b/trunk/fs/fuse/dir.c @@ -141,6 +141,9 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) struct fuse_req *forget_req; struct dentry *parent; + /* Doesn't hurt to "reset" the validity timeout */ + fuse_invalidate_entry_cache(entry); + /* For negative dentries, always do a fresh lookup */ if (!inode) return 0; @@ -1024,8 +1027,6 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr) if (attr->ia_valid & ATTR_SIZE) { unsigned long limit; is_truncate = 1; - if (IS_SWAPFILE(inode)) - return -ETXTBSY; limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; if (limit != RLIM_INFINITY && attr->ia_size > (loff_t) limit) { send_sig(SIGXFSZ, current, 0); diff --git a/trunk/fs/fuse/file.c b/trunk/fs/fuse/file.c index 128f79c40803..763a50daf1c0 100644 --- a/trunk/fs/fuse/file.c +++ b/trunk/fs/fuse/file.c @@ -754,42 +754,6 @@ static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl) return err; } -static sector_t fuse_bmap(struct address_space *mapping, sector_t block) -{ - struct inode *inode = mapping->host; - struct fuse_conn *fc = 
get_fuse_conn(inode); - struct fuse_req *req; - struct fuse_bmap_in inarg; - struct fuse_bmap_out outarg; - int err; - - if (!inode->i_sb->s_bdev || fc->no_bmap) - return 0; - - req = fuse_get_req(fc); - if (IS_ERR(req)) - return 0; - - memset(&inarg, 0, sizeof(inarg)); - inarg.block = block; - inarg.blocksize = inode->i_sb->s_blocksize; - req->in.h.opcode = FUSE_BMAP; - req->in.h.nodeid = get_node_id(inode); - req->in.numargs = 1; - req->in.args[0].size = sizeof(inarg); - req->in.args[0].value = &inarg; - req->out.numargs = 1; - req->out.args[0].size = sizeof(outarg); - req->out.args[0].value = &outarg; - request_send(fc, req); - err = req->out.h.error; - fuse_put_request(fc, req); - if (err == -ENOSYS) - fc->no_bmap = 1; - - return err ? 0 : outarg.block; -} - static const struct file_operations fuse_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, @@ -823,7 +787,6 @@ static const struct address_space_operations fuse_file_aops = { .commit_write = fuse_commit_write, .readpages = fuse_readpages, .set_page_dirty = fuse_set_page_dirty, - .bmap = fuse_bmap, }; void fuse_init_file_inode(struct inode *inode) diff --git a/trunk/fs/fuse/fuse_i.h b/trunk/fs/fuse/fuse_i.h index b98b20de7405..91edb8932d90 100644 --- a/trunk/fs/fuse/fuse_i.h +++ b/trunk/fs/fuse/fuse_i.h @@ -298,9 +298,6 @@ struct fuse_conn { reply, before any other request, and never cleared */ unsigned conn_error : 1; - /** Connection successful. Only set in INIT */ - unsigned conn_init : 1; - /** Do readpages asynchronously? Only set in INIT */ unsigned async_read : 1; @@ -342,9 +339,6 @@ struct fuse_conn { /** Is interrupt not implemented by fs? */ unsigned no_interrupt : 1; - /** Is bmap not implemented by fs? */ - unsigned no_bmap : 1; - /** The number of requests waiting for completion */ atomic_t num_waiting; @@ -371,9 +365,6 @@ struct fuse_conn { /** Key for lock owner ID scrambling */ u32 scramble_key[4]; - - /** Reserved request for the DESTROY message */ - struct fuse_req *destroy_req; }; static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb) diff --git a/trunk/fs/fuse/inode.c b/trunk/fs/fuse/inode.c index 12450d2b320e..fc4203570370 100644 --- a/trunk/fs/fuse/inode.c +++ b/trunk/fs/fuse/inode.c @@ -22,7 +22,7 @@ MODULE_AUTHOR("Miklos Szeredi "); MODULE_DESCRIPTION("Filesystem in Userspace"); MODULE_LICENSE("GPL"); -static struct kmem_cache *fuse_inode_cachep; +static kmem_cache_t *fuse_inode_cachep; struct list_head fuse_conn_list; DEFINE_MUTEX(fuse_mutex); @@ -39,7 +39,6 @@ struct fuse_mount_data { unsigned group_id_present : 1; unsigned flags; unsigned max_read; - unsigned blksize; }; static struct inode *fuse_alloc_inode(struct super_block *sb) @@ -47,7 +46,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb) struct inode *inode; struct fuse_inode *fi; - inode = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL); + inode = kmem_cache_alloc(fuse_inode_cachep, SLAB_KERNEL); if (!inode) return NULL; @@ -206,23 +205,10 @@ static void fuse_umount_begin(struct vfsmount *vfsmnt, int flags) fuse_abort_conn(get_fuse_conn_super(vfsmnt->mnt_sb)); } -static void fuse_send_destroy(struct fuse_conn *fc) -{ - struct fuse_req *req = fc->destroy_req; - if (req && fc->conn_init) { - fc->destroy_req = NULL; - req->in.h.opcode = FUSE_DESTROY; - req->force = 1; - request_send(fc, req); - fuse_put_request(fc, req); - } -} - static void fuse_put_super(struct super_block *sb) { struct fuse_conn *fc = get_fuse_conn_super(sb); - fuse_send_destroy(fc); spin_lock(&fc->lock); fc->connected = 
0; fc->blocked = 0; @@ -288,7 +274,6 @@ enum { OPT_DEFAULT_PERMISSIONS, OPT_ALLOW_OTHER, OPT_MAX_READ, - OPT_BLKSIZE, OPT_ERR }; @@ -300,16 +285,14 @@ static match_table_t tokens = { {OPT_DEFAULT_PERMISSIONS, "default_permissions"}, {OPT_ALLOW_OTHER, "allow_other"}, {OPT_MAX_READ, "max_read=%u"}, - {OPT_BLKSIZE, "blksize=%u"}, {OPT_ERR, NULL} }; -static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev) +static int parse_fuse_opt(char *opt, struct fuse_mount_data *d) { char *p; memset(d, 0, sizeof(struct fuse_mount_data)); d->max_read = ~0; - d->blksize = 512; while ((p = strsep(&opt, ",")) != NULL) { int token; @@ -362,12 +345,6 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev) d->max_read = value; break; - case OPT_BLKSIZE: - if (!is_bdev || match_int(&args[0], &value)) - return 0; - d->blksize = value; - break; - default: return 0; } @@ -423,8 +400,6 @@ static struct fuse_conn *new_conn(void) void fuse_conn_put(struct fuse_conn *fc) { if (atomic_dec_and_test(&fc->count)) { - if (fc->destroy_req) - fuse_request_free(fc->destroy_req); mutex_destroy(&fc->inst_mutex); kfree(fc); } @@ -481,7 +456,6 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages); fc->minor = arg->minor; fc->max_write = arg->minor < 5 ? 4096 : arg->max_write; - fc->conn_init = 1; } fuse_put_request(fc, req); fc->blocked = 0; @@ -526,23 +500,15 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) struct dentry *root_dentry; struct fuse_req *init_req; int err; - int is_bdev = sb->s_bdev != NULL; if (sb->s_flags & MS_MANDLOCK) return -EINVAL; - if (!parse_fuse_opt((char *) data, &d, is_bdev)) + if (!parse_fuse_opt((char *) data, &d)) return -EINVAL; - if (is_bdev) { -#ifdef CONFIG_BLOCK - if (!sb_set_blocksize(sb, d.blksize)) - return -EINVAL; -#endif - } else { - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; - } + sb->s_blocksize = PAGE_CACHE_SIZE; + sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = FUSE_SUPER_MAGIC; sb->s_op = &fuse_super_operations; sb->s_maxbytes = MAX_LFS_FILESIZE; @@ -581,12 +547,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) if (!init_req) goto err_put_root; - if (is_bdev) { - fc->destroy_req = fuse_request_alloc(); - if (!fc->destroy_req) - goto err_put_root; - } - mutex_lock(&fuse_mutex); err = -EINVAL; if (file->private_data) @@ -638,47 +598,10 @@ static struct file_system_type fuse_fs_type = { .kill_sb = kill_anon_super, }; -#ifdef CONFIG_BLOCK -static int fuse_get_sb_blk(struct file_system_type *fs_type, - int flags, const char *dev_name, - void *raw_data, struct vfsmount *mnt) -{ - return get_sb_bdev(fs_type, flags, dev_name, raw_data, fuse_fill_super, - mnt); -} - -static struct file_system_type fuseblk_fs_type = { - .owner = THIS_MODULE, - .name = "fuseblk", - .get_sb = fuse_get_sb_blk, - .kill_sb = kill_block_super, - .fs_flags = FS_REQUIRES_DEV, -}; - -static inline int register_fuseblk(void) -{ - return register_filesystem(&fuseblk_fs_type); -} - -static inline void unregister_fuseblk(void) -{ - unregister_filesystem(&fuseblk_fs_type); -} -#else -static inline int register_fuseblk(void) -{ - return 0; -} - -static inline void unregister_fuseblk(void) -{ -} -#endif - static decl_subsys(fuse, NULL, NULL); static decl_subsys(connections, NULL, NULL); -static void fuse_inode_init_once(void *foo, struct kmem_cache *cachep, +static void fuse_inode_init_once(void *foo, 
kmem_cache_t *cachep, unsigned long flags) { struct inode * inode = foo; @@ -694,34 +617,24 @@ static int __init fuse_fs_init(void) err = register_filesystem(&fuse_fs_type); if (err) - goto out; - - err = register_fuseblk(); - if (err) - goto out_unreg; - - fuse_inode_cachep = kmem_cache_create("fuse_inode", - sizeof(struct fuse_inode), - 0, SLAB_HWCACHE_ALIGN, - fuse_inode_init_once, NULL); - err = -ENOMEM; - if (!fuse_inode_cachep) - goto out_unreg2; - - return 0; + printk("fuse: failed to register filesystem\n"); + else { + fuse_inode_cachep = kmem_cache_create("fuse_inode", + sizeof(struct fuse_inode), + 0, SLAB_HWCACHE_ALIGN, + fuse_inode_init_once, NULL); + if (!fuse_inode_cachep) { + unregister_filesystem(&fuse_fs_type); + err = -ENOMEM; + } + } - out_unreg2: - unregister_fuseblk(); - out_unreg: - unregister_filesystem(&fuse_fs_type); - out: return err; } static void fuse_fs_cleanup(void) { unregister_filesystem(&fuse_fs_type); - unregister_fuseblk(); kmem_cache_destroy(fuse_inode_cachep); } diff --git a/trunk/fs/gfs2/glock.c b/trunk/fs/gfs2/glock.c index 55f5333dae99..78fe0fae23ff 100644 --- a/trunk/fs/gfs2/glock.c +++ b/trunk/fs/gfs2/glock.c @@ -35,7 +35,7 @@ struct greedy { struct gfs2_holder gr_gh; - struct delayed_work gr_work; + struct work_struct gr_work; }; struct gfs2_gl_hash_bucket { @@ -1368,9 +1368,9 @@ static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state, glops->go_xmote_th(gl, state, flags); } -static void greedy_work(struct work_struct *work) +static void greedy_work(void *data) { - struct greedy *gr = container_of(work, struct greedy, gr_work.work); + struct greedy *gr = data; struct gfs2_holder *gh = &gr->gr_gh; struct gfs2_glock *gl = gh->gh_gl; const struct gfs2_glock_operations *glops = gl->gl_ops; @@ -1422,7 +1422,7 @@ int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time) gfs2_holder_init(gl, 0, 0, gh); set_bit(HIF_GREEDY, &gh->gh_iflags); - INIT_DELAYED_WORK(&gr->gr_work, greedy_work); + INIT_WORK(&gr->gr_work, greedy_work, gr); set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags); schedule_delayed_work(&gr->gr_work, time); diff --git a/trunk/fs/gfs2/main.c b/trunk/fs/gfs2/main.c index 7c1a9e22a526..9889c1eacec1 100644 --- a/trunk/fs/gfs2/main.c +++ b/trunk/fs/gfs2/main.c @@ -25,7 +25,7 @@ #include "util.h" #include "glock.h" -static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned long flags) +static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long flags) { struct gfs2_inode *ip = foo; if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == @@ -37,7 +37,7 @@ static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned } } -static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned long flags) +static void gfs2_init_glock_once(void *foo, kmem_cache_t *cachep, unsigned long flags) { struct gfs2_glock *gl = foo; if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == diff --git a/trunk/fs/gfs2/util.c b/trunk/fs/gfs2/util.c index e5707a9f78c2..196c604faadc 100644 --- a/trunk/fs/gfs2/util.c +++ b/trunk/fs/gfs2/util.c @@ -23,9 +23,9 @@ #include "lm.h" #include "util.h" -struct kmem_cache *gfs2_glock_cachep __read_mostly; -struct kmem_cache *gfs2_inode_cachep __read_mostly; -struct kmem_cache *gfs2_bufdata_cachep __read_mostly; +kmem_cache_t *gfs2_glock_cachep __read_mostly; +kmem_cache_t *gfs2_inode_cachep __read_mostly; +kmem_cache_t *gfs2_bufdata_cachep __read_mostly; void gfs2_assert_i(struct gfs2_sbd *sdp) { diff --git a/trunk/fs/gfs2/util.h 
b/trunk/fs/gfs2/util.h index 7984dcf89ad0..76a50899fe9e 100644 --- a/trunk/fs/gfs2/util.h +++ b/trunk/fs/gfs2/util.h @@ -146,9 +146,9 @@ int gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh, gfs2_io_error_bh_i((sdp), (bh), __FUNCTION__, __FILE__, __LINE__); -extern struct kmem_cache *gfs2_glock_cachep; -extern struct kmem_cache *gfs2_inode_cachep; -extern struct kmem_cache *gfs2_bufdata_cachep; +extern kmem_cache_t *gfs2_glock_cachep; +extern kmem_cache_t *gfs2_inode_cachep; +extern kmem_cache_t *gfs2_bufdata_cachep; static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt, unsigned int *p) diff --git a/trunk/fs/hfs/super.c b/trunk/fs/hfs/super.c index a36987966004..85b17b3fa4a0 100644 --- a/trunk/fs/hfs/super.c +++ b/trunk/fs/hfs/super.c @@ -24,7 +24,7 @@ #include "hfs_fs.h" #include "btree.h" -static struct kmem_cache *hfs_inode_cachep; +static kmem_cache_t *hfs_inode_cachep; MODULE_LICENSE("GPL"); @@ -145,7 +145,7 @@ static struct inode *hfs_alloc_inode(struct super_block *sb) { struct hfs_inode_info *i; - i = kmem_cache_alloc(hfs_inode_cachep, GFP_KERNEL); + i = kmem_cache_alloc(hfs_inode_cachep, SLAB_KERNEL); return i ? &i->vfs_inode : NULL; } @@ -430,7 +430,7 @@ static struct file_system_type hfs_fs_type = { .fs_flags = FS_REQUIRES_DEV, }; -static void hfs_init_once(void *p, struct kmem_cache *cachep, unsigned long flags) +static void hfs_init_once(void *p, kmem_cache_t *cachep, unsigned long flags) { struct hfs_inode_info *i = p; diff --git a/trunk/fs/hfsplus/super.c b/trunk/fs/hfsplus/super.c index 0f513c6bf843..194eede52fa4 100644 --- a/trunk/fs/hfsplus/super.c +++ b/trunk/fs/hfsplus/super.c @@ -434,13 +434,13 @@ MODULE_AUTHOR("Brad Boyer"); MODULE_DESCRIPTION("Extended Macintosh Filesystem"); MODULE_LICENSE("GPL"); -static struct kmem_cache *hfsplus_inode_cachep; +static kmem_cache_t *hfsplus_inode_cachep; static struct inode *hfsplus_alloc_inode(struct super_block *sb) { struct hfsplus_inode_info *i; - i = kmem_cache_alloc(hfsplus_inode_cachep, GFP_KERNEL); + i = kmem_cache_alloc(hfsplus_inode_cachep, SLAB_KERNEL); return i ? 
&i->vfs_inode : NULL; } @@ -467,7 +467,7 @@ static struct file_system_type hfsplus_fs_type = { .fs_flags = FS_REQUIRES_DEV, }; -static void hfsplus_init_once(void *p, struct kmem_cache *cachep, unsigned long flags) +static void hfsplus_init_once(void *p, kmem_cache_t *cachep, unsigned long flags) { struct hfsplus_inode_info *i = p; diff --git a/trunk/fs/hpfs/dir.c b/trunk/fs/hpfs/dir.c index 594f9c428fc2..ecc9180645ae 100644 --- a/trunk/fs/hpfs/dir.c +++ b/trunk/fs/hpfs/dir.c @@ -84,8 +84,7 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir) } if (!fno->dirflag) { e = 1; - hpfs_error(inode->i_sb, "not a directory, fnode %08lx", - (unsigned long)inode->i_ino); + hpfs_error(inode->i_sb, "not a directory, fnode %08x",inode->i_ino); } if (hpfs_inode->i_dno != fno->u.external[0].disk_secno) { e = 1; @@ -145,11 +144,8 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir) } if (de->first || de->last) { if (hpfs_sb(inode->i_sb)->sb_chk) { - if (de->first && !de->last && (de->namelen != 2 - || de ->name[0] != 1 || de->name[1] != 1)) - hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08lx", old_pos); - if (de->last && (de->namelen != 1 || de ->name[0] != 255)) - hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08lx", old_pos); + if (de->first && !de->last && (de->namelen != 2 || de ->name[0] != 1 || de->name[1] != 1)) hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08x", old_pos); + if (de->last && (de->namelen != 1 || de ->name[0] != 255)) hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08x", old_pos); } hpfs_brelse4(&qbh); goto again; diff --git a/trunk/fs/hpfs/dnode.c b/trunk/fs/hpfs/dnode.c index fe83c2b7d2d8..229ff2fb1809 100644 --- a/trunk/fs/hpfs/dnode.c +++ b/trunk/fs/hpfs/dnode.c @@ -533,13 +533,10 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno) struct buffer_head *bh; struct dnode *d1; struct quad_buffer_head qbh1; - if (hpfs_sb(i->i_sb)->sb_chk) - if (up != i->i_ino) { - hpfs_error(i->i_sb, - "bad pointer to fnode, dnode %08x, pointing to %08x, should be %08lx", - dno, up, (unsigned long)i->i_ino); + if (hpfs_sb(i->i_sb)->sb_chk) if (up != i->i_ino) { + hpfs_error(i->i_sb, "bad pointer to fnode, dnode %08x, pointing to %08x, should be %08x", dno, up, i->i_ino); return; - } + } if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { d1->up = up; d1->root_dnode = 1; @@ -854,9 +851,7 @@ struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp, /* Going to the next dirent */ if ((d = de_next_de(de)) < dnode_end_de(dnode)) { if (!(++*posp & 077)) { - hpfs_error(inode->i_sb, - "map_pos_dirent: pos crossed dnode boundary; pos = %08llx", - (unsigned long long)*posp); + hpfs_error(inode->i_sb, "map_pos_dirent: pos crossed dnode boundary; pos = %08x", *posp); goto bail; } /* We're going down the tree */ diff --git a/trunk/fs/hpfs/ea.c b/trunk/fs/hpfs/ea.c index 547a8384571f..66339dc030e4 100644 --- a/trunk/fs/hpfs/ea.c +++ b/trunk/fs/hpfs/ea.c @@ -243,9 +243,8 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, char *key, char *data fnode->ea_offs = 0xc4; } if (fnode->ea_offs < 0xc4 || fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200) { - hpfs_error(s, "fnode %08lx: ea_offs == %03x, ea_size_s == %03x", - (unsigned long)inode->i_ino, - fnode->ea_offs, fnode->ea_size_s); + hpfs_error(s, "fnode %08x: ea_offs == %03x, ea_size_s == %03x", + inode->i_ino, fnode->ea_offs, fnode->ea_size_s); return; } if ((fnode->ea_size_s || !fnode->ea_size_l) && 
diff --git a/trunk/fs/hpfs/hpfs_fn.h b/trunk/fs/hpfs/hpfs_fn.h index 1c07aa82d327..32ab51e42b96 100644 --- a/trunk/fs/hpfs/hpfs_fn.h +++ b/trunk/fs/hpfs/hpfs_fn.h @@ -317,8 +317,7 @@ static inline struct hpfs_sb_info *hpfs_sb(struct super_block *sb) /* super.c */ -void hpfs_error(struct super_block *, const char *, ...) - __attribute__((format (printf, 2, 3))); +void hpfs_error(struct super_block *, char *, ...); int hpfs_stop_cycles(struct super_block *, int, int *, int *, char *); unsigned hpfs_count_one_bitmap(struct super_block *, secno); diff --git a/trunk/fs/hpfs/inode.c b/trunk/fs/hpfs/inode.c index 85d3e1d9ac00..7faef8544f32 100644 --- a/trunk/fs/hpfs/inode.c +++ b/trunk/fs/hpfs/inode.c @@ -251,10 +251,7 @@ void hpfs_write_inode_nolock(struct inode *i) de->file_size = 0; hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); - } else - hpfs_error(i->i_sb, - "directory %08lx doesn't have '.' entry", - (unsigned long)i->i_ino); + } else hpfs_error(i->i_sb, "directory %08x doesn't have '.' entry", i->i_ino); } mark_buffer_dirty(bh); brelse(bh); diff --git a/trunk/fs/hpfs/map.c b/trunk/fs/hpfs/map.c index c4724589b2eb..0fecdac22e4e 100644 --- a/trunk/fs/hpfs/map.c +++ b/trunk/fs/hpfs/map.c @@ -126,40 +126,32 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea struct extended_attribute *ea; struct extended_attribute *ea_end; if (fnode->magic != FNODE_MAGIC) { - hpfs_error(s, "bad magic on fnode %08lx", - (unsigned long)ino); + hpfs_error(s, "bad magic on fnode %08x", ino); goto bail; } if (!fnode->dirflag) { if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes != (fnode->btree.internal ? 12 : 8)) { - hpfs_error(s, - "bad number of nodes in fnode %08lx", - (unsigned long)ino); + hpfs_error(s, "bad number of nodes in fnode %08x", ino); goto bail; } if (fnode->btree.first_free != 8 + fnode->btree.n_used_nodes * (fnode->btree.internal ? 8 : 12)) { - hpfs_error(s, - "bad first_free pointer in fnode %08lx", - (unsigned long)ino); + hpfs_error(s, "bad first_free pointer in fnode %08x", ino); goto bail; } } if (fnode->ea_size_s && ((signed int)fnode->ea_offs < 0xc4 || (signed int)fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200)) { - hpfs_error(s, - "bad EA info in fnode %08lx: ea_offs == %04x ea_size_s == %04x", - (unsigned long)ino, - fnode->ea_offs, fnode->ea_size_s); + hpfs_error(s, "bad EA info in fnode %08x: ea_offs == %04x ea_size_s == %04x", + ino, fnode->ea_offs, fnode->ea_size_s); goto bail; } ea = fnode_ea(fnode); ea_end = fnode_end_ea(fnode); while (ea != ea_end) { if (ea > ea_end) { - hpfs_error(s, "bad EA in fnode %08lx", - (unsigned long)ino); + hpfs_error(s, "bad EA in fnode %08x", ino); goto bail; } ea = next_ea(ea); diff --git a/trunk/fs/hpfs/super.c b/trunk/fs/hpfs/super.c index d4abc1a1d566..450b5e0b4785 100644 --- a/trunk/fs/hpfs/super.c +++ b/trunk/fs/hpfs/super.c @@ -46,17 +46,21 @@ static void unmark_dirty(struct super_block *s) } /* Filesystem error... */ -static char err_buf[1024]; -void hpfs_error(struct super_block *s, const char *fmt, ...) -{ - va_list args; - - va_start(args, fmt); - vsnprintf(err_buf, sizeof(err_buf), fmt, args); - va_end(args); +#define ERR_BUF_SIZE 1024 - printk("HPFS: filesystem error: %s", err_buf); +void hpfs_error(struct super_block *s, char *m,...) +{ + char *buf; + va_list l; + va_start(l, m); + if (!(buf = kmalloc(ERR_BUF_SIZE, GFP_KERNEL))) + printk("HPFS: No memory for error message '%s'\n",m); + else if (vsprintf(buf, m, l) >= ERR_BUF_SIZE) + printk("HPFS: Grrrr... 
Kernel memory corrupted ... going on, but it'll crash very soon :-(\n"); + printk("HPFS: filesystem error: "); + if (buf) printk("%s", buf); + else printk("%s\n",m); if (!hpfs_sb(s)->sb_was_error) { if (hpfs_sb(s)->sb_err == 2) { printk("; crashing the system because you wanted it\n"); @@ -72,6 +76,7 @@ void hpfs_error(struct super_block *s, const char *fmt, ...) } else if (s->s_flags & MS_RDONLY) printk("; going on - but anything won't be destroyed because it's read-only\n"); else printk("; corrupted filesystem mounted read/write - your computer will explode within 20 seconds ... but you wanted it so!\n"); } else printk("\n"); + kfree(buf); hpfs_sb(s)->sb_was_error = 1; } @@ -155,12 +160,12 @@ static int hpfs_statfs(struct dentry *dentry, struct kstatfs *buf) return 0; } -static struct kmem_cache * hpfs_inode_cachep; +static kmem_cache_t * hpfs_inode_cachep; static struct inode *hpfs_alloc_inode(struct super_block *sb) { struct hpfs_inode_info *ei; - ei = (struct hpfs_inode_info *)kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS); + ei = (struct hpfs_inode_info *)kmem_cache_alloc(hpfs_inode_cachep, SLAB_NOFS); if (!ei) return NULL; ei->vfs_inode.i_version = 1; @@ -172,7 +177,7 @@ static void hpfs_destroy_inode(struct inode *inode) kmem_cache_free(hpfs_inode_cachep, hpfs_i(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo; diff --git a/trunk/fs/hugetlbfs/inode.c b/trunk/fs/hugetlbfs/inode.c index 0706f5aac6a2..7f4756963d05 100644 --- a/trunk/fs/hugetlbfs/inode.c +++ b/trunk/fs/hugetlbfs/inode.c @@ -513,7 +513,7 @@ static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo) } -static struct kmem_cache *hugetlbfs_inode_cachep; +static kmem_cache_t *hugetlbfs_inode_cachep; static struct inode *hugetlbfs_alloc_inode(struct super_block *sb) { @@ -522,7 +522,7 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb) if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo))) return NULL; - p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL); + p = kmem_cache_alloc(hugetlbfs_inode_cachep, SLAB_KERNEL); if (unlikely(!p)) { hugetlbfs_inc_free_inodes(sbinfo); return NULL; @@ -545,7 +545,7 @@ static const struct address_space_operations hugetlbfs_aops = { }; -static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) +static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) { struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo; diff --git a/trunk/fs/inode.c b/trunk/fs/inode.c index 9ecccab7326d..26cdb115ce67 100644 --- a/trunk/fs/inode.c +++ b/trunk/fs/inode.c @@ -97,7 +97,7 @@ static DEFINE_MUTEX(iprune_mutex); */ struct inodes_stat_t inodes_stat; -static struct kmem_cache * inode_cachep __read_mostly; +static kmem_cache_t * inode_cachep __read_mostly; static struct inode *alloc_inode(struct super_block *sb) { @@ -109,7 +109,7 @@ static struct inode *alloc_inode(struct super_block *sb) if (sb->s_op->alloc_inode) inode = sb->s_op->alloc_inode(sb); else - inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL); + inode = (struct inode *) kmem_cache_alloc(inode_cachep, SLAB_KERNEL); if (inode) { struct address_space * const mapping = &inode->i_data; @@ -209,7 +209,7 @@ void inode_init_once(struct inode *inode) EXPORT_SYMBOL(inode_init_once); -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static 
void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct inode * inode = (struct inode *) foo; @@ -1242,6 +1242,9 @@ EXPORT_SYMBOL(inode_needs_sync); */ #ifdef CONFIG_QUOTA +/* Function back in dquot.c */ +int remove_inode_dquot_ref(struct inode *, int, struct list_head *); + void remove_dquot_ref(struct super_block *sb, int type, struct list_head *tofree_head) { diff --git a/trunk/fs/inotify_user.c b/trunk/fs/inotify_user.c index e1956e6f116c..017cb0f134d6 100644 --- a/trunk/fs/inotify_user.c +++ b/trunk/fs/inotify_user.c @@ -34,8 +34,8 @@ #include -static struct kmem_cache *watch_cachep __read_mostly; -static struct kmem_cache *event_cachep __read_mostly; +static kmem_cache_t *watch_cachep __read_mostly; +static kmem_cache_t *event_cachep __read_mostly; static struct vfsmount *inotify_mnt __read_mostly; diff --git a/trunk/fs/isofs/inode.c b/trunk/fs/isofs/inode.c index ea55b6c469ec..c34b862cdbf2 100644 --- a/trunk/fs/isofs/inode.c +++ b/trunk/fs/isofs/inode.c @@ -57,12 +57,12 @@ static void isofs_put_super(struct super_block *sb) static void isofs_read_inode(struct inode *); static int isofs_statfs (struct dentry *, struct kstatfs *); -static struct kmem_cache *isofs_inode_cachep; +static kmem_cache_t *isofs_inode_cachep; static struct inode *isofs_alloc_inode(struct super_block *sb) { struct iso_inode_info *ei; - ei = kmem_cache_alloc(isofs_inode_cachep, GFP_KERNEL); + ei = kmem_cache_alloc(isofs_inode_cachep, SLAB_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -73,7 +73,7 @@ static void isofs_destroy_inode(struct inode *inode) kmem_cache_free(isofs_inode_cachep, ISOFS_I(inode)); } -static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags) { struct iso_inode_info *ei = foo; diff --git a/trunk/fs/jbd/journal.c b/trunk/fs/jbd/journal.c index 10fff9443938..b85c686b60db 100644 --- a/trunk/fs/jbd/journal.c +++ b/trunk/fs/jbd/journal.c @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include #include #include @@ -1630,7 +1630,7 @@ void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry) #define JBD_MAX_SLABS 5 #define JBD_SLAB_INDEX(size) (size >> 11) -static struct kmem_cache *jbd_slab[JBD_MAX_SLABS]; +static kmem_cache_t *jbd_slab[JBD_MAX_SLABS]; static const char *jbd_slab_names[JBD_MAX_SLABS] = { "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k" }; @@ -1693,7 +1693,7 @@ void jbd_slab_free(void *ptr, size_t size) /* * Journal_head storage management */ -static struct kmem_cache *journal_head_cache; +static kmem_cache_t *journal_head_cache; #ifdef CONFIG_JBD_DEBUG static atomic_t nr_journal_heads = ATOMIC_INIT(0); #endif @@ -1996,7 +1996,7 @@ static void __exit remove_jbd_proc_entry(void) #endif -struct kmem_cache *jbd_handle_cache; +kmem_cache_t *jbd_handle_cache; static int __init journal_init_handle_cache(void) { diff --git a/trunk/fs/jbd/revoke.c b/trunk/fs/jbd/revoke.c index d204ab394f36..c532429d8d9b 100644 --- a/trunk/fs/jbd/revoke.c +++ b/trunk/fs/jbd/revoke.c @@ -70,8 +70,8 @@ #include #endif -static struct kmem_cache *revoke_record_cache; -static struct kmem_cache *revoke_table_cache; +static kmem_cache_t *revoke_record_cache; +static kmem_cache_t *revoke_table_cache; /* Each revoke record represents one single revoked block. 
During journal replay, this involves recording the transaction ID of the diff --git a/trunk/fs/jbd/transaction.c b/trunk/fs/jbd/transaction.c index d38e0d575e48..4f82bcd63e48 100644 --- a/trunk/fs/jbd/transaction.c +++ b/trunk/fs/jbd/transaction.c @@ -27,8 +27,6 @@ #include #include -static void __journal_temp_unlink_buffer(struct journal_head *jh); - /* * get_transaction: obtain a new transaction_t object. * @@ -1501,7 +1499,7 @@ __blist_del_buffer(struct journal_head **list, struct journal_head *jh) * * Called under j_list_lock. The journal may not be locked. */ -static void __journal_temp_unlink_buffer(struct journal_head *jh) +void __journal_temp_unlink_buffer(struct journal_head *jh) { struct journal_head **list = NULL; transaction_t *transaction; diff --git a/trunk/fs/jbd2/commit.c b/trunk/fs/jbd2/commit.c index 6bd8005e3d34..70b2ae1ef281 100644 --- a/trunk/fs/jbd2/commit.c +++ b/trunk/fs/jbd2/commit.c @@ -248,12 +248,8 @@ static void journal_submit_data_buffers(journal_t *journal, bufs = 0; goto write_out_data; } - } else if (!locked && buffer_locked(bh)) { - __jbd2_journal_file_buffer(jh, commit_transaction, - BJ_Locked); - jbd_unlock_bh_state(bh); - put_bh(bh); - } else { + } + else { BUFFER_TRACE(bh, "writeout complete: unfile"); __jbd2_journal_unfile_buffer(jh); jbd_unlock_bh_state(bh); diff --git a/trunk/fs/jbd2/journal.c b/trunk/fs/jbd2/journal.c index 44fc32bfd7f1..c60f378b0f76 100644 --- a/trunk/fs/jbd2/journal.c +++ b/trunk/fs/jbd2/journal.c @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include #include #include @@ -1641,7 +1641,7 @@ void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry) #define JBD_MAX_SLABS 5 #define JBD_SLAB_INDEX(size) (size >> 11) -static struct kmem_cache *jbd_slab[JBD_MAX_SLABS]; +static kmem_cache_t *jbd_slab[JBD_MAX_SLABS]; static const char *jbd_slab_names[JBD_MAX_SLABS] = { "jbd2_1k", "jbd2_2k", "jbd2_4k", NULL, "jbd2_8k" }; @@ -1704,7 +1704,7 @@ void jbd2_slab_free(void *ptr, size_t size) /* * Journal_head storage management */ -static struct kmem_cache *jbd2_journal_head_cache; +static kmem_cache_t *jbd2_journal_head_cache; #ifdef CONFIG_JBD_DEBUG static atomic_t nr_journal_heads = ATOMIC_INIT(0); #endif @@ -2007,7 +2007,7 @@ static void __exit jbd2_remove_jbd_proc_entry(void) #endif -struct kmem_cache *jbd2_handle_cache; +kmem_cache_t *jbd2_handle_cache; static int __init journal_init_handle_cache(void) { diff --git a/trunk/fs/jbd2/revoke.c b/trunk/fs/jbd2/revoke.c index f506646ad0ff..380d19917f37 100644 --- a/trunk/fs/jbd2/revoke.c +++ b/trunk/fs/jbd2/revoke.c @@ -70,8 +70,8 @@ #include #endif -static struct kmem_cache *jbd2_revoke_record_cache; -static struct kmem_cache *jbd2_revoke_table_cache; +static kmem_cache_t *jbd2_revoke_record_cache; +static kmem_cache_t *jbd2_revoke_table_cache; /* Each revoke record represents one single revoked block. During journal replay, this involves recording the transaction ID of the diff --git a/trunk/fs/jbd2/transaction.c b/trunk/fs/jbd2/transaction.c index 3a8700153cb0..c051a94c8a97 100644 --- a/trunk/fs/jbd2/transaction.c +++ b/trunk/fs/jbd2/transaction.c @@ -27,8 +27,6 @@ #include #include -static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh); - /* * jbd2_get_transaction: obtain a new transaction_t object. 
* diff --git a/trunk/fs/jffs/inode-v23.c b/trunk/fs/jffs/inode-v23.c index 9f15bce92022..3f7899ea4cba 100644 --- a/trunk/fs/jffs/inode-v23.c +++ b/trunk/fs/jffs/inode-v23.c @@ -61,8 +61,8 @@ static const struct file_operations jffs_dir_operations; static struct inode_operations jffs_dir_inode_operations; static const struct address_space_operations jffs_address_operations; -struct kmem_cache *node_cache = NULL; -struct kmem_cache *fm_cache = NULL; +kmem_cache_t *node_cache = NULL; +kmem_cache_t *fm_cache = NULL; /* Called by the VFS at mount time to initialize the whole file system. */ static int jffs_fill_super(struct super_block *sb, void *data, int silent) diff --git a/trunk/fs/jffs/intrep.c b/trunk/fs/jffs/intrep.c index d0e783f199ea..4a543e114970 100644 --- a/trunk/fs/jffs/intrep.c +++ b/trunk/fs/jffs/intrep.c @@ -66,7 +66,6 @@ #include #include #include -#include #include "intrep.h" #include "jffs_fm.h" @@ -592,7 +591,7 @@ jffs_add_virtual_root(struct jffs_control *c) D2(printk("jffs_add_virtual_root(): " "Creating a virtual root directory.\n")); - if (!(root = kzalloc(sizeof(struct jffs_file), GFP_KERNEL))) { + if (!(root = kmalloc(sizeof(struct jffs_file), GFP_KERNEL))) { return -ENOMEM; } no_jffs_file++; @@ -604,6 +603,7 @@ jffs_add_virtual_root(struct jffs_control *c) DJM(no_jffs_node++); memset(node, 0, sizeof(struct jffs_node)); node->ino = JFFS_MIN_INO; + memset(root, 0, sizeof(struct jffs_file)); root->ino = JFFS_MIN_INO; root->mode = S_IFDIR | S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH; diff --git a/trunk/fs/jffs/jffs_fm.c b/trunk/fs/jffs/jffs_fm.c index 077258b2103e..29b68d939bd9 100644 --- a/trunk/fs/jffs/jffs_fm.c +++ b/trunk/fs/jffs/jffs_fm.c @@ -29,8 +29,8 @@ static int jffs_mark_obsolete(struct jffs_fmcontrol *fmc, __u32 fm_offset); static struct jffs_fm *jffs_alloc_fm(void); static void jffs_free_fm(struct jffs_fm *n); -extern struct kmem_cache *fm_cache; -extern struct kmem_cache *node_cache; +extern kmem_cache_t *fm_cache; +extern kmem_cache_t *node_cache; #if CONFIG_JFFS_FS_VERBOSE > 0 void diff --git a/trunk/fs/jffs2/background.c b/trunk/fs/jffs2/background.c index 6eb3daebd563..ff2a872e80e7 100644 --- a/trunk/fs/jffs2/background.c +++ b/trunk/fs/jffs2/background.c @@ -16,7 +16,6 @@ #include #include #include -#include #include "nodelist.h" diff --git a/trunk/fs/jffs2/malloc.c b/trunk/fs/jffs2/malloc.c index 83f9881ec4cc..33f291005012 100644 --- a/trunk/fs/jffs2/malloc.c +++ b/trunk/fs/jffs2/malloc.c @@ -19,16 +19,16 @@ /* These are initialised to NULL in the kernel startup code. 
If you're porting to other operating systems, beware */ -static struct kmem_cache *full_dnode_slab; -static struct kmem_cache *raw_dirent_slab; -static struct kmem_cache *raw_inode_slab; -static struct kmem_cache *tmp_dnode_info_slab; -static struct kmem_cache *raw_node_ref_slab; -static struct kmem_cache *node_frag_slab; -static struct kmem_cache *inode_cache_slab; +static kmem_cache_t *full_dnode_slab; +static kmem_cache_t *raw_dirent_slab; +static kmem_cache_t *raw_inode_slab; +static kmem_cache_t *tmp_dnode_info_slab; +static kmem_cache_t *raw_node_ref_slab; +static kmem_cache_t *node_frag_slab; +static kmem_cache_t *inode_cache_slab; #ifdef CONFIG_JFFS2_FS_XATTR -static struct kmem_cache *xattr_datum_cache; -static struct kmem_cache *xattr_ref_cache; +static kmem_cache_t *xattr_datum_cache; +static kmem_cache_t *xattr_ref_cache; #endif int __init jffs2_create_slab_caches(void) diff --git a/trunk/fs/jffs2/super.c b/trunk/fs/jffs2/super.c index 7deb78254021..bc4b8106a490 100644 --- a/trunk/fs/jffs2/super.c +++ b/trunk/fs/jffs2/super.c @@ -28,12 +28,12 @@ static void jffs2_put_super(struct super_block *); -static struct kmem_cache *jffs2_inode_cachep; +static kmem_cache_t *jffs2_inode_cachep; static struct inode *jffs2_alloc_inode(struct super_block *sb) { struct jffs2_inode_info *ei; - ei = (struct jffs2_inode_info *)kmem_cache_alloc(jffs2_inode_cachep, GFP_KERNEL); + ei = (struct jffs2_inode_info *)kmem_cache_alloc(jffs2_inode_cachep, SLAB_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -44,7 +44,7 @@ static void jffs2_destroy_inode(struct inode *inode) kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode)); } -static void jffs2_i_init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static void jffs2_i_init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo; diff --git a/trunk/fs/jfs/jfs_logmgr.c b/trunk/fs/jfs/jfs_logmgr.c index 5065baa530b6..b89c9aba0466 100644 --- a/trunk/fs/jfs/jfs_logmgr.c +++ b/trunk/fs/jfs/jfs_logmgr.c @@ -67,7 +67,7 @@ #include #include /* for sync_blockdev() */ #include -#include +#include #include #include #include "jfs_incore.h" diff --git a/trunk/fs/jfs/jfs_metapage.c b/trunk/fs/jfs/jfs_metapage.c index b1a1c7296014..0cccd1c39d75 100644 --- a/trunk/fs/jfs/jfs_metapage.c +++ b/trunk/fs/jfs/jfs_metapage.c @@ -74,7 +74,7 @@ static inline void lock_metapage(struct metapage *mp) } #define METAPOOL_MIN_PAGES 32 -static struct kmem_cache *metapage_cache; +static kmem_cache_t *metapage_cache; static mempool_t *metapage_mempool; #define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE) @@ -180,7 +180,7 @@ static inline void remove_metapage(struct page *page, struct metapage *mp) #endif -static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) +static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) { struct metapage *mp = (struct metapage *)foo; diff --git a/trunk/fs/jfs/jfs_txnmgr.c b/trunk/fs/jfs/jfs_txnmgr.c index d558e51b0df8..81f6f04af192 100644 --- a/trunk/fs/jfs/jfs_txnmgr.c +++ b/trunk/fs/jfs/jfs_txnmgr.c @@ -46,7 +46,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/trunk/fs/jfs/super.c b/trunk/fs/jfs/super.c index 846ac8f34513..9c1c6e0e633d 100644 --- a/trunk/fs/jfs/super.c +++ b/trunk/fs/jfs/super.c @@ -44,7 +44,7 @@ MODULE_DESCRIPTION("The Journaled Filesystem (JFS)"); MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM"); MODULE_LICENSE("GPL"); -static struct 
kmem_cache * jfs_inode_cachep; +static kmem_cache_t * jfs_inode_cachep; static struct super_operations jfs_super_operations; static struct export_operations jfs_export_operations; @@ -93,7 +93,7 @@ void jfs_error(struct super_block *sb, const char * function, ...) va_list args; va_start(args, function); - vsnprintf(error_buf, sizeof(error_buf), function, args); + vsprintf(error_buf, function, args); va_end(args); printk(KERN_ERR "ERROR: (device %s): %s\n", sb->s_id, error_buf); @@ -748,7 +748,7 @@ static struct file_system_type jfs_fs_type = { .fs_flags = FS_REQUIRES_DEV, }; -static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags) { struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo; diff --git a/trunk/fs/lockd/clntproc.c b/trunk/fs/lockd/clntproc.c index 50643b6a5556..3d84f600b633 100644 --- a/trunk/fs/lockd/clntproc.c +++ b/trunk/fs/lockd/clntproc.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/fs/lockd/host.c b/trunk/fs/lockd/host.c index 3d4610c2a266..fb24a9730345 100644 --- a/trunk/fs/lockd/host.c +++ b/trunk/fs/lockd/host.c @@ -36,14 +36,34 @@ static DEFINE_MUTEX(nlm_host_mutex); static void nlm_gc_hosts(void); static struct nsm_handle * __nsm_find(const struct sockaddr_in *, const char *, int, int); -static struct nsm_handle * nsm_find(const struct sockaddr_in *sin, - const char *hostname, - int hostname_len); + +/* + * Find an NLM server handle in the cache. If there is none, create it. + */ +struct nlm_host * +nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version, + const char *hostname, int hostname_len) +{ + return nlm_lookup_host(0, sin, proto, version, + hostname, hostname_len); +} + +/* + * Find an NLM client handle in the cache. If there is none, create it. + */ +struct nlm_host * +nlmsvc_lookup_host(struct svc_rqst *rqstp, + const char *hostname, int hostname_len) +{ + return nlm_lookup_host(1, &rqstp->rq_addr, + rqstp->rq_prot, rqstp->rq_vers, + hostname, hostname_len); +} /* * Common host lookup routine for server & client */ -static struct nlm_host * +struct nlm_host * nlm_lookup_host(int server, const struct sockaddr_in *sin, int proto, int version, const char *hostname, @@ -174,29 +194,6 @@ nlm_destroy_host(struct nlm_host *host) kfree(host); } -/* - * Find an NLM server handle in the cache. If there is none, create it. - */ -struct nlm_host * -nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version, - const char *hostname, int hostname_len) -{ - return nlm_lookup_host(0, sin, proto, version, - hostname, hostname_len); -} - -/* - * Find an NLM client handle in the cache. If there is none, create it. 
- */ -struct nlm_host * -nlmsvc_lookup_host(struct svc_rqst *rqstp, - const char *hostname, int hostname_len) -{ - return nlm_lookup_host(1, &rqstp->rq_addr, - rqstp->rq_prot, rqstp->rq_vers, - hostname, hostname_len); -} - /* * Create the NLM RPC client for an NLM peer */ @@ -498,7 +495,7 @@ __nsm_find(const struct sockaddr_in *sin, return nsm; } -static struct nsm_handle * +struct nsm_handle * nsm_find(const struct sockaddr_in *sin, const char *hostname, int hostname_len) { return __nsm_find(sin, hostname, hostname_len, 1); diff --git a/trunk/fs/locks.c b/trunk/fs/locks.c index 1cb0c57fedbd..e0b6a80649a0 100644 --- a/trunk/fs/locks.c +++ b/trunk/fs/locks.c @@ -142,12 +142,12 @@ int lease_break_time = 45; static LIST_HEAD(file_lock_list); static LIST_HEAD(blocked_list); -static struct kmem_cache *filelock_cache __read_mostly; +static kmem_cache_t *filelock_cache __read_mostly; /* Allocate an empty lock structure. */ static struct file_lock *locks_alloc_lock(void) { - return kmem_cache_alloc(filelock_cache, GFP_KERNEL); + return kmem_cache_alloc(filelock_cache, SLAB_KERNEL); } static void locks_release_private(struct file_lock *fl) @@ -199,7 +199,7 @@ EXPORT_SYMBOL(locks_init_lock); * Initialises the fields of the file lock which are invariant for * free file_locks. */ -static void init_once(void *foo, struct kmem_cache *cache, unsigned long flags) +static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags) { struct file_lock *lock = (struct file_lock *) foo; diff --git a/trunk/fs/mbcache.c b/trunk/fs/mbcache.c index deeb9dc062d9..0ff71256e65b 100644 --- a/trunk/fs/mbcache.c +++ b/trunk/fs/mbcache.c @@ -85,7 +85,7 @@ struct mb_cache { #ifndef MB_CACHE_INDEXES_COUNT int c_indexes_count; #endif - struct kmem_cache *c_entry_cache; + kmem_cache_t *c_entry_cache; struct list_head *c_block_hash; struct list_head *c_indexes_hash[0]; }; diff --git a/trunk/fs/minix/inode.c b/trunk/fs/minix/inode.c index 629e09b38c5c..1e36bae4d0eb 100644 --- a/trunk/fs/minix/inode.c +++ b/trunk/fs/minix/inode.c @@ -51,12 +51,12 @@ static void minix_put_super(struct super_block *sb) return; } -static struct kmem_cache * minix_inode_cachep; +static kmem_cache_t * minix_inode_cachep; static struct inode *minix_alloc_inode(struct super_block *sb) { struct minix_inode_info *ei; - ei = (struct minix_inode_info *)kmem_cache_alloc(minix_inode_cachep, GFP_KERNEL); + ei = (struct minix_inode_info *)kmem_cache_alloc(minix_inode_cachep, SLAB_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -67,7 +67,7 @@ static void minix_destroy_inode(struct inode *inode) kmem_cache_free(minix_inode_cachep, minix_i(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct minix_inode_info *ei = (struct minix_inode_info *) foo; diff --git a/trunk/fs/namei.c b/trunk/fs/namei.c index db1bca26d88c..28d49b301d55 100644 --- a/trunk/fs/namei.c +++ b/trunk/fs/namei.c @@ -249,11 +249,9 @@ int permission(struct inode *inode, int mask, struct nameidata *nd) /* * MAY_EXEC on regular files requires special handling: We override - * filesystem execute permissions if the mode bits aren't set or - * the fs is mounted with the "noexec" flag. + * filesystem execute permissions if the mode bits aren't set. 
*/ - if ((mask & MAY_EXEC) && S_ISREG(mode) && (!(mode & S_IXUGO) || - (nd && nd->mnt && (nd->mnt->mnt_flags & MNT_NOEXEC)))) + if ((mask & MAY_EXEC) && S_ISREG(mode) && !(mode & S_IXUGO)) return -EACCES; /* Ordinary permission routines do not understand MAY_APPEND. */ @@ -1998,7 +1996,8 @@ asmlinkage long sys_mkdir(const char __user *pathname, int mode) void dentry_unhash(struct dentry *dentry) { dget(dentry); - shrink_dcache_parent(dentry); + if (atomic_read(&dentry->d_count)) + shrink_dcache_parent(dentry); spin_lock(&dcache_lock); spin_lock(&dentry->d_lock); if (atomic_read(&dentry->d_count) == 2) diff --git a/trunk/fs/namespace.c b/trunk/fs/namespace.c index b00ac84ebbdd..55442a6cf221 100644 --- a/trunk/fs/namespace.c +++ b/trunk/fs/namespace.c @@ -36,7 +36,7 @@ static int event; static struct list_head *mount_hashtable __read_mostly; static int hash_mask __read_mostly, hash_bits __read_mostly; -static struct kmem_cache *mnt_cache __read_mostly; +static kmem_cache_t *mnt_cache __read_mostly; static struct rw_semaphore namespace_sem; /* /sys/fs */ diff --git a/trunk/fs/ncpfs/inode.c b/trunk/fs/ncpfs/inode.c index fae53243bb92..42e3bef270c9 100644 --- a/trunk/fs/ncpfs/inode.c +++ b/trunk/fs/ncpfs/inode.c @@ -40,12 +40,12 @@ static void ncp_delete_inode(struct inode *); static void ncp_put_super(struct super_block *); static int ncp_statfs(struct dentry *, struct kstatfs *); -static struct kmem_cache * ncp_inode_cachep; +static kmem_cache_t * ncp_inode_cachep; static struct inode *ncp_alloc_inode(struct super_block *sb) { struct ncp_inode_info *ei; - ei = (struct ncp_inode_info *)kmem_cache_alloc(ncp_inode_cachep, GFP_KERNEL); + ei = (struct ncp_inode_info *)kmem_cache_alloc(ncp_inode_cachep, SLAB_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -56,7 +56,7 @@ static void ncp_destroy_inode(struct inode *inode) kmem_cache_free(ncp_inode_cachep, NCP_FINFO(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct ncp_inode_info *ei = (struct ncp_inode_info *) foo; @@ -577,12 +577,12 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) server->rcv.ptr = (unsigned char*)&server->rcv.buf; server->rcv.len = 10; server->rcv.state = 0; - INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc); - INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc); + INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc, server); + INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc, server); sock->sk->sk_write_space = ncp_tcp_write_space; } else { - INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc); - INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc); + INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc, server); + INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc, server); server->timeout_tm.data = (unsigned long)server; server->timeout_tm.function = ncpdgram_timeout_call; } diff --git a/trunk/fs/ncpfs/sock.c b/trunk/fs/ncpfs/sock.c index e496d8b65e92..11c2b252ebed 100644 --- a/trunk/fs/ncpfs/sock.c +++ b/trunk/fs/ncpfs/sock.c @@ -350,10 +350,9 @@ static void info_server(struct ncp_server *server, unsigned int id, const void * } } -void ncpdgram_rcv_proc(struct work_struct *work) +void ncpdgram_rcv_proc(void *s) { - struct ncp_server *server = - container_of(work, struct ncp_server, rcv.tq); + struct ncp_server *server = s; struct socket* sock; sock = server->ncp_sock; @@ -469,10 +468,9 @@ static void __ncpdgram_timeout_proc(struct ncp_server *server) } } -void ncpdgram_timeout_proc(struct 
work_struct *work) +void ncpdgram_timeout_proc(void *s) { - struct ncp_server *server = - container_of(work, struct ncp_server, timeout_tq); + struct ncp_server *server = s; mutex_lock(&server->rcv.creq_mutex); __ncpdgram_timeout_proc(server); mutex_unlock(&server->rcv.creq_mutex); @@ -654,20 +652,18 @@ skipdata:; } } -void ncp_tcp_rcv_proc(struct work_struct *work) +void ncp_tcp_rcv_proc(void *s) { - struct ncp_server *server = - container_of(work, struct ncp_server, rcv.tq); + struct ncp_server *server = s; mutex_lock(&server->rcv.creq_mutex); __ncptcp_rcv_proc(server); mutex_unlock(&server->rcv.creq_mutex); } -void ncp_tcp_tx_proc(struct work_struct *work) +void ncp_tcp_tx_proc(void *s) { - struct ncp_server *server = - container_of(work, struct ncp_server, tx.tq); + struct ncp_server *server = s; mutex_lock(&server->rcv.creq_mutex); __ncptcp_try_send(server); diff --git a/trunk/fs/nfs/client.c b/trunk/fs/nfs/client.c index 23ab145daa2d..5fea638743e4 100644 --- a/trunk/fs/nfs/client.c +++ b/trunk/fs/nfs/client.c @@ -143,7 +143,7 @@ static struct nfs_client *nfs_alloc_client(const char *hostname, INIT_LIST_HEAD(&clp->cl_state_owners); INIT_LIST_HEAD(&clp->cl_unused); spin_lock_init(&clp->cl_lock); - INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state); + INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp); rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client"); clp->cl_boot_time = CURRENT_TIME; clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED; diff --git a/trunk/fs/nfs/direct.c b/trunk/fs/nfs/direct.c index 2f488e1d9b6c..bdfabf854a51 100644 --- a/trunk/fs/nfs/direct.c +++ b/trunk/fs/nfs/direct.c @@ -58,7 +58,7 @@ #define NFSDBG_FACILITY NFSDBG_VFS -static struct kmem_cache *nfs_direct_cachep; +static kmem_cache_t *nfs_direct_cachep; /* * This represents a set of asynchronous requests that we're waiting on @@ -143,7 +143,7 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void) { struct nfs_direct_req *dreq; - dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL); + dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL); if (!dreq) return NULL; diff --git a/trunk/fs/nfs/inode.c b/trunk/fs/nfs/inode.c index 15afa460e629..08cc4c5919ab 100644 --- a/trunk/fs/nfs/inode.c +++ b/trunk/fs/nfs/inode.c @@ -55,7 +55,7 @@ static int nfs_update_inode(struct inode *, struct nfs_fattr *); static void nfs_zap_acl_cache(struct inode *); -static struct kmem_cache * nfs_inode_cachep; +static kmem_cache_t * nfs_inode_cachep; static inline unsigned long nfs_fattr_to_ino_t(struct nfs_fattr *fattr) @@ -1080,7 +1080,7 @@ void nfs4_clear_inode(struct inode *inode) struct inode *nfs_alloc_inode(struct super_block *sb) { struct nfs_inode *nfsi; - nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, GFP_KERNEL); + nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, SLAB_KERNEL); if (!nfsi) return NULL; nfsi->flags = 0UL; @@ -1111,7 +1111,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi) #endif } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct nfs_inode *nfsi = (struct nfs_inode *) foo; diff --git a/trunk/fs/nfs/namespace.c b/trunk/fs/nfs/namespace.c index 371b804e7cc8..ec1114b33d89 100644 --- a/trunk/fs/nfs/namespace.c +++ b/trunk/fs/nfs/namespace.c @@ -18,10 +18,10 @@ #define NFSDBG_FACILITY NFSDBG_VFS -static void nfs_expire_automounts(struct work_struct *work); +static void nfs_expire_automounts(void *list); LIST_HEAD(nfs_automount_list); -static 
DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts); +static DECLARE_WORK(nfs_automount_task, nfs_expire_automounts, &nfs_automount_list); int nfs_mountpoint_expiry_timeout = 500 * HZ; static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent, @@ -164,9 +164,9 @@ struct inode_operations nfs_referral_inode_operations = { .follow_link = nfs_follow_mountpoint, }; -static void nfs_expire_automounts(struct work_struct *work) +static void nfs_expire_automounts(void *data) { - struct list_head *list = &nfs_automount_list; + struct list_head *list = (struct list_head *)data; mark_mounts_for_expiry(list); if (!list_empty(list)) diff --git a/trunk/fs/nfs/nfs4_fs.h b/trunk/fs/nfs/nfs4_fs.h index c26cd978c7cc..6f346677332d 100644 --- a/trunk/fs/nfs/nfs4_fs.h +++ b/trunk/fs/nfs/nfs4_fs.h @@ -185,7 +185,7 @@ extern const u32 nfs4_fs_locations_bitmap[2]; extern void nfs4_schedule_state_renewal(struct nfs_client *); extern void nfs4_renewd_prepare_shutdown(struct nfs_server *); extern void nfs4_kill_renewd(struct nfs_client *); -extern void nfs4_renew_state(struct work_struct *); +extern void nfs4_renew_state(void *); /* nfs4state.c */ struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp); diff --git a/trunk/fs/nfs/nfs4renewd.c b/trunk/fs/nfs/nfs4renewd.c index 823298561c0a..7b6df1852e75 100644 --- a/trunk/fs/nfs/nfs4renewd.c +++ b/trunk/fs/nfs/nfs4renewd.c @@ -59,10 +59,9 @@ #define NFSDBG_FACILITY NFSDBG_PROC void -nfs4_renew_state(struct work_struct *work) +nfs4_renew_state(void *data) { - struct nfs_client *clp = - container_of(work, struct nfs_client, cl_renewd.work); + struct nfs_client *clp = (struct nfs_client *)data; struct rpc_cred *cred; long lease, timeout; unsigned long last, now; diff --git a/trunk/fs/nfs/pagelist.c b/trunk/fs/nfs/pagelist.c index 3fbfc2f03307..829af323f288 100644 --- a/trunk/fs/nfs/pagelist.c +++ b/trunk/fs/nfs/pagelist.c @@ -20,13 +20,13 @@ #define NFS_PARANOIA 1 -static struct kmem_cache *nfs_page_cachep; +static kmem_cache_t *nfs_page_cachep; static inline struct nfs_page * nfs_page_alloc(void) { struct nfs_page *p; - p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL); + p = kmem_cache_alloc(nfs_page_cachep, SLAB_KERNEL); if (p) { memset(p, 0, sizeof(*p)); INIT_LIST_HEAD(&p->wb_list); diff --git a/trunk/fs/nfs/read.c b/trunk/fs/nfs/read.c index 244a8c45b68e..c2e49c397a27 100644 --- a/trunk/fs/nfs/read.c +++ b/trunk/fs/nfs/read.c @@ -38,7 +38,7 @@ static int nfs_pagein_one(struct list_head *, struct inode *); static const struct rpc_call_ops nfs_read_partial_ops; static const struct rpc_call_ops nfs_read_full_ops; -static struct kmem_cache *nfs_rdata_cachep; +static kmem_cache_t *nfs_rdata_cachep; static mempool_t *nfs_rdata_mempool; #define MIN_POOL_READ (32) @@ -46,7 +46,7 @@ static mempool_t *nfs_rdata_mempool; struct nfs_read_data *nfs_readdata_alloc(size_t len) { unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; - struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS); + struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, SLAB_NOFS); if (p) { memset(p, 0, sizeof(*p)); diff --git a/trunk/fs/nfs/write.c b/trunk/fs/nfs/write.c index 41b07288f99e..883dd4a1c157 100644 --- a/trunk/fs/nfs/write.c +++ b/trunk/fs/nfs/write.c @@ -85,7 +85,7 @@ static const struct rpc_call_ops nfs_write_partial_ops; static const struct rpc_call_ops nfs_write_full_ops; static const struct rpc_call_ops nfs_commit_ops; -static struct kmem_cache *nfs_wdata_cachep; +static kmem_cache_t *nfs_wdata_cachep; static mempool_t 
*nfs_wdata_mempool; static mempool_t *nfs_commit_mempool; @@ -93,7 +93,7 @@ static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion); struct nfs_write_data *nfs_commit_alloc(void) { - struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS); + struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS); if (p) { memset(p, 0, sizeof(*p)); @@ -112,7 +112,7 @@ void nfs_commit_free(struct nfs_write_data *p) struct nfs_write_data *nfs_writedata_alloc(size_t len) { unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; - struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS); + struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS); if (p) { memset(p, 0, sizeof(*p)); diff --git a/trunk/fs/nfsd/nfs3xdr.c b/trunk/fs/nfsd/nfs3xdr.c index 277df40f098d..b4baca3053c3 100644 --- a/trunk/fs/nfsd/nfs3xdr.c +++ b/trunk/fs/nfsd/nfs3xdr.c @@ -24,6 +24,10 @@ #define NFSDDBG_FACILITY NFSDDBG_XDR +#ifdef NFSD_OPTIMIZE_SPACE +# define inline +#endif + /* * Mapping of S_IF* types to NFS file types @@ -38,14 +42,14 @@ static u32 nfs3_ftypes[] = { /* * XDR functions for basic NFS types */ -static __be32 * +static inline __be32 * encode_time3(__be32 *p, struct timespec *time) { *p++ = htonl((u32) time->tv_sec); *p++ = htonl(time->tv_nsec); return p; } -static __be32 * +static inline __be32 * decode_time3(__be32 *p, struct timespec *time) { time->tv_sec = ntohl(*p++); @@ -53,7 +57,7 @@ decode_time3(__be32 *p, struct timespec *time) return p; } -static __be32 * +static inline __be32 * decode_fh(__be32 *p, struct svc_fh *fhp) { unsigned int size; @@ -73,7 +77,7 @@ __be32 *nfs3svc_decode_fh(__be32 *p, struct svc_fh *fhp) return decode_fh(p, fhp); } -static __be32 * +static inline __be32 * encode_fh(__be32 *p, struct svc_fh *fhp) { unsigned int size = fhp->fh_handle.fh_size; @@ -87,7 +91,7 @@ encode_fh(__be32 *p, struct svc_fh *fhp) * Decode a file name and make sure that the path contains * no slashes or null bytes. 
*/ -static __be32 * +static inline __be32 * decode_filename(__be32 *p, char **namp, int *lenp) { char *name; @@ -103,7 +107,7 @@ decode_filename(__be32 *p, char **namp, int *lenp) return p; } -static __be32 * +static inline __be32 * decode_sattr3(__be32 *p, struct iattr *iap) { u32 tmp; @@ -149,7 +153,7 @@ decode_sattr3(__be32 *p, struct iattr *iap) return p; } -static __be32 * +static inline __be32 * encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, struct kstat *stat) { @@ -182,7 +186,7 @@ encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, return p; } -static __be32 * +static inline __be32 * encode_saved_post_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) { struct inode *inode = fhp->fh_dentry->d_inode; @@ -772,7 +776,7 @@ nfs3svc_encode_readdirres(struct svc_rqst *rqstp, __be32 *p, return xdr_ressize_check(rqstp, p); } -static __be32 * +static inline __be32 * encode_entry_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen, ino_t ino) { @@ -786,7 +790,7 @@ encode_entry_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, return p; } -static __be32 * +static inline __be32 * encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p, struct svc_fh *fhp) { diff --git a/trunk/fs/nfsd/nfs4state.c b/trunk/fs/nfsd/nfs4state.c index 640c92b2a9f7..293b6495829f 100644 --- a/trunk/fs/nfsd/nfs4state.c +++ b/trunk/fs/nfsd/nfs4state.c @@ -84,10 +84,10 @@ static void nfs4_set_recdir(char *recdir); */ static DEFINE_MUTEX(client_mutex); -static struct kmem_cache *stateowner_slab = NULL; -static struct kmem_cache *file_slab = NULL; -static struct kmem_cache *stateid_slab = NULL; -static struct kmem_cache *deleg_slab = NULL; +static kmem_cache_t *stateowner_slab = NULL; +static kmem_cache_t *file_slab = NULL; +static kmem_cache_t *stateid_slab = NULL; +static kmem_cache_t *deleg_slab = NULL; void nfs4_lock_state(void) @@ -1003,7 +1003,7 @@ alloc_init_file(struct inode *ino) } static void -nfsd4_free_slab(struct kmem_cache **slab) +nfsd4_free_slab(kmem_cache_t **slab) { if (*slab == NULL) return; @@ -1829,8 +1829,9 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf } static struct workqueue_struct *laundry_wq; -static void laundromat_main(struct work_struct *); -static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main); +static struct work_struct laundromat_work; +static void laundromat_main(void *); +static DECLARE_WORK(laundromat_work, laundromat_main, NULL); __be32 nfsd4_renew(clientid_t *clid) @@ -1939,7 +1940,7 @@ nfs4_laundromat(void) } void -laundromat_main(struct work_struct *not_used) +laundromat_main(void *not_used) { time_t t; diff --git a/trunk/fs/nfsd/nfsxdr.c b/trunk/fs/nfsd/nfsxdr.c index f5243f943996..56ebb1443e0e 100644 --- a/trunk/fs/nfsd/nfsxdr.c +++ b/trunk/fs/nfsd/nfsxdr.c @@ -18,6 +18,11 @@ #define NFSDDBG_FACILITY NFSDDBG_XDR + +#ifdef NFSD_OPTIMIZE_SPACE +# define inline +#endif + /* * Mapping of S_IF* types to NFS file types */ @@ -50,7 +55,7 @@ __be32 *nfs2svc_decode_fh(__be32 *p, struct svc_fh *fhp) return decode_fh(p, fhp); } -static __be32 * +static inline __be32 * encode_fh(__be32 *p, struct svc_fh *fhp) { memcpy(p, &fhp->fh_handle.fh_base, NFS_FHSIZE); @@ -61,7 +66,7 @@ encode_fh(__be32 *p, struct svc_fh *fhp) * Decode a file name and make sure that the path contains * no slashes or null bytes. 
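The nfs3xdr.c/nfsxdr.c hunks above also restore a small size/speed toggle: the XDR helpers are marked static inline, but when NFSD_OPTIMIZE_SPACE is defined the inline keyword is preprocessed away, leaving ordinary static functions. Roughly, using encode_time3() from the hunk above:

	#ifdef NFSD_OPTIMIZE_SPACE
	# define inline			/* "inline" expands to nothing below */
	#endif

	/* with NFSD_OPTIMIZE_SPACE defined this is compiled as a plain
	 * static function: one shared out-of-line copy instead of an
	 * inlined expansion at every call site */
	static inline __be32 *encode_time3(__be32 *p, struct timespec *time)
	{
		*p++ = htonl((u32) time->tv_sec);
		*p++ = htonl(time->tv_nsec);
		return p;
	}
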
*/ -static __be32 * +static inline __be32 * decode_filename(__be32 *p, char **namp, int *lenp) { char *name; @@ -77,7 +82,7 @@ decode_filename(__be32 *p, char **namp, int *lenp) return p; } -static __be32 * +static inline __be32 * decode_pathname(__be32 *p, char **namp, int *lenp) { char *name; @@ -93,7 +98,7 @@ decode_pathname(__be32 *p, char **namp, int *lenp) return p; } -static __be32 * +static inline __be32 * decode_sattr(__be32 *p, struct iattr *iap) { u32 tmp, tmp1; diff --git a/trunk/fs/nls/nls_cp936.c b/trunk/fs/nls/nls_cp936.c index 65e640c61c8b..046fde8170ea 100644 --- a/trunk/fs/nls/nls_cp936.c +++ b/trunk/fs/nls/nls_cp936.c @@ -4421,73 +4421,6 @@ static wchar_t *page_charset2uni[256] = { c2u_F8, c2u_F9, c2u_FA, c2u_FB, c2u_FC, c2u_FD, c2u_FE, NULL, }; -static unsigned char u2c_00[512] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */ - 0xA1, 0xE8, 0x00, 0x00, 0x00, 
0x00, 0xA1, 0xEC, /* 0xA4-0xA7 */ - 0xA1, 0xA7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */ - 0xA1, 0xE3, 0xA1, 0xC0, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xA4, /* 0xB4-0xB7 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xC1, /* 0xD4-0xD7 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */ - 0xA8, 0xA4, 0xA8, 0xA2, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */ - 0xA8, 0xA8, 0xA8, 0xA6, 0xA8, 0xBA, 0x00, 0x00, /* 0xE8-0xEB */ - 0xA8, 0xAC, 0xA8, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */ - 0x00, 0x00, 0x00, 0x00, 0xA8, 0xB0, 0xA8, 0xAE, /* 0xF0-0xF3 */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xC2, /* 0xF4-0xF7 */ - 0x00, 0x00, 0xA8, 0xB4, 0xA8, 0xB2, 0x00, 0x00, /* 0xF8-0xFB */ - 0xA8, 0xB9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */ -}; - static unsigned char u2c_01[512] = { 0xA8, 0xA1, 0xA8, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */ @@ -10892,7 +10825,7 @@ static unsigned char u2c_FF[512] = { }; static unsigned char *page_uni2charset[256] = { - u2c_00, u2c_01, u2c_02, u2c_03, u2c_04, NULL, NULL, NULL, + NULL, u2c_01, u2c_02, u2c_03, u2c_04, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, @@ -11003,34 +10936,11 @@ static int uni2char(const wchar_t uni, unsigned char *uni2charset; unsigned char cl = uni&0xFF; unsigned char ch = (uni>>8)&0xFF; - unsigned char out0,out1; + int n; if (boundlen <= 0) return -ENAMETOOLONG; - if (uni == 0x20ac) {/* Euro symbol.The only exception with a non-ascii unicode */ - out[0] = 0x80; - return 1; - } - - if (ch == 0) { /* handle the U00 plane*/ - /* if (cl == 0) return -EINVAL;*/ /*U0000 is legal in cp936*/ - out0 = u2c_00[cl*2]; - out1 = u2c_00[cl*2+1]; - if (out0 == 0x00 && out1 == 0x00) { - if (cl<0x80) { - out[0] = cl; - return 1; - } - return -EINVAL; - } else { - if (boundlen <= 1) - return -ENAMETOOLONG; - out[0] = out0; - out[1] = out1; - return 2; - } - } uni2charset = page_uni2charset[ch]; if (uni2charset) { @@ -11040,10 +10950,15 @@ static int uni2char(const wchar_t uni, out[1] = uni2charset[cl*2+1]; if (out[0] == 0x00 && out[1] == 0x00) return -EINVAL; - return 2; + n = 2; + } else if (ch==0 && cl) { + out[0] = cl; + n = 1; } else return -EINVAL; + + return n; } static int char2uni(const unsigned char *rawstring, int boundlen, @@ -11057,11 +10972,7 @@ static int char2uni(const unsigned char *rawstring, int boundlen, return -ENAMETOOLONG; if (boundlen == 1) { - if (rawstring[0]==0x80) { /* Euro symbol.The only exception with a non-ascii unicode */ - *uni = 0x20ac; - } else { - *uni = rawstring[0]; - } + *uni = rawstring[0]; return 1; } @@ -11075,11 +10986,7 @@ static int char2uni(const unsigned char *rawstring, int boundlen, return -EINVAL; 
n = 2; } else{ - if (ch==0x80) {/* Euro symbol.The only exception with a non-ascii unicode */ - *uni = 0x20ac; - } else { - *uni = ch; - } + *uni = ch; n = 1; } return n; diff --git a/trunk/fs/ntfs/attrib.c b/trunk/fs/ntfs/attrib.c index c577d8e1bd95..9f08e851cfb6 100644 --- a/trunk/fs/ntfs/attrib.c +++ b/trunk/fs/ntfs/attrib.c @@ -1272,7 +1272,7 @@ ntfs_attr_search_ctx *ntfs_attr_get_search_ctx(ntfs_inode *ni, MFT_RECORD *mrec) { ntfs_attr_search_ctx *ctx; - ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, GFP_NOFS); + ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, SLAB_NOFS); if (ctx) ntfs_attr_init_search_ctx(ctx, ni, mrec); return ctx; diff --git a/trunk/fs/ntfs/index.c b/trunk/fs/ntfs/index.c index 2194eff49743..e32cde486362 100644 --- a/trunk/fs/ntfs/index.c +++ b/trunk/fs/ntfs/index.c @@ -38,7 +38,7 @@ ntfs_index_context *ntfs_index_ctx_get(ntfs_inode *idx_ni) { ntfs_index_context *ictx; - ictx = kmem_cache_alloc(ntfs_index_ctx_cache, GFP_NOFS); + ictx = kmem_cache_alloc(ntfs_index_ctx_cache, SLAB_NOFS); if (ictx) *ictx = (ntfs_index_context){ .idx_ni = idx_ni }; return ictx; diff --git a/trunk/fs/ntfs/inode.c b/trunk/fs/ntfs/inode.c index 247989891b4b..2d3de9c89818 100644 --- a/trunk/fs/ntfs/inode.c +++ b/trunk/fs/ntfs/inode.c @@ -324,7 +324,7 @@ struct inode *ntfs_alloc_big_inode(struct super_block *sb) ntfs_inode *ni; ntfs_debug("Entering."); - ni = kmem_cache_alloc(ntfs_big_inode_cache, GFP_NOFS); + ni = kmem_cache_alloc(ntfs_big_inode_cache, SLAB_NOFS); if (likely(ni != NULL)) { ni->state = 0; return VFS_I(ni); @@ -349,7 +349,7 @@ static inline ntfs_inode *ntfs_alloc_extent_inode(void) ntfs_inode *ni; ntfs_debug("Entering."); - ni = kmem_cache_alloc(ntfs_inode_cache, GFP_NOFS); + ni = kmem_cache_alloc(ntfs_inode_cache, SLAB_NOFS); if (likely(ni != NULL)) { ni->state = 0; return ni; diff --git a/trunk/fs/ntfs/unistr.c b/trunk/fs/ntfs/unistr.c index 005ca4b0f132..6a495f7369f9 100644 --- a/trunk/fs/ntfs/unistr.c +++ b/trunk/fs/ntfs/unistr.c @@ -266,7 +266,7 @@ int ntfs_nlstoucs(const ntfs_volume *vol, const char *ins, /* We do not trust outside sources. */ if (likely(ins)) { - ucs = kmem_cache_alloc(ntfs_name_cache, GFP_NOFS); + ucs = kmem_cache_alloc(ntfs_name_cache, SLAB_NOFS); if (likely(ucs)) { for (i = o = 0; i < ins_len; i += wc_len) { wc_len = nls->char2uni(ins + i, ins_len - i, diff --git a/trunk/fs/ocfs2/alloc.c b/trunk/fs/ocfs2/alloc.c index edc91ca3792a..85a048b7a67b 100644 --- a/trunk/fs/ocfs2/alloc.c +++ b/trunk/fs/ocfs2/alloc.c @@ -1197,12 +1197,10 @@ int ocfs2_flush_truncate_log(struct ocfs2_super *osb) return status; } -static void ocfs2_truncate_log_worker(struct work_struct *work) +static void ocfs2_truncate_log_worker(void *data) { int status; - struct ocfs2_super *osb = - container_of(work, struct ocfs2_super, - osb_truncate_log_wq.work); + struct ocfs2_super *osb = data; mlog_entry_void(); @@ -1434,8 +1432,7 @@ int ocfs2_truncate_log_init(struct ocfs2_super *osb) /* ocfs2_truncate_log_shutdown keys on the existence of * osb->osb_tl_inode so we don't set any of the osb variables * until we're sure all is well. 
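Another recurring change, visible in the ntfs hunks just above: allocation flags go back from the GFP_* names to the older SLAB_* spellings (SLAB_KERNEL, SLAB_NOFS, ...), and the cache type goes back to the kmem_cache_t typedef for struct kmem_cache. At this point in the tree the SLAB_* flags are straight aliases for the corresponding GFP_* values, so the calls behave identically; for example:

	/* older spelling, restored by this patch */
	ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, SLAB_NOFS);

	/* newer spelling, being reverted; same behaviour, since slab.h
	 * here defines SLAB_NOFS as GFP_NOFS */
	ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, GFP_NOFS);
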
*/ - INIT_DELAYED_WORK(&osb->osb_truncate_log_wq, - ocfs2_truncate_log_worker); + INIT_WORK(&osb->osb_truncate_log_wq, ocfs2_truncate_log_worker, osb); osb->osb_tl_bh = tl_bh; osb->osb_tl_inode = tl_inode; diff --git a/trunk/fs/ocfs2/cluster/heartbeat.c b/trunk/fs/ocfs2/cluster/heartbeat.c index 4cd9a9580456..305cba3681fe 100644 --- a/trunk/fs/ocfs2/cluster/heartbeat.c +++ b/trunk/fs/ocfs2/cluster/heartbeat.c @@ -141,7 +141,7 @@ struct o2hb_region { * recognizes a node going up and down in one iteration */ u64 hr_generation; - struct delayed_work hr_write_timeout_work; + struct work_struct hr_write_timeout_work; unsigned long hr_last_timeout_start; /* Used during o2hb_check_slot to hold a copy of the block @@ -156,11 +156,9 @@ struct o2hb_bio_wait_ctxt { int wc_error; }; -static void o2hb_write_timeout(struct work_struct *work) +static void o2hb_write_timeout(void *arg) { - struct o2hb_region *reg = - container_of(work, struct o2hb_region, - hr_write_timeout_work.work); + struct o2hb_region *reg = arg; mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u " "milliseconds\n", reg->hr_dev_name, @@ -1406,7 +1404,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, goto out; } - INIT_DELAYED_WORK(®->hr_write_timeout_work, o2hb_write_timeout); + INIT_WORK(®->hr_write_timeout_work, o2hb_write_timeout, reg); /* * A node is considered live after it has beat LIVE_THRESHOLD diff --git a/trunk/fs/ocfs2/cluster/quorum.c b/trunk/fs/ocfs2/cluster/quorum.c index 4705d659fe57..7bba98fbfc15 100644 --- a/trunk/fs/ocfs2/cluster/quorum.c +++ b/trunk/fs/ocfs2/cluster/quorum.c @@ -88,7 +88,7 @@ void o2quo_disk_timeout(void) o2quo_fence_self(); } -static void o2quo_make_decision(struct work_struct *work) +static void o2quo_make_decision(void *arg) { int quorum; int lowest_hb, lowest_reachable = 0, fence = 0; @@ -306,7 +306,7 @@ void o2quo_init(void) struct o2quo_state *qs = &o2quo_state; spin_lock_init(&qs->qs_lock); - INIT_WORK(&qs->qs_work, o2quo_make_decision); + INIT_WORK(&qs->qs_work, o2quo_make_decision, NULL); } void o2quo_exit(void) diff --git a/trunk/fs/ocfs2/cluster/tcp.c b/trunk/fs/ocfs2/cluster/tcp.c index 9b3209dc0b16..b650efa8c8be 100644 --- a/trunk/fs/ocfs2/cluster/tcp.c +++ b/trunk/fs/ocfs2/cluster/tcp.c @@ -140,11 +140,11 @@ static int o2net_sys_err_translations[O2NET_ERR_MAX] = [O2NET_ERR_DIED] = -EHOSTDOWN,}; /* can't quite avoid *all* internal declarations :/ */ -static void o2net_sc_connect_completed(struct work_struct *work); -static void o2net_rx_until_empty(struct work_struct *work); -static void o2net_shutdown_sc(struct work_struct *work); +static void o2net_sc_connect_completed(void *arg); +static void o2net_rx_until_empty(void *arg); +static void o2net_shutdown_sc(void *arg); static void o2net_listen_data_ready(struct sock *sk, int bytes); -static void o2net_sc_send_keep_req(struct work_struct *work); +static void o2net_sc_send_keep_req(void *arg); static void o2net_idle_timer(unsigned long data); static void o2net_sc_postpone_idle(struct o2net_sock_container *sc); @@ -308,10 +308,10 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node) o2nm_node_get(node); sc->sc_node = node; - INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed); - INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty); - INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc); - INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req); + INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed, sc); + INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty, sc); + 
INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc, sc); + INIT_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req, sc); init_timer(&sc->sc_idle_timeout); sc->sc_idle_timeout.function = o2net_idle_timer; @@ -342,7 +342,7 @@ static void o2net_sc_queue_work(struct o2net_sock_container *sc, sc_put(sc); } static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc, - struct delayed_work *work, + struct work_struct *work, int delay) { sc_get(sc); @@ -350,7 +350,7 @@ static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc, sc_put(sc); } static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc, - struct delayed_work *work) + struct work_struct *work) { if (cancel_delayed_work(work)) sc_put(sc); @@ -564,11 +564,9 @@ static void o2net_ensure_shutdown(struct o2net_node *nn, * ourselves as state_change couldn't get the nn_lock and call set_nn_state * itself. */ -static void o2net_shutdown_sc(struct work_struct *work) +static void o2net_shutdown_sc(void *arg) { - struct o2net_sock_container *sc = - container_of(work, struct o2net_sock_container, - sc_shutdown_work); + struct o2net_sock_container *sc = arg; struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); sclog(sc, "shutting down\n"); @@ -1203,10 +1201,9 @@ static int o2net_advance_rx(struct o2net_sock_container *sc) /* this work func is triggerd by data ready. it reads until it can read no * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing * our work the work struct will be marked and we'll be called again. */ -static void o2net_rx_until_empty(struct work_struct *work) +static void o2net_rx_until_empty(void *arg) { - struct o2net_sock_container *sc = - container_of(work, struct o2net_sock_container, sc_rx_work); + struct o2net_sock_container *sc = arg; int ret; do { @@ -1252,11 +1249,9 @@ static int o2net_set_nodelay(struct socket *sock) /* called when a connect completes and after a sock is accepted. the * rx path will see the response and mark the sc valid */ -static void o2net_sc_connect_completed(struct work_struct *work) +static void o2net_sc_connect_completed(void *arg) { - struct o2net_sock_container *sc = - container_of(work, struct o2net_sock_container, - sc_connect_work); + struct o2net_sock_container *sc = arg; mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n", (unsigned long long)O2NET_PROTOCOL_VERSION, @@ -1267,11 +1262,9 @@ static void o2net_sc_connect_completed(struct work_struct *work) } /* this is called as a work_struct func. */ -static void o2net_sc_send_keep_req(struct work_struct *work) +static void o2net_sc_send_keep_req(void *arg) { - struct o2net_sock_container *sc = - container_of(work, struct o2net_sock_container, - sc_keepalive_work.work); + struct o2net_sock_container *sc = arg; o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req)); sc_put(sc); @@ -1321,15 +1314,14 @@ static void o2net_sc_postpone_idle(struct o2net_sock_container *sc) * having a connect attempt fail, etc. 
This centralizes the logic which decides * if a connect attempt should be made or if we should give up and all future * transmit attempts should fail */ -static void o2net_start_connect(struct work_struct *work) +static void o2net_start_connect(void *arg) { - struct o2net_node *nn = - container_of(work, struct o2net_node, nn_connect_work.work); + struct o2net_node *nn = arg; struct o2net_sock_container *sc = NULL; struct o2nm_node *node = NULL, *mynode = NULL; struct socket *sock = NULL; struct sockaddr_in myaddr = {0, }, remoteaddr = {0, }; - int ret = 0, stop; + int ret = 0; /* if we're greater we initiate tx, otherwise we accept */ if (o2nm_this_node() <= o2net_num_from_nn(nn)) @@ -1350,9 +1342,10 @@ static void o2net_start_connect(struct work_struct *work) spin_lock(&nn->nn_lock); /* see if we already have one pending or have given up */ - stop = (nn->nn_sc || nn->nn_persistent_error); + if (nn->nn_sc || nn->nn_persistent_error) + arg = NULL; spin_unlock(&nn->nn_lock); - if (stop) + if (arg == NULL) /* *shrug*, needed some indicator */ goto out; nn->nn_last_connect_attempt = jiffies; @@ -1428,10 +1421,9 @@ static void o2net_start_connect(struct work_struct *work) return; } -static void o2net_connect_expired(struct work_struct *work) +static void o2net_connect_expired(void *arg) { - struct o2net_node *nn = - container_of(work, struct o2net_node, nn_connect_expired.work); + struct o2net_node *nn = arg; spin_lock(&nn->nn_lock); if (!nn->nn_sc_valid) { @@ -1444,10 +1436,9 @@ static void o2net_connect_expired(struct work_struct *work) spin_unlock(&nn->nn_lock); } -static void o2net_still_up(struct work_struct *work) +static void o2net_still_up(void *arg) { - struct o2net_node *nn = - container_of(work, struct o2net_node, nn_still_up.work); + struct o2net_node *nn = arg; o2quo_hb_still_up(o2net_num_from_nn(nn)); } @@ -1653,9 +1644,9 @@ static int o2net_accept_one(struct socket *sock) return ret; } -static void o2net_accept_many(struct work_struct *work) +static void o2net_accept_many(void *arg) { - struct socket *sock = o2net_listen_sock; + struct socket *sock = arg; while (o2net_accept_one(sock) == 0) cond_resched(); } @@ -1709,7 +1700,7 @@ static int o2net_open_listening_sock(__be16 port) write_unlock_bh(&sock->sk->sk_callback_lock); o2net_listen_sock = sock; - INIT_WORK(&o2net_listen_work, o2net_accept_many); + INIT_WORK(&o2net_listen_work, o2net_accept_many, sock); sock->sk->sk_reuse = 1; ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); @@ -1828,10 +1819,9 @@ int o2net_init(void) struct o2net_node *nn = o2net_nn_from_num(i); spin_lock_init(&nn->nn_lock); - INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect); - INIT_DELAYED_WORK(&nn->nn_connect_expired, - o2net_connect_expired); - INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up); + INIT_WORK(&nn->nn_connect_work, o2net_start_connect, nn); + INIT_WORK(&nn->nn_connect_expired, o2net_connect_expired, nn); + INIT_WORK(&nn->nn_still_up, o2net_still_up, nn); /* until we see hb from a node we'll return einval */ nn->nn_persistent_error = -ENOTCONN; init_waitqueue_head(&nn->nn_sc_wq); diff --git a/trunk/fs/ocfs2/cluster/tcp_internal.h b/trunk/fs/ocfs2/cluster/tcp_internal.h index daebbd3a2c8c..4b46aac7d243 100644 --- a/trunk/fs/ocfs2/cluster/tcp_internal.h +++ b/trunk/fs/ocfs2/cluster/tcp_internal.h @@ -86,18 +86,18 @@ struct o2net_node { * connect attempt fails and so can be self-arming. 
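The o2net/o2hb changes also fold struct delayed_work members back into plain struct work_struct: in the older API there is no separate delayed type, and queue_delayed_work()/cancel_delayed_work() operate on the work_struct directly. A minimal sketch (hypothetical wq and helper name; sc_keepalive_work is the field from the hunks above):

	/* old API: the same struct work_struct is used for immediate and
	 * delayed queueing; the delay is given in jiffies */
	static void keepalive_later(struct workqueue_struct *wq,
				    struct o2net_sock_container *sc,
				    unsigned long delay)
	{
		queue_delayed_work(wq, &sc->sc_keepalive_work, delay);
	}

	/* cancelling likewise takes the work_struct itself:
	 *	cancel_delayed_work(&sc->sc_keepalive_work);
	 */
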
shutdown is * careful to first mark the nn such that no connects will be attempted * before canceling delayed connect work and flushing the queue. */ - struct delayed_work nn_connect_work; + struct work_struct nn_connect_work; unsigned long nn_last_connect_attempt; /* this is queued as nodes come up and is canceled when a connection is * established. this expiring gives up on the node and errors out * transmits */ - struct delayed_work nn_connect_expired; + struct work_struct nn_connect_expired; /* after we give up on a socket we wait a while before deciding * that it is still heartbeating and that we should do some * quorum work */ - struct delayed_work nn_still_up; + struct work_struct nn_still_up; }; struct o2net_sock_container { @@ -129,7 +129,7 @@ struct o2net_sock_container { struct work_struct sc_shutdown_work; struct timer_list sc_idle_timeout; - struct delayed_work sc_keepalive_work; + struct work_struct sc_keepalive_work; unsigned sc_handshake_ok:1; diff --git a/trunk/fs/ocfs2/dlm/dlmcommon.h b/trunk/fs/ocfs2/dlm/dlmcommon.h index 6b6ff76538c5..fa968180b072 100644 --- a/trunk/fs/ocfs2/dlm/dlmcommon.h +++ b/trunk/fs/ocfs2/dlm/dlmcommon.h @@ -153,7 +153,7 @@ static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned * called functions that cannot be directly called from the * net message handlers for some reason, usually because * they need to send net messages of their own. */ -void dlm_dispatch_work(struct work_struct *work); +void dlm_dispatch_work(void *data); struct dlm_lock_resource; struct dlm_work_item; diff --git a/trunk/fs/ocfs2/dlm/dlmdomain.c b/trunk/fs/ocfs2/dlm/dlmdomain.c index 420a375a3949..f6cdab3a2c6a 100644 --- a/trunk/fs/ocfs2/dlm/dlmdomain.c +++ b/trunk/fs/ocfs2/dlm/dlmdomain.c @@ -1297,7 +1297,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain, spin_lock_init(&dlm->work_lock); INIT_LIST_HEAD(&dlm->work_list); - INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work); + INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work, dlm); kref_init(&dlm->dlm_refs); dlm->dlm_state = DLM_CTXT_NEW; diff --git a/trunk/fs/ocfs2/dlm/dlmfs.c b/trunk/fs/ocfs2/dlm/dlmfs.c index 941acf14e61f..16b8d1ba7066 100644 --- a/trunk/fs/ocfs2/dlm/dlmfs.c +++ b/trunk/fs/ocfs2/dlm/dlmfs.c @@ -66,7 +66,7 @@ static struct file_operations dlmfs_file_operations; static struct inode_operations dlmfs_dir_inode_operations; static struct inode_operations dlmfs_root_inode_operations; static struct inode_operations dlmfs_file_inode_operations; -static struct kmem_cache *dlmfs_inode_cache; +static kmem_cache_t *dlmfs_inode_cache; struct workqueue_struct *user_dlm_worker; @@ -257,7 +257,7 @@ static ssize_t dlmfs_file_write(struct file *filp, } static void dlmfs_init_once(void *foo, - struct kmem_cache *cachep, + kmem_cache_t *cachep, unsigned long flags) { struct dlmfs_inode_private *ip = @@ -276,7 +276,7 @@ static struct inode *dlmfs_alloc_inode(struct super_block *sb) { struct dlmfs_inode_private *ip; - ip = kmem_cache_alloc(dlmfs_inode_cache, GFP_NOFS); + ip = kmem_cache_alloc(dlmfs_inode_cache, SLAB_NOFS); if (!ip) return NULL; diff --git a/trunk/fs/ocfs2/dlm/dlmmaster.c b/trunk/fs/ocfs2/dlm/dlmmaster.c index 856012b4fa49..f784177b6241 100644 --- a/trunk/fs/ocfs2/dlm/dlmmaster.c +++ b/trunk/fs/ocfs2/dlm/dlmmaster.c @@ -221,7 +221,7 @@ EXPORT_SYMBOL_GPL(dlm_dump_all_mles); #endif /* 0 */ -static struct kmem_cache *dlm_mle_cache = NULL; +static kmem_cache_t *dlm_mle_cache = NULL; static void dlm_mle_release(struct kref *kref); diff --git a/trunk/fs/ocfs2/dlm/dlmrecovery.c 
b/trunk/fs/ocfs2/dlm/dlmrecovery.c index fb3e2b0817f1..9d950d7cea38 100644 --- a/trunk/fs/ocfs2/dlm/dlmrecovery.c +++ b/trunk/fs/ocfs2/dlm/dlmrecovery.c @@ -153,10 +153,9 @@ static inline void dlm_reset_recovery(struct dlm_ctxt *dlm) } /* Worker function used during recovery. */ -void dlm_dispatch_work(struct work_struct *work) +void dlm_dispatch_work(void *data) { - struct dlm_ctxt *dlm = - container_of(work, struct dlm_ctxt, dispatched_work); + struct dlm_ctxt *dlm = (struct dlm_ctxt *)data; LIST_HEAD(tmp_list); struct list_head *iter, *iter2; struct dlm_work_item *item; diff --git a/trunk/fs/ocfs2/dlm/userdlm.c b/trunk/fs/ocfs2/dlm/userdlm.c index 7d2f578b267d..eead48bbfac6 100644 --- a/trunk/fs/ocfs2/dlm/userdlm.c +++ b/trunk/fs/ocfs2/dlm/userdlm.c @@ -171,14 +171,15 @@ static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres) BUG(); } -static void user_dlm_unblock_lock(struct work_struct *work); +static void user_dlm_unblock_lock(void *opaque); static void __user_dlm_queue_lockres(struct user_lock_res *lockres) { if (!(lockres->l_flags & USER_LOCK_QUEUED)) { user_dlm_grab_inode_ref(lockres); - INIT_WORK(&lockres->l_work, user_dlm_unblock_lock); + INIT_WORK(&lockres->l_work, user_dlm_unblock_lock, + lockres); queue_work(user_dlm_worker, &lockres->l_work); lockres->l_flags |= USER_LOCK_QUEUED; @@ -278,11 +279,10 @@ static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres) iput(inode); } -static void user_dlm_unblock_lock(struct work_struct *work) +static void user_dlm_unblock_lock(void *opaque) { int new_level, status; - struct user_lock_res *lockres = - container_of(work, struct user_lock_res, l_work); + struct user_lock_res *lockres = (struct user_lock_res *) opaque; struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); mlog(0, "processing lockres %.*s\n", lockres->l_namelen, diff --git a/trunk/fs/ocfs2/extent_map.c b/trunk/fs/ocfs2/extent_map.c index 80ac69f11d9f..fcd4475d1f89 100644 --- a/trunk/fs/ocfs2/extent_map.c +++ b/trunk/fs/ocfs2/extent_map.c @@ -61,7 +61,7 @@ struct ocfs2_em_insert_context { struct ocfs2_extent_map_entry *right_ent; }; -static struct kmem_cache *ocfs2_em_ent_cachep = NULL; +static kmem_cache_t *ocfs2_em_ent_cachep = NULL; static struct ocfs2_extent_map_entry * diff --git a/trunk/fs/ocfs2/inode.h b/trunk/fs/ocfs2/inode.h index 1a7dd2945b34..46a378fbc40b 100644 --- a/trunk/fs/ocfs2/inode.h +++ b/trunk/fs/ocfs2/inode.h @@ -106,7 +106,7 @@ static inline struct ocfs2_inode_info *OCFS2_I(struct inode *inode) #define INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags & OCFS2_INODE_JOURNAL) #define SET_INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags |= OCFS2_INODE_JOURNAL) -extern struct kmem_cache *ocfs2_inode_cache; +extern kmem_cache_t *ocfs2_inode_cache; extern const struct address_space_operations ocfs2_aops; diff --git a/trunk/fs/ocfs2/journal.c b/trunk/fs/ocfs2/journal.c index 1d7f4ab1e5ed..c0ad7cb59521 100644 --- a/trunk/fs/ocfs2/journal.c +++ b/trunk/fs/ocfs2/journal.c @@ -703,12 +703,11 @@ struct ocfs2_la_recovery_item { * NOTE: This function can and will sleep on recovery of other nodes * during cluster locking, just like any other ocfs2 process. 
*/ -void ocfs2_complete_recovery(struct work_struct *work) +void ocfs2_complete_recovery(void *data) { int ret; - struct ocfs2_journal *journal = - container_of(work, struct ocfs2_journal, j_recovery_work); - struct ocfs2_super *osb = journal->j_osb; + struct ocfs2_super *osb = data; + struct ocfs2_journal *journal = osb->journal; struct ocfs2_dinode *la_dinode, *tl_dinode; struct ocfs2_la_recovery_item *item; struct list_head *p, *n; diff --git a/trunk/fs/ocfs2/journal.h b/trunk/fs/ocfs2/journal.h index 899112ad8136..d86cb960b7ec 100644 --- a/trunk/fs/ocfs2/journal.h +++ b/trunk/fs/ocfs2/journal.h @@ -133,7 +133,7 @@ static inline void ocfs2_inode_set_new(struct ocfs2_super *osb, } /* Exported only for the journal struct init code in super.c. Do not call. */ -void ocfs2_complete_recovery(struct work_struct *work); +void ocfs2_complete_recovery(void *data); /* * Journal Control: diff --git a/trunk/fs/ocfs2/ocfs2.h b/trunk/fs/ocfs2/ocfs2.h index b767fd7da6eb..078883772bd6 100644 --- a/trunk/fs/ocfs2/ocfs2.h +++ b/trunk/fs/ocfs2/ocfs2.h @@ -285,7 +285,7 @@ struct ocfs2_super /* Truncate log info */ struct inode *osb_tl_inode; struct buffer_head *osb_tl_bh; - struct delayed_work osb_truncate_log_wq; + struct work_struct osb_truncate_log_wq; struct ocfs2_node_map osb_recovering_orphan_dirs; unsigned int *osb_orphan_wipes; diff --git a/trunk/fs/ocfs2/super.c b/trunk/fs/ocfs2/super.c index 4bf39540e652..b0992573dee2 100644 --- a/trunk/fs/ocfs2/super.c +++ b/trunk/fs/ocfs2/super.c @@ -68,7 +68,7 @@ #include "buffer_head_io.h" -static struct kmem_cache *ocfs2_inode_cachep = NULL; +static kmem_cache_t *ocfs2_inode_cachep = NULL; /* OCFS2 needs to schedule several differnt types of work which * require cluster locking, disk I/O, recovery waits, etc. Since these @@ -303,7 +303,7 @@ static struct inode *ocfs2_alloc_inode(struct super_block *sb) { struct ocfs2_inode_info *oi; - oi = kmem_cache_alloc(ocfs2_inode_cachep, GFP_NOFS); + oi = kmem_cache_alloc(ocfs2_inode_cachep, SLAB_NOFS); if (!oi) return NULL; @@ -914,7 +914,7 @@ static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf) } static void ocfs2_inode_init_once(void *data, - struct kmem_cache *cachep, + kmem_cache_t *cachep, unsigned long flags) { struct ocfs2_inode_info *oi = data; @@ -1365,7 +1365,7 @@ static int ocfs2_initialize_super(struct super_block *sb, spin_lock_init(&journal->j_lock); journal->j_trans_id = (unsigned long) 1; INIT_LIST_HEAD(&journal->j_la_cleanups); - INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery); + INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery, osb); journal->j_state = OCFS2_JOURNAL_FREE; /* get some pseudo constants for clustersize bits */ @@ -1674,7 +1674,7 @@ void __ocfs2_error(struct super_block *sb, va_list args; va_start(args, fmt); - vsnprintf(error_buf, sizeof(error_buf), fmt, args); + vsprintf(error_buf, fmt, args); va_end(args); /* Not using mlog here because we want to show the actual @@ -1695,7 +1695,7 @@ void __ocfs2_abort(struct super_block* sb, va_list args; va_start(args, fmt); - vsnprintf(error_buf, sizeof(error_buf), fmt, args); + vsprintf(error_buf, fmt, args); va_end(args); printk(KERN_CRIT "OCFS2: abort (device %s): %s: %s\n", diff --git a/trunk/fs/ocfs2/uptodate.c b/trunk/fs/ocfs2/uptodate.c index 39814b900fc0..9707ed7a3206 100644 --- a/trunk/fs/ocfs2/uptodate.c +++ b/trunk/fs/ocfs2/uptodate.c @@ -69,7 +69,7 @@ struct ocfs2_meta_cache_item { sector_t c_block; }; -static struct kmem_cache *ocfs2_uptodate_cachep = NULL; +static kmem_cache_t 
*ocfs2_uptodate_cachep = NULL; void ocfs2_metadata_cache_init(struct inode *inode) { diff --git a/trunk/fs/openpromfs/inode.c b/trunk/fs/openpromfs/inode.c index 26f44e0074ec..592a6402e851 100644 --- a/trunk/fs/openpromfs/inode.c +++ b/trunk/fs/openpromfs/inode.c @@ -330,13 +330,13 @@ static int openpromfs_readdir(struct file * filp, void * dirent, filldir_t filld return 0; } -static struct kmem_cache *op_inode_cachep; +static kmem_cache_t *op_inode_cachep; static struct inode *openprom_alloc_inode(struct super_block *sb) { struct op_inode_info *oi; - oi = kmem_cache_alloc(op_inode_cachep, GFP_KERNEL); + oi = kmem_cache_alloc(op_inode_cachep, SLAB_KERNEL); if (!oi) return NULL; @@ -415,7 +415,7 @@ static struct file_system_type openprom_fs_type = { .kill_sb = kill_anon_super, }; -static void op_inode_init_once(void *data, struct kmem_cache * cachep, unsigned long flags) +static void op_inode_init_once(void *data, kmem_cache_t * cachep, unsigned long flags) { struct op_inode_info *oi = (struct op_inode_info *) data; diff --git a/trunk/fs/partitions/amiga.c b/trunk/fs/partitions/amiga.c index 9917a8c360f2..3068528890a6 100644 --- a/trunk/fs/partitions/amiga.c +++ b/trunk/fs/partitions/amiga.c @@ -43,7 +43,6 @@ amiga_partition(struct parsed_partitions *state, struct block_device *bdev) if (warn_no_part) printk("Dev %s: unable to read RDB block %d\n", bdevname(bdev, b), blk); - res = -1; goto rdb_done; } if (*(__be32 *)data != cpu_to_be32(IDNAME_RIGIDDISK)) @@ -80,7 +79,6 @@ amiga_partition(struct parsed_partitions *state, struct block_device *bdev) if (warn_no_part) printk("Dev %s: unable to read partition block %d\n", bdevname(bdev, b), blk); - res = -1; goto rdb_done; } pb = (struct PartitionBlock *)data; diff --git a/trunk/fs/partitions/atari.c b/trunk/fs/partitions/atari.c index 1f3572d5b755..192a6adfdefd 100644 --- a/trunk/fs/partitions/atari.c +++ b/trunk/fs/partitions/atari.c @@ -88,7 +88,7 @@ int atari_partition(struct parsed_partitions *state, struct block_device *bdev) if (!xrs) { printk (" block %ld read failed\n", partsect); put_dev_sector(sect); - return -1; + return 0; } /* ++roman: sanity check: bit 0 of flg field must be set */ diff --git a/trunk/fs/partitions/check.c b/trunk/fs/partitions/check.c index 1901137f4eca..6fb4b6150d77 100644 --- a/trunk/fs/partitions/check.c +++ b/trunk/fs/partitions/check.c @@ -153,7 +153,7 @@ static struct parsed_partitions * check_partition(struct gendisk *hd, struct block_device *bdev) { struct parsed_partitions *state; - int i, res, err; + int i, res; state = kmalloc(sizeof(struct parsed_partitions), GFP_KERNEL); if (!state) @@ -165,30 +165,19 @@ check_partition(struct gendisk *hd, struct block_device *bdev) sprintf(state->name, "p"); state->limit = hd->minors; - i = res = err = 0; + i = res = 0; while (!res && check_part[i]) { memset(&state->parts, 0, sizeof(state->parts)); res = check_part[i++](state, bdev); - if (res < 0) { - /* We have hit an I/O error which we don't report now. - * But record it, and let the others do their job. - */ - err = res; - res = 0; - } - } if (res > 0) return state; - if (!err) - /* The partition is unrecognized. 
So report I/O errors if there were any */ - res = err; if (!res) printk(" unknown partition table\n"); else if (warn_no_part) printk(" unable to read partition table\n"); kfree(state); - return ERR_PTR(res); + return NULL; } /* @@ -505,8 +494,6 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev) disk->fops->revalidate_disk(disk); if (!get_capacity(disk) || !(state = check_partition(disk, bdev))) return 0; - if (IS_ERR(state)) /* I/O error reading the partition table */ - return PTR_ERR(state); for (p = 1; p < state->limit; p++) { sector_t size = state->parts[p].size; sector_t from = state->parts[p].from; diff --git a/trunk/fs/partitions/ibm.c b/trunk/fs/partitions/ibm.c index 9f7ad4244f63..d352a7381fed 100644 --- a/trunk/fs/partitions/ibm.c +++ b/trunk/fs/partitions/ibm.c @@ -43,7 +43,7 @@ cchhb2blk (struct vtoc_cchhb *ptr, struct hd_geometry *geo) { int ibm_partition(struct parsed_partitions *state, struct block_device *bdev) { - int blocksize, offset, size,res; + int blocksize, offset, size; loff_t i_size; dasd_information_t *info; struct hd_geometry *geo; @@ -56,16 +56,15 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) unsigned char *data; Sector sect; - res = 0; blocksize = bdev_hardsect_size(bdev); if (blocksize <= 0) - goto out_exit; + return 0; i_size = i_size_read(bdev->bd_inode); if (i_size == 0) - goto out_exit; + return 0; if ((info = kmalloc(sizeof(dasd_information_t), GFP_KERNEL)) == NULL) - goto out_exit; + goto out_noinfo; if ((geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL)) == NULL) goto out_nogeo; if ((label = kmalloc(sizeof(union label_t), GFP_KERNEL)) == NULL) @@ -73,7 +72,7 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) if (ioctl_by_bdev(bdev, BIODASDINFO, (unsigned long)info) != 0 || ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0) - goto out_freeall; + goto out_noioctl; /* * Get volume label, extract name and type. @@ -93,8 +92,6 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) EBCASC(type, 4); EBCASC(name, 6); - res = 1; - /* * Three different types: CMS1, VOL1 and LNX1/unlabeled */ @@ -159,9 +156,6 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) counter++; blk++; } - if (!data) - /* Are we not supposed to report this ? */ - goto out_readerr; } else { /* * Old style LNX1 or unlabeled disk @@ -177,17 +171,18 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) } printk("\n"); - goto out_freeall; - + kfree(label); + kfree(geo); + kfree(info); + return 1; out_readerr: - res = -1; -out_freeall: +out_noioctl: kfree(label); out_nolab: kfree(geo); out_nogeo: kfree(info); -out_exit: - return res; +out_noinfo: + return 0; } diff --git a/trunk/fs/pipe.c b/trunk/fs/pipe.c index ae36b89b1a37..b1626f269a34 100644 --- a/trunk/fs/pipe.c +++ b/trunk/fs/pipe.c @@ -830,14 +830,7 @@ void free_pipe_info(struct inode *inode) static struct vfsmount *pipe_mnt __read_mostly; static int pipefs_delete_dentry(struct dentry *dentry) { - /* - * At creation time, we pretended this dentry was hashed - * (by clearing DCACHE_UNHASHED bit in d_flags) - * At delete time, we restore the truth : not hashed. 
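The fs/partitions/check.c hunks above revert an error-reporting scheme: the newer code let check_partition() distinguish "no partition table recognized" (NULL) from "I/O error while reading the table" (ERR_PTR(err)), and the caller in rescan_partitions() tested for it. The idiom being removed, reconstructed from the hunks:

	state = check_partition(disk, bdev);
	if (!state)
		return 0;		/* nothing recognized, not an error */
	if (IS_ERR(state))		/* I/O error reading the table */
		return PTR_ERR(state);
	/* ... walk state->parts[] as before ... */

With the revert, check_partition() returns NULL in both cases and the caller can no longer tell them apart.
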
- * (so that dput() can proceed correctly) - */ - dentry->d_flags |= DCACHE_UNHASHED; - return 0; + return 1; } static struct dentry_operations pipefs_dentry_operations = { @@ -898,22 +891,17 @@ struct file *create_write_pipe(void) if (!inode) goto err_file; - this.len = sprintf(name, "[%lu]", inode->i_ino); + sprintf(name, "[%lu]", inode->i_ino); this.name = name; - this.hash = 0; + this.len = strlen(name); + this.hash = inode->i_ino; /* will go */ err = -ENOMEM; dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &this); if (!dentry) goto err_inode; dentry->d_op = &pipefs_dentry_operations; - /* - * We dont want to publish this dentry into global dentry hash table. - * We pretend dentry is already hashed, by unsetting DCACHE_UNHASHED - * This permits a working /proc/$pid/fd/XXX on pipes - */ - dentry->d_flags &= ~DCACHE_UNHASHED; - d_instantiate(dentry, inode); + d_add(dentry, inode); f->f_vfsmnt = mntget(pipe_mnt); f->f_dentry = dentry; f->f_mapping = inode->i_mapping; diff --git a/trunk/fs/proc/Makefile b/trunk/fs/proc/Makefile index f6c776272572..7431d7ba2d09 100644 --- a/trunk/fs/proc/Makefile +++ b/trunk/fs/proc/Makefile @@ -8,9 +8,8 @@ proc-y := nommu.o task_nommu.o proc-$(CONFIG_MMU) := mmu.o task_mmu.o proc-y += inode.o root.o base.o generic.o array.o \ - proc_tty.o proc_misc.o + kmsg.o proc_tty.o proc_misc.o proc-$(CONFIG_PROC_KCORE) += kcore.o proc-$(CONFIG_PROC_VMCORE) += vmcore.o proc-$(CONFIG_PROC_DEVICETREE) += proc_devtree.o -proc-$(CONFIG_PRINTK) += kmsg.o diff --git a/trunk/fs/proc/base.c b/trunk/fs/proc/base.c index b859fc749c07..795319c54f72 100644 --- a/trunk/fs/proc/base.c +++ b/trunk/fs/proc/base.c @@ -683,6 +683,8 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf, char buffer[PROC_NUMBUF], *end; int oom_adjust; + if (!capable(CAP_SYS_RESOURCE)) + return -EPERM; memset(buffer, 0, sizeof(buffer)); if (count > sizeof(buffer) - 1) count = sizeof(buffer) - 1; @@ -697,10 +699,6 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf, task = get_proc_task(file->f_dentry->d_inode); if (!task) return -ESRCH; - if (oom_adjust < task->oomkilladj && !capable(CAP_SYS_RESOURCE)) { - put_task_struct(task); - return -EACCES; - } task->oomkilladj = oom_adjust; put_task_struct(task); if (end - buffer == 0) @@ -1885,9 +1883,8 @@ void proc_flush_task(struct task_struct *task) return; } -static struct dentry *proc_pid_instantiate(struct inode *dir, - struct dentry * dentry, - struct task_struct *task, void *ptr) +struct dentry *proc_pid_instantiate(struct inode *dir, + struct dentry * dentry, struct task_struct *task, void *ptr) { struct dentry *error = ERR_PTR(-ENOENT); struct inode *inode; diff --git a/trunk/fs/proc/inode.c b/trunk/fs/proc/inode.c index e26945ba685b..49dfb2ab783e 100644 --- a/trunk/fs/proc/inode.c +++ b/trunk/fs/proc/inode.c @@ -81,14 +81,14 @@ static void proc_read_inode(struct inode * inode) inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; } -static struct kmem_cache * proc_inode_cachep; +static kmem_cache_t * proc_inode_cachep; static struct inode *proc_alloc_inode(struct super_block *sb) { struct proc_inode *ei; struct inode *inode; - ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL); + ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, SLAB_KERNEL); if (!ei) return NULL; ei->pid = NULL; @@ -105,7 +105,7 @@ static void proc_destroy_inode(struct inode *inode) kmem_cache_free(proc_inode_cachep, PROC_I(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, 
unsigned long flags) +static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct proc_inode *ei = (struct proc_inode *) foo; diff --git a/trunk/fs/proc/kcore.c b/trunk/fs/proc/kcore.c index 1be73082edd3..1294eda4acae 100644 --- a/trunk/fs/proc/kcore.c +++ b/trunk/fs/proc/kcore.c @@ -22,7 +22,6 @@ #include #include -#define CORE_STR "CORE" static int open_kcore(struct inode * inode, struct file * filp) { @@ -83,11 +82,10 @@ static size_t get_kcore_size(int *nphdr, size_t *elf_buflen) } *elf_buflen = sizeof(struct elfhdr) + (*nphdr + 2)*sizeof(struct elf_phdr) + - 3 * ((sizeof(struct elf_note)) + - roundup(sizeof(CORE_STR), 4)) + - roundup(sizeof(struct elf_prstatus), 4) + - roundup(sizeof(struct elf_prpsinfo), 4) + - roundup(sizeof(struct task_struct), 4); + 3 * (sizeof(struct elf_note) + 4) + + sizeof(struct elf_prstatus) + + sizeof(struct elf_prpsinfo) + + sizeof(struct task_struct); *elf_buflen = PAGE_ALIGN(*elf_buflen); return size + *elf_buflen; } @@ -212,7 +210,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff) nhdr->p_offset = offset; /* set up the process status */ - notes[0].name = CORE_STR; + notes[0].name = "CORE"; notes[0].type = NT_PRSTATUS; notes[0].datasz = sizeof(struct elf_prstatus); notes[0].data = &prstatus; @@ -223,7 +221,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff) bufp = storenote(¬es[0], bufp); /* set up the process info */ - notes[1].name = CORE_STR; + notes[1].name = "CORE"; notes[1].type = NT_PRPSINFO; notes[1].datasz = sizeof(struct elf_prpsinfo); notes[1].data = &prpsinfo; @@ -240,7 +238,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff) bufp = storenote(¬es[1], bufp); /* set up the task structure */ - notes[2].name = CORE_STR; + notes[2].name = "CORE"; notes[2].type = NT_TASKSTRUCT; notes[2].datasz = sizeof(struct task_struct); notes[2].data = current; diff --git a/trunk/fs/proc/proc_misc.c b/trunk/fs/proc/proc_misc.c index 51815cece6f3..93c43b676e59 100644 --- a/trunk/fs/proc/proc_misc.c +++ b/trunk/fs/proc/proc_misc.c @@ -696,11 +696,9 @@ void __init proc_misc_init(void) proc_symlink("mounts", NULL, "self/mounts"); /* And now for trickier ones */ -#ifdef CONFIG_PRINTK entry = create_proc_entry("kmsg", S_IRUSR, &proc_root); if (entry) entry->proc_fops = &proc_kmsg_operations; -#endif create_seq_entry("devices", 0, &proc_devinfo_operations); create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations); #ifdef CONFIG_BLOCK diff --git a/trunk/fs/qnx4/inode.c b/trunk/fs/qnx4/inode.c index c047dc654d5c..5a41db2a218d 100644 --- a/trunk/fs/qnx4/inode.c +++ b/trunk/fs/qnx4/inode.c @@ -515,12 +515,12 @@ static void qnx4_read_inode(struct inode *inode) brelse(bh); } -static struct kmem_cache *qnx4_inode_cachep; +static kmem_cache_t *qnx4_inode_cachep; static struct inode *qnx4_alloc_inode(struct super_block *sb) { struct qnx4_inode_info *ei; - ei = kmem_cache_alloc(qnx4_inode_cachep, GFP_KERNEL); + ei = kmem_cache_alloc(qnx4_inode_cachep, SLAB_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -531,7 +531,7 @@ static void qnx4_destroy_inode(struct inode *inode) kmem_cache_free(qnx4_inode_cachep, qnx4_i(inode)); } -static void init_once(void *foo, struct kmem_cache * cachep, +static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags) { struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo; diff --git a/trunk/fs/reiserfs/file.c b/trunk/fs/reiserfs/file.c index 373d862c3f87..ac14318c81ba 100644 --- a/trunk/fs/reiserfs/file.c +++ 
b/trunk/fs/reiserfs/file.c @@ -317,11 +317,12 @@ static int reiserfs_allocate_blocks_for_region(struct reiserfs_transaction_handl /* area filled with zeroes, to supply as list of zero blocknumbers We allocate it outside of loop just in case loop would spin for several iterations. */ - char *zeros = kzalloc(to_paste * UNFM_P_SIZE, GFP_ATOMIC); // We cannot insert more than MAX_ITEM_LEN bytes anyway. + char *zeros = kmalloc(to_paste * UNFM_P_SIZE, GFP_ATOMIC); // We cannot insert more than MAX_ITEM_LEN bytes anyway. if (!zeros) { res = -ENOMEM; goto error_exit_free_blocks; } + memset(zeros, 0, to_paste * UNFM_P_SIZE); do { to_paste = min_t(__u64, hole_size, @@ -406,8 +407,6 @@ static int reiserfs_allocate_blocks_for_region(struct reiserfs_transaction_handl we restart it. This will also free the path. */ if (journal_transaction_should_end (th, th->t_blocks_allocated)) { - inode->i_size = cpu_key_k_offset(&key) + - (to_paste << inode->i_blkbits); res = restart_transaction(th, inode, &path); @@ -1046,7 +1045,6 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode char *kaddr = kmap_atomic(prepared_pages[0], KM_USER0); memset(kaddr, 0, from); kunmap_atomic(kaddr, KM_USER0); - flush_dcache_page(prepared_pages[0]); } if (to != PAGE_CACHE_SIZE) { /* Last page needs to be partially zeroed */ char *kaddr = @@ -1054,7 +1052,6 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode KM_USER0); memset(kaddr + to, 0, PAGE_CACHE_SIZE - to); kunmap_atomic(kaddr, KM_USER0); - flush_dcache_page(prepared_pages[num_pages - 1]); } /* Since all blocks are new - use already calculated value */ @@ -1188,7 +1185,6 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode memset(kaddr + block_start, 0, from - block_start); kunmap_atomic(kaddr, KM_USER0); - flush_dcache_page(prepared_pages[0]); set_buffer_uptodate(bh); } } @@ -1226,7 +1222,6 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode KM_USER0); memset(kaddr + to, 0, block_end - to); kunmap_atomic(kaddr, KM_USER0); - flush_dcache_page(prepared_pages[num_pages - 1]); set_buffer_uptodate(bh); } } @@ -1312,8 +1307,56 @@ static ssize_t reiserfs_file_write(struct file *file, /* the file we are going t count = MAX_NON_LFS - (unsigned long)*ppos; } - if (file->f_flags & O_DIRECT) - return do_sync_write(file, buf, count, ppos); + if (file->f_flags & O_DIRECT) { // Direct IO needs treatment + ssize_t result, after_file_end = 0; + if ((*ppos + count >= inode->i_size) + || (file->f_flags & O_APPEND)) { + /* If we are appending a file, we need to put this savelink in here. + If we will crash while doing direct io, finish_unfinished will + cut the garbage from the file end. 
*/ + reiserfs_write_lock(inode->i_sb); + err = + journal_begin(&th, inode->i_sb, + JOURNAL_PER_BALANCE_CNT); + if (err) { + reiserfs_write_unlock(inode->i_sb); + return err; + } + reiserfs_update_inode_transaction(inode); + add_save_link(&th, inode, 1 /* Truncate */ ); + after_file_end = 1; + err = + journal_end(&th, inode->i_sb, + JOURNAL_PER_BALANCE_CNT); + reiserfs_write_unlock(inode->i_sb); + if (err) + return err; + } + result = do_sync_write(file, buf, count, ppos); + + if (after_file_end) { /* Now update i_size and remove the savelink */ + struct reiserfs_transaction_handle th; + reiserfs_write_lock(inode->i_sb); + err = journal_begin(&th, inode->i_sb, 1); + if (err) { + reiserfs_write_unlock(inode->i_sb); + return err; + } + reiserfs_update_inode_transaction(inode); + mark_inode_dirty(inode); + err = journal_end(&th, inode->i_sb, 1); + if (err) { + reiserfs_write_unlock(inode->i_sb); + return err; + } + err = remove_save_link(inode, 1 /* truncate */ ); + reiserfs_write_unlock(inode->i_sb); + if (err) + return err; + } + + return result; + } if (unlikely((ssize_t) count < 0)) return -EINVAL; diff --git a/trunk/fs/reiserfs/inode.c b/trunk/fs/reiserfs/inode.c index 254239e6f9e3..9c69bcacad22 100644 --- a/trunk/fs/reiserfs/inode.c +++ b/trunk/fs/reiserfs/inode.c @@ -216,12 +216,11 @@ static int file_capable(struct inode *inode, long block) BUG_ON(!th->t_trans_id); BUG_ON(!th->t_refcount); - pathrelse(path); - /* we cannot restart while nested */ if (th->t_refcount > 1) { return 0; } + pathrelse(path); reiserfs_update_sd(th, inode); err = journal_end(th, s, len); if (!err) { @@ -929,12 +928,15 @@ int reiserfs_get_block(struct inode *inode, sector_t block, if (blocks_needed == 1) { un = &unf_single; } else { - un = kzalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_ATOMIC); // We need to avoid scheduling. + un = kmalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_ATOMIC); // We need to avoid scheduling. if (!un) { un = &unf_single; blocks_needed = 1; max_to_insert = 0; - } + } else + memset(un, 0, + UNFM_P_SIZE * min(blocks_needed, + max_to_insert)); } if (blocks_needed <= max_to_insert) { /* we are going to add target block to the file. 
Use allocated diff --git a/trunk/fs/reiserfs/journal.c b/trunk/fs/reiserfs/journal.c index 7280a23ef344..ac93174c9639 100644 --- a/trunk/fs/reiserfs/journal.c +++ b/trunk/fs/reiserfs/journal.c @@ -104,7 +104,7 @@ static int release_journal_dev(struct super_block *super, struct reiserfs_journal *journal); static int dirty_one_transaction(struct super_block *s, struct reiserfs_journal_list *jl); -static void flush_async_commits(struct work_struct *work); +static void flush_async_commits(void *p); static void queue_log_writer(struct super_block *s); /* values for join in do_journal_begin_r */ @@ -2836,8 +2836,7 @@ int journal_init(struct super_block *p_s_sb, const char *j_dev_name, if (reiserfs_mounted_fs_count <= 1) commit_wq = create_workqueue("reiserfs"); - INIT_DELAYED_WORK(&journal->j_work, flush_async_commits); - journal->j_work_sb = p_s_sb; + INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb); return 0; free_and_return: free_journal_ram(p_s_sb); @@ -3448,11 +3447,10 @@ int journal_end_sync(struct reiserfs_transaction_handle *th, /* ** writeback the pending async commits to disk */ -static void flush_async_commits(struct work_struct *work) +static void flush_async_commits(void *p) { - struct reiserfs_journal *journal = - container_of(work, struct reiserfs_journal, j_work.work); - struct super_block *p_s_sb = journal->j_work_sb; + struct super_block *p_s_sb = p; + struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); struct reiserfs_journal_list *jl; struct list_head *entry; diff --git a/trunk/fs/reiserfs/super.c b/trunk/fs/reiserfs/super.c index 7fb5fb036f90..17249994110f 100644 --- a/trunk/fs/reiserfs/super.c +++ b/trunk/fs/reiserfs/super.c @@ -490,13 +490,13 @@ static void reiserfs_put_super(struct super_block *s) return; } -static struct kmem_cache *reiserfs_inode_cachep; +static kmem_cache_t *reiserfs_inode_cachep; static struct inode *reiserfs_alloc_inode(struct super_block *sb) { struct reiserfs_inode_info *ei; ei = (struct reiserfs_inode_info *) - kmem_cache_alloc(reiserfs_inode_cachep, GFP_KERNEL); + kmem_cache_alloc(reiserfs_inode_cachep, SLAB_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -507,7 +507,7 @@ static void reiserfs_destroy_inode(struct inode *inode) kmem_cache_free(reiserfs_inode_cachep, REISERFS_I(inode)); } -static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags) { struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo; @@ -1549,12 +1549,13 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent) struct reiserfs_sb_info *sbi; int errval = -EINVAL; - sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL); + sbi = kmalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL); if (!sbi) { errval = -ENOMEM; goto error; } s->s_fs_info = sbi; + memset(sbi, 0, sizeof(struct reiserfs_sb_info)); /* Set default values for options: non-aggressive tails, RO on errors */ REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL); REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ERROR_RO); diff --git a/trunk/fs/romfs/inode.c b/trunk/fs/romfs/inode.c index c5af088d4a4c..ddcd9e1ef282 100644 --- a/trunk/fs/romfs/inode.c +++ b/trunk/fs/romfs/inode.c @@ -550,12 +550,12 @@ romfs_read_inode(struct inode *i) } } -static struct kmem_cache * romfs_inode_cachep; +static kmem_cache_t * romfs_inode_cachep; static struct inode *romfs_alloc_inode(struct super_block *sb) { struct romfs_inode_info *ei; - ei = (struct romfs_inode_info 
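Several reiserfs hunks (file.c, inode.c and super.c above) likewise trade kzalloc() for kmalloc() plus an explicit memset(); the two forms are equivalent, kzalloc(size, flags) simply returning zeroed memory. From the reiserfs_fill_super() hunk:

	/* newer form, being reverted */
	sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);

	/* older, equivalent form restored by the patch */
	sbi = kmalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
	if (sbi)
		memset(sbi, 0, sizeof(struct reiserfs_sb_info));
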
*)kmem_cache_alloc(romfs_inode_cachep, GFP_KERNEL); + ei = (struct romfs_inode_info *)kmem_cache_alloc(romfs_inode_cachep, SLAB_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -566,7 +566,7 @@ static void romfs_destroy_inode(struct inode *inode) kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct romfs_inode_info *ei = (struct romfs_inode_info *) foo; diff --git a/trunk/fs/seq_file.c b/trunk/fs/seq_file.c index 10690aa401c7..555b9ac04c25 100644 --- a/trunk/fs/seq_file.c +++ b/trunk/fs/seq_file.c @@ -26,7 +26,7 @@ * ERR_PTR(error). In the end of sequence they return %NULL. ->show() * returns 0 in case of success and negative number in case of error. */ -int seq_open(struct file *file, const struct seq_operations *op) +int seq_open(struct file *file, struct seq_operations *op) { struct seq_file *p = file->private_data; @@ -408,7 +408,7 @@ EXPORT_SYMBOL(single_open); int single_release(struct inode *inode, struct file *file) { - const struct seq_operations *op = ((struct seq_file *)file->private_data)->op; + struct seq_operations *op = ((struct seq_file *)file->private_data)->op; int res = seq_release(inode, file); kfree(op); return res; diff --git a/trunk/fs/smbfs/inode.c b/trunk/fs/smbfs/inode.c index 4af4cd729a5a..2c122ee83adb 100644 --- a/trunk/fs/smbfs/inode.c +++ b/trunk/fs/smbfs/inode.c @@ -50,12 +50,12 @@ static void smb_put_super(struct super_block *); static int smb_statfs(struct dentry *, struct kstatfs *); static int smb_show_options(struct seq_file *, struct vfsmount *); -static struct kmem_cache *smb_inode_cachep; +static kmem_cache_t *smb_inode_cachep; static struct inode *smb_alloc_inode(struct super_block *sb) { struct smb_inode_info *ei; - ei = (struct smb_inode_info *)kmem_cache_alloc(smb_inode_cachep, GFP_KERNEL); + ei = (struct smb_inode_info *)kmem_cache_alloc(smb_inode_cachep, SLAB_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -66,7 +66,7 @@ static void smb_destroy_inode(struct inode *inode) kmem_cache_free(smb_inode_cachep, SMB_I(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct smb_inode_info *ei = (struct smb_inode_info *) foo; unsigned long flagmask = SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR; diff --git a/trunk/fs/smbfs/request.c b/trunk/fs/smbfs/request.c index a4bcae8a9aff..0fb74697abc4 100644 --- a/trunk/fs/smbfs/request.c +++ b/trunk/fs/smbfs/request.c @@ -25,7 +25,7 @@ #define ROUND_UP(x) (((x)+3) & ~3) /* cache for request structures */ -static struct kmem_cache *req_cachep; +static kmem_cache_t *req_cachep; static int smb_request_send_req(struct smb_request *req); @@ -61,7 +61,7 @@ static struct smb_request *smb_do_alloc_request(struct smb_sb_info *server, struct smb_request *req; unsigned char *buf = NULL; - req = kmem_cache_alloc(req_cachep, GFP_KERNEL); + req = kmem_cache_alloc(req_cachep, SLAB_KERNEL); VERBOSE("allocating request: %p\n", req); if (!req) goto out; diff --git a/trunk/fs/stat.c b/trunk/fs/stat.c index a0ebfc7f8a64..bca07eb2003c 100644 --- a/trunk/fs/stat.c +++ b/trunk/fs/stat.c @@ -51,6 +51,13 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) return inode->i_op->getattr(mnt, dentry, stat); generic_fillattr(inode, stat); + if (!stat->blksize) { + struct super_block *s = inode->i_sb; + 
unsigned blocks; + blocks = (stat->size+s->s_blocksize-1) >> s->s_blocksize_bits; + stat->blocks = (s->s_blocksize / 512) * blocks; + stat->blksize = s->s_blocksize; + } return 0; } diff --git a/trunk/fs/sysfs/mount.c b/trunk/fs/sysfs/mount.c index e503f858fba8..20551a1b8a56 100644 --- a/trunk/fs/sysfs/mount.c +++ b/trunk/fs/sysfs/mount.c @@ -16,7 +16,7 @@ struct vfsmount *sysfs_mount; struct super_block * sysfs_sb = NULL; -struct kmem_cache *sysfs_dir_cachep; +kmem_cache_t *sysfs_dir_cachep; static struct super_operations sysfs_ops = { .statfs = simple_statfs, diff --git a/trunk/fs/sysfs/sysfs.h b/trunk/fs/sysfs/sysfs.h index bd7cec295dab..6f3d6bd52887 100644 --- a/trunk/fs/sysfs/sysfs.h +++ b/trunk/fs/sysfs/sysfs.h @@ -1,6 +1,6 @@ extern struct vfsmount * sysfs_mount; -extern struct kmem_cache *sysfs_dir_cachep; +extern kmem_cache_t *sysfs_dir_cachep; extern struct inode * sysfs_new_inode(mode_t mode, struct sysfs_dirent *); extern int sysfs_create(struct dentry *, int mode, int (*init)(struct inode *)); diff --git a/trunk/fs/sysv/CHANGES b/trunk/fs/sysv/CHANGES new file mode 100644 index 000000000000..66ea6e92be66 --- /dev/null +++ b/trunk/fs/sysv/CHANGES @@ -0,0 +1,60 @@ +Mon, 15 Dec 1997 Krzysztof G. Baranowski + * namei.c: struct sysv_dir_inode_operations updated to use dentries. + +Fri, 23 Jan 1998 Krzysztof G. Baranowski + * inode.c: corrected 1 track offset setting (in sb->sv_block_base). + Originally it was overridden (by setting to zero) + in detected_[xenix,sysv4,sysv2,coherent]. Thanks + to Andrzej Krzysztofowicz + for identifying the problem. + +Tue, 27 Jan 1998 Krzysztof G. Baranowski + * inode.c: added 2048-byte block support to SystemV FS. + Merged detected_bs[512,1024,2048]() into one function: + void detected_bs (u_char type, struct super_block *sb). + Thanks to Andrzej Krzysztofowicz + for the patch. + +Wed, 4 Feb 1998 Krzysztof G. Baranowski + * namei.c: removed static subdir(); is_subdir() from dcache.c + is used instead. Cosmetic changes. + +Thu, 3 Dec 1998 Al Viro (viro@parcelfarce.linux.theplanet.co.uk) + * namei.c (sysv_rmdir): + Bugectomy: old check for victim being busy + (inode->i_count) wasn't replaced (with checking + dentry->d_count) and escaped Linus in the last round + of changes. Shot and buried. + +Wed, 9 Dec 1998 AV + * namei.c (do_sysv_rename): + Fixed incorrect check for other owners + race. + Removed checks that went to VFS. + * namei.c (sysv_unlink): + Removed checks that went to VFS. + +Thu, 10 Dec 1998 AV + * namei.c (do_mknod): + Removed dead code - mknod is never asked to + create a symlink or directory. Incidentially, + it wouldn't do it right if it would be called. + +Sat, 26 Dec 1998 KGB + * inode.c (detect_sysv4): + Added detection of expanded s_type field (0x10, + 0x20 and 0x30). Forced read-only access in this case. + +Sun, 21 Mar 1999 AV + * namei.c (sysv_link): + Fixed i_count usage that resulted in dcache corruption. + * inode.c: + Filled ->delete_inode() method with sysv_delete_inode(). + sysv_put_inode() is gone, as it tried to do ->delete_ + _inode()'s job. + * ialloc.c: (sysv_free_inode): + Fixed race. + +Sun, 30 Apr 1999 AV + * namei.c (sysv_mknod): + Removed dead code (S_IFREG case is now passed to + ->create() by VFS). 
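The inode-cache hunks above (reiserfs, romfs, smbfs, sysfs) and below (sysv, udf, ufs) all revert to the older slab interface: a kmem_cache_t pointer, kmem_cache_alloc() with SLAB_KERNEL, and an init_once() constructor gated on SLAB_CTOR_CONSTRUCTOR. The following is a minimal, self-contained sketch of that pattern as it appears in this tree; the myfs_* names and MYFS_I() helper are hypothetical, and the creation flags and six-argument kmem_cache_create() form are typical of this era rather than copied from any single hunk.

#include <linux/fs.h>
#include <linux/slab.h>

/* Hypothetical per-filesystem inode wrapper, mirroring the *_inode_info
 * structures touched by the hunks in this patch. */
struct myfs_inode_info {
	/* fs-private fields would live here */
	struct inode vfs_inode;
};

static inline struct myfs_inode_info *MYFS_I(struct inode *inode)
{
	return container_of(inode, struct myfs_inode_info, vfs_inode);
}

static kmem_cache_t *myfs_inode_cachep;

static struct inode *myfs_alloc_inode(struct super_block *sb)
{
	struct myfs_inode_info *ei;

	/* old-style allocation flag: SLAB_KERNEL rather than GFP_KERNEL */
	ei = kmem_cache_alloc(myfs_inode_cachep, SLAB_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void myfs_destroy_inode(struct inode *inode)
{
	kmem_cache_free(myfs_inode_cachep, MYFS_I(inode));
}

static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
	struct myfs_inode_info *ei = foo;

	/* only initialize on a real construction, not a debug verify pass */
	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(&ei->vfs_inode);
}

static int init_inodecache(void)
{
	/* six-argument kmem_cache_create() of this era: ctor plus (unused) dtor */
	myfs_inode_cachep = kmem_cache_create("myfs_inode_cache",
					      sizeof(struct myfs_inode_info), 0,
					      SLAB_RECLAIM_ACCOUNT,
					      init_once, NULL);
	return myfs_inode_cachep ? 0 : -ENOMEM;
}

static void destroy_inodecache(void)
{
	kmem_cache_destroy(myfs_inode_cachep);
}

The cache is created once at module init and each mounted instance then allocates and frees its in-core inodes through alloc_inode/destroy_inode super_operations, exactly as the per-filesystem hunks in this patch do.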
diff --git a/trunk/fs/sysv/ChangeLog b/trunk/fs/sysv/ChangeLog new file mode 100644 index 000000000000..f403f8b91b80 --- /dev/null +++ b/trunk/fs/sysv/ChangeLog @@ -0,0 +1,106 @@ +Thu Feb 14 2002 Andrew Morton + + * dir_commit_chunk(): call writeout_one_page() as well as + waitfor_one_page() for IS_SYNC directories, so that we + actually do sync the directory. (forward-port from 2.4). + +Thu Feb 7 2002 Alexander Viro + + * super.c: switched to ->get_sb() + * ChangeLog: fixed dates ;-) + +2002-01-24 David S. Miller + + * inode.c: Include linux/init.h + +Mon Jan 21 2002 Alexander Viro + * ialloc.c (sysv_new_inode): zero SYSV_I(inode)->i_data out. + * i_vnode renamed to vfs_inode. Sorry, but let's keep that + consistent. + +Sat Jan 19 2002 Christoph Hellwig + + * include/linux/sysv_fs.h (SYSV_I): Get fs-private inode data using + list_entry() instead of inode->u. + * include/linux/sysv_fs_i.h: Add 'struct inode i_vnode' field to + sysv_inode_info structure. + * inode.c: Include , implement alloc_inode/destroy_inode + sop methods, add infrastructure for per-fs inode slab cache. + * super.c (init_sysv_fs): Initialize inode cache, recover properly + in the case of failed register_filesystem for V7. + (exit_sysv_fs): Destroy inode cache. + +Sat Jan 19 2002 Christoph Hellwig + + * include/linux/sysv_fs.h: Include , declare SYSV_I(). + * dir.c (sysv_find_entry): Use SYSV_I() instead of ->u.sysv_i to + access fs-private inode data. + * ialloc.c (sysv_new_inode): Likewise. + * inode.c (sysv_read_inode): Likewise. + (sysv_update_inode): Likewise. + * itree.c (get_branch): Likewise. + (sysv_truncate): Likewise. + * symlink.c (sysv_readlink): Likewise. + (sysv_follow_link): Likewise. + +Fri Jan 4 2002 Alexander Viro + + * ialloc.c (sysv_free_inode): Use sb->s_id instead of bdevname(). + * inode.c (sysv_read_inode): Likewise. + (sysv_update_inode): Likewise. + (sysv_sync_inode): Likewise. + * super.c (detect_sysv): Likewise. + (complete_read_super): Likewise. + (sysv_read_super): Likewise. + (v7_read_super): Likewise. + +Sun Dec 30 2001 Manfred Spraul + + * dir.c (dir_commit_chunk): Do not set dir->i_version. + (sysv_readdir): Likewise. + +Thu Dec 27 2001 Alexander Viro + + * itree.c (get_block): Use map_bh() to fill out bh_result. + +Tue Dec 25 2001 Alexander Viro + + * super.c (sysv_read_super): Use sb_set_blocksize() to set blocksize. + (v7_read_super): Likewise. + +Tue Nov 27 2001 Alexander Viro + + * itree.c (get_block): Change type for iblock argument to sector_t. + * super.c (sysv_read_super): Set s_blocksize early. + (v7_read_super): Likewise. + * balloc.c (sysv_new_block): Use sb_bread(). instead of bread(). + (sysv_count_free_blocks): Likewise. + * ialloc.c (sysv_raw_inode): Likewise. + * itree.c (get_branch): Likewise. + (free_branches): Likewise. + * super.c (sysv_read_super): Likewise. + (v7_read_super): Likewise. + +Sat Dec 15 2001 Christoph Hellwig + + * inode.c (sysv_read_inode): Mark inode as bad in case of failure. + * super.c (complete_read_super): Check for bad root inode. + +Wed Nov 21 2001 Andrew Morton + + * file.c (sysv_sync_file): Call fsync_inode_data_buffers. + +Fri Oct 26 2001 Christoph Hellwig + + * dir.c, ialloc.c, namei.c, include/linux/sysv_fs_i.h: + Implement per-Inode lookup offset cache. + Modelled after Ted's ext2 patch. + +Fri Oct 26 2001 Christoph Hellwig + + * inode.c, super.c, include/linux/sysv_fs.h, + include/linux/sysv_fs_sb.h: + Remove symlink faking. Noone really wants to use these as + linux filesystems and native OSes don't support it anyway. 
+ + diff --git a/trunk/fs/sysv/INTRO b/trunk/fs/sysv/INTRO new file mode 100644 index 000000000000..de4e4d17cac6 --- /dev/null +++ b/trunk/fs/sysv/INTRO @@ -0,0 +1,182 @@ +This is the implementation of the SystemV/Coherent filesystem for Linux. +It grew out of separate filesystem implementations + + Xenix FS Doug Evans June 1992 + SystemV FS Paul B. Monday March-June 1993 + Coherent FS B. Haible June 1993 + +and was merged together in July 1993. + +These filesystems are rather similar. Here is a comparison with Minix FS: + +* Linux fdisk reports on partitions + - Minix FS 0x81 Linux/Minix + - Xenix FS ?? + - SystemV FS ?? + - Coherent FS 0x08 AIX bootable + +* Size of a block or zone (data allocation unit on disk) + - Minix FS 1024 + - Xenix FS 1024 (also 512 ??) + - SystemV FS 1024 (also 512 and 2048) + - Coherent FS 512 + +* General layout: all have one boot block, one super block and + separate areas for inodes and for directories/data. + On SystemV Release 2 FS (e.g. Microport) the first track is reserved and + all the block numbers (including the super block) are offset by one track. + +* Byte ordering of "short" (16 bit entities) on disk: + - Minix FS little endian 0 1 + - Xenix FS little endian 0 1 + - SystemV FS little endian 0 1 + - Coherent FS little endian 0 1 + Of course, this affects only the file system, not the data of files on it! + +* Byte ordering of "long" (32 bit entities) on disk: + - Minix FS little endian 0 1 2 3 + - Xenix FS little endian 0 1 2 3 + - SystemV FS little endian 0 1 2 3 + - Coherent FS PDP-11 2 3 0 1 + Of course, this affects only the file system, not the data of files on it! + +* Inode on disk: "short", 0 means non-existent, the root dir ino is: + - Minix FS 1 + - Xenix FS, SystemV FS, Coherent FS 2 + +* Maximum number of hard links to a file: + - Minix FS 250 + - Xenix FS ?? + - SystemV FS ?? + - Coherent FS >=10000 + +* Free inode management: + - Minix FS a bitmap + - Xenix FS, SystemV FS, Coherent FS + There is a cache of a certain number of free inodes in the super-block. + When it is exhausted, new free inodes are found using a linear search. + +* Free block management: + - Minix FS a bitmap + - Xenix FS, SystemV FS, Coherent FS + Free blocks are organized in a "free list". Maybe a misleading term, + since it is not true that every free block contains a pointer to + the next free block. Rather, the free blocks are organized in chunks + of limited size, and every now and then a free block contains pointers + to the free blocks pertaining to the next chunk; the first of these + contains pointers and so on. The list terminates with a "block number" + 0 on Xenix FS and SystemV FS, with a block zeroed out on Coherent FS. 
+ +* Super-block location: + - Minix FS block 1 = bytes 1024..2047 + - Xenix FS block 1 = bytes 1024..2047 + - SystemV FS bytes 512..1023 + - Coherent FS block 1 = bytes 512..1023 + +* Super-block layout: + - Minix FS + unsigned short s_ninodes; + unsigned short s_nzones; + unsigned short s_imap_blocks; + unsigned short s_zmap_blocks; + unsigned short s_firstdatazone; + unsigned short s_log_zone_size; + unsigned long s_max_size; + unsigned short s_magic; + - Xenix FS, SystemV FS, Coherent FS + unsigned short s_firstdatazone; + unsigned long s_nzones; + unsigned short s_fzone_count; + unsigned long s_fzones[NICFREE]; + unsigned short s_finode_count; + unsigned short s_finodes[NICINOD]; + char s_flock; + char s_ilock; + char s_modified; + char s_rdonly; + unsigned long s_time; + short s_dinfo[4]; -- SystemV FS only + unsigned long s_free_zones; + unsigned short s_free_inodes; + short s_dinfo[4]; -- Xenix FS only + unsigned short s_interleave_m,s_interleave_n; -- Coherent FS only + char s_fname[6]; + char s_fpack[6]; + then they differ considerably: + Xenix FS + char s_clean; + char s_fill[371]; + long s_magic; + long s_type; + SystemV FS + long s_fill[12 or 14]; + long s_state; + long s_magic; + long s_type; + Coherent FS + unsigned long s_unique; + Note that Coherent FS has no magic. + +* Inode layout: + - Minix FS + unsigned short i_mode; + unsigned short i_uid; + unsigned long i_size; + unsigned long i_time; + unsigned char i_gid; + unsigned char i_nlinks; + unsigned short i_zone[7+1+1]; + - Xenix FS, SystemV FS, Coherent FS + unsigned short i_mode; + unsigned short i_nlink; + unsigned short i_uid; + unsigned short i_gid; + unsigned long i_size; + unsigned char i_zone[3*(10+1+1+1)]; + unsigned long i_atime; + unsigned long i_mtime; + unsigned long i_ctime; + +* Regular file data blocks are organized as + - Minix FS + 7 direct blocks + 1 indirect block (pointers to blocks) + 1 double-indirect block (pointer to pointers to blocks) + - Xenix FS, SystemV FS, Coherent FS + 10 direct blocks + 1 indirect block (pointers to blocks) + 1 double-indirect block (pointer to pointers to blocks) + 1 triple-indirect block (pointer to pointers to pointers to blocks) + +* Inode size, inodes per block + - Minix FS 32 32 + - Xenix FS 64 16 + - SystemV FS 64 16 + - Coherent FS 64 8 + +* Directory entry on disk + - Minix FS + unsigned short inode; + char name[14/30]; + - Xenix FS, SystemV FS, Coherent FS + unsigned short inode; + char name[14]; + +* Dir entry size, dir entries per block + - Minix FS 16/32 64/32 + - Xenix FS 16 64 + - SystemV FS 16 64 + - Coherent FS 16 32 + +* How to implement symbolic links such that the host fsck doesn't scream: + - Minix FS normal + - Xenix FS kludge: as regular files with chmod 1000 + - SystemV FS ?? + - Coherent FS kludge: as regular files with chmod 1000 + + +Notation: We often speak of a "block" but mean a zone (the allocation unit) +and not the disk driver's notion of "block". 
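As a reading aid for the layout tables above, here is an illustrative C transcription of the Xenix/SystemV/Coherent on-disk records this INTRO describes. The struct names are invented, and the types follow the text literally (16-bit short, 32-bit long); the kernel's real definitions live in its sysv headers.

/* Illustrative only: on-disk inode as described above
 * (64 bytes, 16 per 1024-byte block; 8 per 512-byte block on Coherent). */
struct sysv_disk_inode {
	unsigned short i_mode;
	unsigned short i_nlink;
	unsigned short i_uid;
	unsigned short i_gid;
	unsigned long  i_size;                        /* 32-bit on disk */
	unsigned char  i_zone[3 * (10 + 1 + 1 + 1)];  /* 10 direct + single/double/triple
	                                                 indirect, 3 bytes per block number */
	unsigned long  i_atime;
	unsigned long  i_mtime;
	unsigned long  i_ctime;
};

/* Illustrative only: directory entry (16 bytes, hence 64 per 1024-byte block). */
struct sysv_disk_dirent {
	unsigned short inode;     /* 0 means non-existent; the root directory is inode 2 */
	char           name[14];
};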
+ + +Bruno Haible diff --git a/trunk/fs/sysv/inode.c b/trunk/fs/sysv/inode.c index ead9864567e3..d63c5e48b050 100644 --- a/trunk/fs/sysv/inode.c +++ b/trunk/fs/sysv/inode.c @@ -301,13 +301,13 @@ static void sysv_delete_inode(struct inode *inode) unlock_kernel(); } -static struct kmem_cache *sysv_inode_cachep; +static kmem_cache_t *sysv_inode_cachep; static struct inode *sysv_alloc_inode(struct super_block *sb) { struct sysv_inode_info *si; - si = kmem_cache_alloc(sysv_inode_cachep, GFP_KERNEL); + si = kmem_cache_alloc(sysv_inode_cachep, SLAB_KERNEL); if (!si) return NULL; return &si->vfs_inode; @@ -318,7 +318,7 @@ static void sysv_destroy_inode(struct inode *inode) kmem_cache_free(sysv_inode_cachep, SYSV_I(inode)); } -static void init_once(void *p, struct kmem_cache *cachep, unsigned long flags) +static void init_once(void *p, kmem_cache_t *cachep, unsigned long flags) { struct sysv_inode_info *si = (struct sysv_inode_info *)p; diff --git a/trunk/fs/udf/super.c b/trunk/fs/udf/super.c index 1dbc2955f02e..1aea6a4f9a4a 100644 --- a/trunk/fs/udf/super.c +++ b/trunk/fs/udf/super.c @@ -107,12 +107,12 @@ static struct file_system_type udf_fstype = { .fs_flags = FS_REQUIRES_DEV, }; -static struct kmem_cache * udf_inode_cachep; +static kmem_cache_t * udf_inode_cachep; static struct inode *udf_alloc_inode(struct super_block *sb) { struct udf_inode_info *ei; - ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL); + ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, SLAB_KERNEL); if (!ei) return NULL; @@ -130,7 +130,7 @@ static void udf_destroy_inode(struct inode *inode) kmem_cache_free(udf_inode_cachep, UDF_I(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct udf_inode_info *ei = (struct udf_inode_info *) foo; @@ -1709,7 +1709,7 @@ void udf_error(struct super_block *sb, const char *function, sb->s_dirt = 1; } va_start(args, fmt); - vsnprintf(error_buf, sizeof(error_buf), fmt, args); + vsprintf(error_buf, fmt, args); va_end(args); printk (KERN_CRIT "UDF-fs error (device %s): %s: %s\n", sb->s_id, function, error_buf); @@ -1721,7 +1721,7 @@ void udf_warning(struct super_block *sb, const char *function, va_list args; va_start (args, fmt); - vsnprintf(error_buf, sizeof(error_buf), fmt, args); + vsprintf(error_buf, fmt, args); va_end(args); printk(KERN_WARNING "UDF-fs warning (device %s): %s: %s\n", sb->s_id, function, error_buf); diff --git a/trunk/fs/ufs/super.c b/trunk/fs/ufs/super.c index 8a8e9382ec09..ec79e3091d1b 100644 --- a/trunk/fs/ufs/super.c +++ b/trunk/fs/ufs/super.c @@ -224,7 +224,7 @@ void ufs_error (struct super_block * sb, const char * function, sb->s_flags |= MS_RDONLY; } va_start (args, fmt); - vsnprintf (error_buf, sizeof(error_buf), fmt, args); + vsprintf (error_buf, fmt, args); va_end (args); switch (UFS_SB(sb)->s_mount_opt & UFS_MOUNT_ONERROR) { case UFS_MOUNT_ONERROR_PANIC: @@ -255,7 +255,7 @@ void ufs_panic (struct super_block * sb, const char * function, sb->s_dirt = 1; } va_start (args, fmt); - vsnprintf (error_buf, sizeof(error_buf), fmt, args); + vsprintf (error_buf, fmt, args); va_end (args); sb->s_flags |= MS_RDONLY; printk (KERN_CRIT "UFS-fs panic (device %s): %s: %s\n", @@ -268,7 +268,7 @@ void ufs_warning (struct super_block * sb, const char * function, va_list args; va_start (args, fmt); - vsnprintf (error_buf, sizeof(error_buf), fmt, args); + vsprintf (error_buf, fmt, args); va_end (args); printk 
(KERN_WARNING "UFS-fs warning (device %s): %s: %s\n", sb->s_id, function, error_buf); @@ -1204,12 +1204,12 @@ static int ufs_statfs(struct dentry *dentry, struct kstatfs *buf) return 0; } -static struct kmem_cache * ufs_inode_cachep; +static kmem_cache_t * ufs_inode_cachep; static struct inode *ufs_alloc_inode(struct super_block *sb) { struct ufs_inode_info *ei; - ei = (struct ufs_inode_info *)kmem_cache_alloc(ufs_inode_cachep, GFP_KERNEL); + ei = (struct ufs_inode_info *)kmem_cache_alloc(ufs_inode_cachep, SLAB_KERNEL); if (!ei) return NULL; ei->vfs_inode.i_version = 1; @@ -1221,7 +1221,7 @@ static void ufs_destroy_inode(struct inode *inode) kmem_cache_free(ufs_inode_cachep, UFS_I(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct ufs_inode_info *ei = (struct ufs_inode_info *) foo; diff --git a/trunk/fs/ufs/util.h b/trunk/fs/ufs/util.h index 7dd12bb1d62b..28fce6c239b5 100644 --- a/trunk/fs/ufs/util.h +++ b/trunk/fs/ufs/util.h @@ -299,7 +299,7 @@ static inline void *get_usb_offset(struct ufs_sb_private_info *uspi, #define ubh_get_addr16(ubh,begin) \ (((__fs16*)((ubh)->bh[(begin) >> (uspi->s_fshift-1)]->b_data)) + \ - ((begin) & ((uspi->fsize>>1) - 1))) + ((begin) & (uspi->fsize>>1) - 1))) #define ubh_get_addr32(ubh,begin) \ (((__fs32*)((ubh)->bh[(begin) >> (uspi->s_fshift-2)]->b_data)) + \ diff --git a/trunk/fs/xfs/linux-2.6/xfs_aops.c b/trunk/fs/xfs/linux-2.6/xfs_aops.c index 8e6b56fc1cad..09360cf1e1f2 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_aops.c +++ b/trunk/fs/xfs/linux-2.6/xfs_aops.c @@ -149,10 +149,9 @@ xfs_destroy_ioend( */ STATIC void xfs_end_bio_delalloc( - struct work_struct *work) + void *data) { - xfs_ioend_t *ioend = - container_of(work, xfs_ioend_t, io_work); + xfs_ioend_t *ioend = data; xfs_destroy_ioend(ioend); } @@ -162,10 +161,9 @@ xfs_end_bio_delalloc( */ STATIC void xfs_end_bio_written( - struct work_struct *work) + void *data) { - xfs_ioend_t *ioend = - container_of(work, xfs_ioend_t, io_work); + xfs_ioend_t *ioend = data; xfs_destroy_ioend(ioend); } @@ -178,10 +176,9 @@ xfs_end_bio_written( */ STATIC void xfs_end_bio_unwritten( - struct work_struct *work) + void *data) { - xfs_ioend_t *ioend = - container_of(work, xfs_ioend_t, io_work); + xfs_ioend_t *ioend = data; bhv_vnode_t *vp = ioend->io_vnode; xfs_off_t offset = ioend->io_offset; size_t size = ioend->io_size; @@ -223,11 +220,11 @@ xfs_alloc_ioend( ioend->io_size = 0; if (type == IOMAP_UNWRITTEN) - INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten); + INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend); else if (type == IOMAP_DELAY) - INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc); + INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend); else - INIT_WORK(&ioend->io_work, xfs_end_bio_written); + INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend); return ioend; } diff --git a/trunk/fs/xfs/linux-2.6/xfs_buf.c b/trunk/fs/xfs/linux-2.6/xfs_buf.c index 4fb01ffdfd1a..d3382843698e 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_buf.c +++ b/trunk/fs/xfs/linux-2.6/xfs_buf.c @@ -32,7 +32,6 @@ #include #include #include -#include STATIC kmem_zone_t *xfs_buf_zone; STATIC kmem_shaker_t xfs_buf_shake; @@ -995,10 +994,9 @@ xfs_buf_wait_unpin( STATIC void xfs_buf_iodone_work( - struct work_struct *work) + void *v) { - xfs_buf_t *bp = - container_of(work, xfs_buf_t, b_iodone_work); + xfs_buf_t *bp = (xfs_buf_t *)v; if (bp->b_iodone) (*(bp->b_iodone))(bp); @@ -1019,10 +1017,10 @@ xfs_buf_ioend( if 
((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) { if (schedule) { - INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work); + INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp); queue_work(xfslogd_workqueue, &bp->b_iodone_work); } else { - xfs_buf_iodone_work(&bp->b_iodone_work); + xfs_buf_iodone_work(bp); } } else { up(&bp->b_iodonesema); @@ -1827,11 +1825,11 @@ xfs_buf_init(void) if (!xfs_buf_zone) goto out_free_trace_buf; - xfslogd_workqueue = create_freezeable_workqueue("xfslogd"); + xfslogd_workqueue = create_workqueue("xfslogd"); if (!xfslogd_workqueue) goto out_free_buf_zone; - xfsdatad_workqueue = create_freezeable_workqueue("xfsdatad"); + xfsdatad_workqueue = create_workqueue("xfsdatad"); if (!xfsdatad_workqueue) goto out_destroy_xfslogd_workqueue; diff --git a/trunk/fs/xfs/linux-2.6/xfs_super.c b/trunk/fs/xfs/linux-2.6/xfs_super.c index b93265b7c79c..de05abbbe7fd 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_super.c +++ b/trunk/fs/xfs/linux-2.6/xfs_super.c @@ -56,7 +56,6 @@ #include #include #include -#include STATIC struct quotactl_ops xfs_quotactl_operations; STATIC struct super_operations xfs_super_operations; diff --git a/trunk/include/acpi/platform/aclinux.h b/trunk/include/acpi/platform/aclinux.h index 7f1e92930b62..47faf27913a5 100644 --- a/trunk/include/acpi/platform/aclinux.h +++ b/trunk/include/acpi/platform/aclinux.h @@ -64,7 +64,7 @@ /* Host-dependent types and defines */ #define ACPI_MACHINE_WIDTH BITS_PER_LONG -#define acpi_cache_t struct kmem_cache +#define acpi_cache_t kmem_cache_t #define acpi_spinlock spinlock_t * #define ACPI_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL(symbol); #define strtoul simple_strtoul diff --git a/trunk/include/asm-alpha/dma-mapping.h b/trunk/include/asm-alpha/dma-mapping.h index 57e09f5e3424..b9ff4d8cb33a 100644 --- a/trunk/include/asm-alpha/dma-mapping.h +++ b/trunk/include/asm-alpha/dma-mapping.h @@ -51,7 +51,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -#define dma_is_consistent(d, h) (1) +#define dma_is_consistent(dev) (1) int dma_set_mask(struct device *dev, u64 mask); @@ -60,7 +60,7 @@ int dma_set_mask(struct device *dev, u64 mask); #define dma_sync_single_range(dev, addr, off, size, dir) do { } while (0) #define dma_sync_sg_for_cpu(dev, sg, nents, dir) do { } while (0) #define dma_sync_sg_for_device(dev, sg, nents, dir) do { } while (0) -#define dma_cache_sync(dev, va, size, dir) do { } while (0) +#define dma_cache_sync(va, size, dir) do { } while (0) #define dma_get_cache_alignment() L1_CACHE_BYTES diff --git a/trunk/include/asm-alpha/unistd.h b/trunk/include/asm-alpha/unistd.h index 84313d14e780..2cabbd465c0c 100644 --- a/trunk/include/asm-alpha/unistd.h +++ b/trunk/include/asm-alpha/unistd.h @@ -387,6 +387,188 @@ #define NR_SYSCALLS 447 +#if defined(__GNUC__) + +#define _syscall_return(type) \ + return (_sc_err ? 
errno = _sc_ret, _sc_ret = -1L : 0), (type) _sc_ret + +#define _syscall_clobbers \ + "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", \ + "$22", "$23", "$24", "$25", "$27", "$28" \ + +#define _syscall0(type, name) \ +type name(void) \ +{ \ + long _sc_ret, _sc_err; \ + { \ + register long _sc_0 __asm__("$0"); \ + register long _sc_19 __asm__("$19"); \ + \ + _sc_0 = __NR_##name; \ + __asm__("callsys # %0 %1 %2" \ + : "=r"(_sc_0), "=r"(_sc_19) \ + : "0"(_sc_0) \ + : _syscall_clobbers); \ + _sc_ret = _sc_0, _sc_err = _sc_19; \ + } \ + _syscall_return(type); \ +} + +#define _syscall1(type,name,type1,arg1) \ +type name(type1 arg1) \ +{ \ + long _sc_ret, _sc_err; \ + { \ + register long _sc_0 __asm__("$0"); \ + register long _sc_16 __asm__("$16"); \ + register long _sc_19 __asm__("$19"); \ + \ + _sc_0 = __NR_##name; \ + _sc_16 = (long) (arg1); \ + __asm__("callsys # %0 %1 %2 %3" \ + : "=r"(_sc_0), "=r"(_sc_19) \ + : "0"(_sc_0), "r"(_sc_16) \ + : _syscall_clobbers); \ + _sc_ret = _sc_0, _sc_err = _sc_19; \ + } \ + _syscall_return(type); \ +} + +#define _syscall2(type,name,type1,arg1,type2,arg2) \ +type name(type1 arg1,type2 arg2) \ +{ \ + long _sc_ret, _sc_err; \ + { \ + register long _sc_0 __asm__("$0"); \ + register long _sc_16 __asm__("$16"); \ + register long _sc_17 __asm__("$17"); \ + register long _sc_19 __asm__("$19"); \ + \ + _sc_0 = __NR_##name; \ + _sc_16 = (long) (arg1); \ + _sc_17 = (long) (arg2); \ + __asm__("callsys # %0 %1 %2 %3 %4" \ + : "=r"(_sc_0), "=r"(_sc_19) \ + : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17) \ + : _syscall_clobbers); \ + _sc_ret = _sc_0, _sc_err = _sc_19; \ + } \ + _syscall_return(type); \ +} + +#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ +type name(type1 arg1,type2 arg2,type3 arg3) \ +{ \ + long _sc_ret, _sc_err; \ + { \ + register long _sc_0 __asm__("$0"); \ + register long _sc_16 __asm__("$16"); \ + register long _sc_17 __asm__("$17"); \ + register long _sc_18 __asm__("$18"); \ + register long _sc_19 __asm__("$19"); \ + \ + _sc_0 = __NR_##name; \ + _sc_16 = (long) (arg1); \ + _sc_17 = (long) (arg2); \ + _sc_18 = (long) (arg3); \ + __asm__("callsys # %0 %1 %2 %3 %4 %5" \ + : "=r"(_sc_0), "=r"(_sc_19) \ + : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17), \ + "r"(_sc_18) \ + : _syscall_clobbers); \ + _sc_ret = _sc_0, _sc_err = _sc_19; \ + } \ + _syscall_return(type); \ +} + +#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ +type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ +{ \ + long _sc_ret, _sc_err; \ + { \ + register long _sc_0 __asm__("$0"); \ + register long _sc_16 __asm__("$16"); \ + register long _sc_17 __asm__("$17"); \ + register long _sc_18 __asm__("$18"); \ + register long _sc_19 __asm__("$19"); \ + \ + _sc_0 = __NR_##name; \ + _sc_16 = (long) (arg1); \ + _sc_17 = (long) (arg2); \ + _sc_18 = (long) (arg3); \ + _sc_19 = (long) (arg4); \ + __asm__("callsys # %0 %1 %2 %3 %4 %5 %6" \ + : "=r"(_sc_0), "=r"(_sc_19) \ + : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17), \ + "r"(_sc_18), "1"(_sc_19) \ + : _syscall_clobbers); \ + _sc_ret = _sc_0, _sc_err = _sc_19; \ + } \ + _syscall_return(type); \ +} + +#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ + type5,arg5) \ +type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ +{ \ + long _sc_ret, _sc_err; \ + { \ + register long _sc_0 __asm__("$0"); \ + register long _sc_16 __asm__("$16"); \ + register long _sc_17 __asm__("$17"); \ + register long _sc_18 __asm__("$18"); \ + register long _sc_19 __asm__("$19"); \ + register long _sc_20 
__asm__("$20"); \ + \ + _sc_0 = __NR_##name; \ + _sc_16 = (long) (arg1); \ + _sc_17 = (long) (arg2); \ + _sc_18 = (long) (arg3); \ + _sc_19 = (long) (arg4); \ + _sc_20 = (long) (arg5); \ + __asm__("callsys # %0 %1 %2 %3 %4 %5 %6 %7" \ + : "=r"(_sc_0), "=r"(_sc_19) \ + : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17), \ + "r"(_sc_18), "1"(_sc_19), "r"(_sc_20) \ + : _syscall_clobbers); \ + _sc_ret = _sc_0, _sc_err = _sc_19; \ + } \ + _syscall_return(type); \ +} + +#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ + type5,arg5,type6,arg6) \ +type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, type6 arg6)\ +{ \ + long _sc_ret, _sc_err; \ + { \ + register long _sc_0 __asm__("$0"); \ + register long _sc_16 __asm__("$16"); \ + register long _sc_17 __asm__("$17"); \ + register long _sc_18 __asm__("$18"); \ + register long _sc_19 __asm__("$19"); \ + register long _sc_20 __asm__("$20"); \ + register long _sc_21 __asm__("$21"); \ + \ + _sc_0 = __NR_##name; \ + _sc_16 = (long) (arg1); \ + _sc_17 = (long) (arg2); \ + _sc_18 = (long) (arg3); \ + _sc_19 = (long) (arg4); \ + _sc_20 = (long) (arg5); \ + _sc_21 = (long) (arg6); \ + __asm__("callsys # %0 %1 %2 %3 %4 %5 %6 %7 %8" \ + : "=r"(_sc_0), "=r"(_sc_19) \ + : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17), \ + "r"(_sc_18), "1"(_sc_19), "r"(_sc_20), "r"(_sc_21) \ + : _syscall_clobbers); \ + _sc_ret = _sc_0, _sc_err = _sc_19; \ + } \ + _syscall_return(type); \ +} + +#endif /* __GNUC__ */ + #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_STAT64 diff --git a/trunk/include/asm-arm/arch-omap/irda.h b/trunk/include/asm-arm/arch-omap/irda.h index 345a649ec838..805ae3575e44 100644 --- a/trunk/include/asm-arm/arch-omap/irda.h +++ b/trunk/include/asm-arm/arch-omap/irda.h @@ -24,7 +24,7 @@ struct omap_irda_config { /* Very specific to the needs of some platforms (h3,h4) * having calls which can sleep in irda_set_speed. */ - struct delayed_work gpio_expa; + struct work_struct gpio_expa; int rx_channel; int tx_channel; unsigned long dest_start; diff --git a/trunk/include/asm-arm/dma-mapping.h b/trunk/include/asm-arm/dma-mapping.h index 9bc46b486afb..666617711c81 100644 --- a/trunk/include/asm-arm/dma-mapping.h +++ b/trunk/include/asm-arm/dma-mapping.h @@ -48,7 +48,7 @@ static inline int dma_get_cache_alignment(void) return 32; } -static inline int dma_is_consistent(struct device *dev, dma_addr_t handle) +static inline int dma_is_consistent(dma_addr_t handle) { return !!arch_is_coherent(); } diff --git a/trunk/include/asm-arm/setup.h b/trunk/include/asm-arm/setup.h index e5407392afca..aa4b5782f0c9 100644 --- a/trunk/include/asm-arm/setup.h +++ b/trunk/include/asm-arm/setup.h @@ -14,57 +14,55 @@ #ifndef __ASMARM_SETUP_H #define __ASMARM_SETUP_H -#include - #define COMMAND_LINE_SIZE 1024 /* The list ends with an ATAG_NONE node. 
*/ #define ATAG_NONE 0x00000000 struct tag_header { - __u32 size; - __u32 tag; + u32 size; + u32 tag; }; /* The list must start with an ATAG_CORE node */ #define ATAG_CORE 0x54410001 struct tag_core { - __u32 flags; /* bit 0 = read-only */ - __u32 pagesize; - __u32 rootdev; + u32 flags; /* bit 0 = read-only */ + u32 pagesize; + u32 rootdev; }; /* it is allowed to have multiple ATAG_MEM nodes */ #define ATAG_MEM 0x54410002 struct tag_mem32 { - __u32 size; - __u32 start; /* physical start address */ + u32 size; + u32 start; /* physical start address */ }; /* VGA text type displays */ #define ATAG_VIDEOTEXT 0x54410003 struct tag_videotext { - __u8 x; - __u8 y; - __u16 video_page; - __u8 video_mode; - __u8 video_cols; - __u16 video_ega_bx; - __u8 video_lines; - __u8 video_isvga; - __u16 video_points; + u8 x; + u8 y; + u16 video_page; + u8 video_mode; + u8 video_cols; + u16 video_ega_bx; + u8 video_lines; + u8 video_isvga; + u16 video_points; }; /* describes how the ramdisk will be used in kernel */ #define ATAG_RAMDISK 0x54410004 struct tag_ramdisk { - __u32 flags; /* bit 0 = load, bit 1 = prompt */ - __u32 size; /* decompressed ramdisk size in _kilo_ bytes */ - __u32 start; /* starting block of floppy-based RAM disk image */ + u32 flags; /* bit 0 = load, bit 1 = prompt */ + u32 size; /* decompressed ramdisk size in _kilo_ bytes */ + u32 start; /* starting block of floppy-based RAM disk image */ }; /* describes where the compressed ramdisk image lives (virtual address) */ @@ -78,23 +76,23 @@ struct tag_ramdisk { #define ATAG_INITRD2 0x54420005 struct tag_initrd { - __u32 start; /* physical start address */ - __u32 size; /* size of compressed ramdisk image in bytes */ + u32 start; /* physical start address */ + u32 size; /* size of compressed ramdisk image in bytes */ }; /* board serial number. "64 bits should be enough for everybody" */ #define ATAG_SERIAL 0x54410006 struct tag_serialnr { - __u32 low; - __u32 high; + u32 low; + u32 high; }; /* board revision */ #define ATAG_REVISION 0x54410007 struct tag_revision { - __u32 rev; + u32 rev; }; /* initial values for vesafb-type framebuffers. 
see struct screen_info @@ -103,20 +101,20 @@ struct tag_revision { #define ATAG_VIDEOLFB 0x54410008 struct tag_videolfb { - __u16 lfb_width; - __u16 lfb_height; - __u16 lfb_depth; - __u16 lfb_linelength; - __u32 lfb_base; - __u32 lfb_size; - __u8 red_size; - __u8 red_pos; - __u8 green_size; - __u8 green_pos; - __u8 blue_size; - __u8 blue_pos; - __u8 rsvd_size; - __u8 rsvd_pos; + u16 lfb_width; + u16 lfb_height; + u16 lfb_depth; + u16 lfb_linelength; + u32 lfb_base; + u32 lfb_size; + u8 red_size; + u8 red_pos; + u8 green_size; + u8 green_pos; + u8 blue_size; + u8 blue_pos; + u8 rsvd_size; + u8 rsvd_pos; }; /* command line: \0 terminated string */ @@ -130,17 +128,17 @@ struct tag_cmdline { #define ATAG_ACORN 0x41000101 struct tag_acorn { - __u32 memc_control_reg; - __u32 vram_pages; - __u8 sounddefault; - __u8 adfsdrives; + u32 memc_control_reg; + u32 vram_pages; + u8 sounddefault; + u8 adfsdrives; }; /* footbridge memory clock, see arch/arm/mach-footbridge/arch.c */ #define ATAG_MEMCLK 0x41000402 struct tag_memclk { - __u32 fmemclk; + u32 fmemclk; }; struct tag { @@ -169,26 +167,24 @@ struct tag { }; struct tagtable { - __u32 tag; + u32 tag; int (*parse)(const struct tag *); }; +#define __tag __attribute_used__ __attribute__((__section__(".taglist.init"))) +#define __tagtable(tag, fn) \ +static struct tagtable __tagtable_##fn __tag = { tag, fn } + #define tag_member_present(tag,member) \ ((unsigned long)(&((struct tag *)0L)->member + 1) \ <= (tag)->hdr.size * 4) -#define tag_next(t) ((struct tag *)((__u32 *)(t) + (t)->hdr.size)) +#define tag_next(t) ((struct tag *)((u32 *)(t) + (t)->hdr.size)) #define tag_size(type) ((sizeof(struct tag_header) + sizeof(struct type)) >> 2) #define for_each_tag(t,base) \ for (t = base; t->hdr.size; t = tag_next(t)) -#ifdef __KERNEL__ - -#define __tag __attribute_used__ __attribute__((__section__(".taglist.init"))) -#define __tagtable(tag, fn) \ -static struct tagtable __tagtable_##fn __tag = { tag, fn } - /* * Memory map description */ @@ -221,6 +217,4 @@ struct early_params { static struct early_params __early_##fn __attribute_used__ \ __attribute__((__section__(".early_param.init"))) = { name, fn } -#endif /* __KERNEL__ */ - #endif diff --git a/trunk/include/asm-arm/unistd.h b/trunk/include/asm-arm/unistd.h index d44c629d8424..14a87eec5a2d 100644 --- a/trunk/include/asm-arm/unistd.h +++ b/trunk/include/asm-arm/unistd.h @@ -377,6 +377,156 @@ #endif #ifdef __KERNEL__ +#include +#include + +#define __sys2(x) #x +#define __sys1(x) __sys2(x) + +#ifndef __syscall +#if defined(__thumb__) || defined(__ARM_EABI__) +#define __SYS_REG(name) register long __sysreg __asm__("r7") = __NR_##name; +#define __SYS_REG_LIST(regs...) "r" (__sysreg) , ##regs +#define __syscall(name) "swi\t0" +#else +#define __SYS_REG(name) +#define __SYS_REG_LIST(regs...) 
regs +#define __syscall(name) "swi\t" __sys1(__NR_##name) "" +#endif +#endif + +#define __syscall_return(type, res) \ +do { \ + if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \ + errno = -(res); \ + res = -1; \ + } \ + return (type) (res); \ +} while (0) + +#define _syscall0(type,name) \ +type name(void) { \ + __SYS_REG(name) \ + register long __res_r0 __asm__("r0"); \ + long __res; \ + __asm__ __volatile__ ( \ + __syscall(name) \ + : "=r" (__res_r0) \ + : __SYS_REG_LIST() \ + : "memory" ); \ + __res = __res_r0; \ + __syscall_return(type,__res); \ +} + +#define _syscall1(type,name,type1,arg1) \ +type name(type1 arg1) { \ + __SYS_REG(name) \ + register long __r0 __asm__("r0") = (long)arg1; \ + register long __res_r0 __asm__("r0"); \ + long __res; \ + __asm__ __volatile__ ( \ + __syscall(name) \ + : "=r" (__res_r0) \ + : __SYS_REG_LIST( "0" (__r0) ) \ + : "memory" ); \ + __res = __res_r0; \ + __syscall_return(type,__res); \ +} + +#define _syscall2(type,name,type1,arg1,type2,arg2) \ +type name(type1 arg1,type2 arg2) { \ + __SYS_REG(name) \ + register long __r0 __asm__("r0") = (long)arg1; \ + register long __r1 __asm__("r1") = (long)arg2; \ + register long __res_r0 __asm__("r0"); \ + long __res; \ + __asm__ __volatile__ ( \ + __syscall(name) \ + : "=r" (__res_r0) \ + : __SYS_REG_LIST( "0" (__r0), "r" (__r1) ) \ + : "memory" ); \ + __res = __res_r0; \ + __syscall_return(type,__res); \ +} + + +#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ +type name(type1 arg1,type2 arg2,type3 arg3) { \ + __SYS_REG(name) \ + register long __r0 __asm__("r0") = (long)arg1; \ + register long __r1 __asm__("r1") = (long)arg2; \ + register long __r2 __asm__("r2") = (long)arg3; \ + register long __res_r0 __asm__("r0"); \ + long __res; \ + __asm__ __volatile__ ( \ + __syscall(name) \ + : "=r" (__res_r0) \ + : __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2) ) \ + : "memory" ); \ + __res = __res_r0; \ + __syscall_return(type,__res); \ +} + + +#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)\ +type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ + __SYS_REG(name) \ + register long __r0 __asm__("r0") = (long)arg1; \ + register long __r1 __asm__("r1") = (long)arg2; \ + register long __r2 __asm__("r2") = (long)arg3; \ + register long __r3 __asm__("r3") = (long)arg4; \ + register long __res_r0 __asm__("r0"); \ + long __res; \ + __asm__ __volatile__ ( \ + __syscall(name) \ + : "=r" (__res_r0) \ + : __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2), "r" (__r3) ) \ + : "memory" ); \ + __res = __res_r0; \ + __syscall_return(type,__res); \ +} + + +#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \ +type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) { \ + __SYS_REG(name) \ + register long __r0 __asm__("r0") = (long)arg1; \ + register long __r1 __asm__("r1") = (long)arg2; \ + register long __r2 __asm__("r2") = (long)arg3; \ + register long __r3 __asm__("r3") = (long)arg4; \ + register long __r4 __asm__("r4") = (long)arg5; \ + register long __res_r0 __asm__("r0"); \ + long __res; \ + __asm__ __volatile__ ( \ + __syscall(name) \ + : "=r" (__res_r0) \ + : __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2), \ + "r" (__r3), "r" (__r4) ) \ + : "memory" ); \ + __res = __res_r0; \ + __syscall_return(type,__res); \ +} + +#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \ +type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) { \ + __SYS_REG(name) \ + 
register long __r0 __asm__("r0") = (long)arg1; \ + register long __r1 __asm__("r1") = (long)arg2; \ + register long __r2 __asm__("r2") = (long)arg3; \ + register long __r3 __asm__("r3") = (long)arg4; \ + register long __r4 __asm__("r4") = (long)arg5; \ + register long __r5 __asm__("r5") = (long)arg6; \ + register long __res_r0 __asm__("r0"); \ + long __res; \ + __asm__ __volatile__ ( \ + __syscall(name) \ + : "=r" (__res_r0) \ + : __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2), \ + "r" (__r3), "r" (__r4), "r" (__r5) ) \ + : "memory" ); \ + __res = __res_r0; \ + __syscall_return(type,__res); \ +} #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_STAT64 diff --git a/trunk/include/asm-arm26/pgalloc.h b/trunk/include/asm-arm26/pgalloc.h index 7725af3ddb4d..6437167b1ffe 100644 --- a/trunk/include/asm-arm26/pgalloc.h +++ b/trunk/include/asm-arm26/pgalloc.h @@ -15,7 +15,7 @@ #include #include -extern struct kmem_cache *pte_cache; +extern kmem_cache_t *pte_cache; static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr){ return kmem_cache_alloc(pte_cache, GFP_KERNEL); diff --git a/trunk/include/asm-arm26/setup.h b/trunk/include/asm-arm26/setup.h index 1a867b4e8d53..6348931be65d 100644 --- a/trunk/include/asm-arm26/setup.h +++ b/trunk/include/asm-arm26/setup.h @@ -16,8 +16,6 @@ #define COMMAND_LINE_SIZE 1024 -#ifdef __KERNEL__ - /* The list ends with an ATAG_NONE node. */ #define ATAG_NONE 0x00000000 @@ -204,6 +202,4 @@ struct meminfo { extern struct meminfo meminfo; -#endif /* __KERNEL__ */ - #endif diff --git a/trunk/include/asm-arm26/unistd.h b/trunk/include/asm-arm26/unistd.h index 4c3b919177e5..25a5eead85be 100644 --- a/trunk/include/asm-arm26/unistd.h +++ b/trunk/include/asm-arm26/unistd.h @@ -311,6 +311,139 @@ #define __ARM_NR_usr26 (__ARM_NR_BASE+3) #ifdef __KERNEL__ +#include +#include + +#define __sys2(x) #x +#define __sys1(x) __sys2(x) + +#ifndef __syscall +#define __syscall(name) "swi\t" __sys1(__NR_##name) "" +#endif + +#define __syscall_return(type, res) \ +do { \ + if ((unsigned long)(res) >= (unsigned long)-MAX_ERRNO) { \ + errno = -(res); \ + res = -1; \ + } \ + return (type) (res); \ +} while (0) + +#define _syscall0(type,name) \ +type name(void) { \ + register long __res_r0 __asm__("r0"); \ + long __res; \ + __asm__ __volatile__ ( \ + __syscall(name) \ + : "=r" (__res_r0) \ + : \ + : "lr"); \ + __res = __res_r0; \ + __syscall_return(type,__res); \ +} + +#define _syscall1(type,name,type1,arg1) \ +type name(type1 arg1) { \ + register long __r0 __asm__("r0") = (long)arg1; \ + register long __res_r0 __asm__("r0"); \ + long __res; \ + __asm__ __volatile__ ( \ + __syscall(name) \ + : "=r" (__res_r0) \ + : "r" (__r0) \ + : "lr"); \ + __res = __res_r0; \ + __syscall_return(type,__res); \ +} + +#define _syscall2(type,name,type1,arg1,type2,arg2) \ +type name(type1 arg1,type2 arg2) { \ + register long __r0 __asm__("r0") = (long)arg1; \ + register long __r1 __asm__("r1") = (long)arg2; \ + register long __res_r0 __asm__("r0"); \ + long __res; \ + __asm__ __volatile__ ( \ + __syscall(name) \ + : "=r" (__res_r0) \ + : "r" (__r0),"r" (__r1) \ + : "lr"); \ + __res = __res_r0; \ + __syscall_return(type,__res); \ +} + + +#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ +type name(type1 arg1,type2 arg2,type3 arg3) { \ + register long __r0 __asm__("r0") = (long)arg1; \ + register long __r1 __asm__("r1") = (long)arg2; \ + register long __r2 __asm__("r2") = (long)arg3; \ + register long __res_r0 __asm__("r0"); \ + long __res; \ + __asm__ __volatile__ 
( \ + __syscall(name) \ + : "=r" (__res_r0) \ + : "r" (__r0),"r" (__r1),"r" (__r2) \ + : "lr"); \ + __res = __res_r0; \ + __syscall_return(type,__res); \ +} + + +#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)\ +type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ + register long __r0 __asm__("r0") = (long)arg1; \ + register long __r1 __asm__("r1") = (long)arg2; \ + register long __r2 __asm__("r2") = (long)arg3; \ + register long __r3 __asm__("r3") = (long)arg4; \ + register long __res_r0 __asm__("r0"); \ + long __res; \ + __asm__ __volatile__ ( \ + __syscall(name) \ + : "=r" (__res_r0) \ + : "r" (__r0),"r" (__r1),"r" (__r2),"r" (__r3) \ + : "lr"); \ + __res = __res_r0; \ + __syscall_return(type,__res); \ +} + + +#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \ +type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) { \ + register long __r0 __asm__("r0") = (long)arg1; \ + register long __r1 __asm__("r1") = (long)arg2; \ + register long __r2 __asm__("r2") = (long)arg3; \ + register long __r3 __asm__("r3") = (long)arg4; \ + register long __r4 __asm__("r4") = (long)arg5; \ + register long __res_r0 __asm__("r0"); \ + long __res; \ + __asm__ __volatile__ ( \ + __syscall(name) \ + : "=r" (__res_r0) \ + : "r" (__r0),"r" (__r1),"r" (__r2),"r" (__r3),"r" (__r4) \ + : "lr"); \ + __res = __res_r0; \ + __syscall_return(type,__res); \ +} + +#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \ +type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) { \ + register long __r0 __asm__("r0") = (long)arg1; \ + register long __r1 __asm__("r1") = (long)arg2; \ + register long __r2 __asm__("r2") = (long)arg3; \ + register long __r3 __asm__("r3") = (long)arg4; \ + register long __r4 __asm__("r4") = (long)arg5; \ + register long __r5 __asm__("r5") = (long)arg6; \ + register long __res_r0 __asm__("r0"); \ + long __res; \ + __asm__ __volatile__ ( \ + __syscall(name) \ + : "=r" (__res_r0) \ + : "r" (__r0),"r" (__r1),"r" (__r2),"r" (__r3), "r" (__r4),"r" (__r5) \ + : "lr"); \ + __res = __res_r0; \ + __syscall_return(type,__res); \ +} #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR diff --git a/trunk/include/asm-avr32/dma-mapping.h b/trunk/include/asm-avr32/dma-mapping.h index 0580b5d62bba..4c40cb41cdf8 100644 --- a/trunk/include/asm-avr32/dma-mapping.h +++ b/trunk/include/asm-avr32/dma-mapping.h @@ -8,8 +8,7 @@ #include #include -extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - int direction); +extern void dma_cache_sync(void *vaddr, size_t size, int direction); /* * Return whether the given device DMA address mask can be supported @@ -308,7 +307,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr) +static inline int dma_is_consistent(dma_addr_t dma_addr) { return 1; } diff --git a/trunk/include/asm-avr32/setup.h b/trunk/include/asm-avr32/setup.h index 0a5224245e44..10193da4113b 100644 --- a/trunk/include/asm-avr32/setup.h +++ b/trunk/include/asm-avr32/setup.h @@ -13,8 +13,6 @@ #define COMMAND_LINE_SIZE 256 -#ifdef __KERNEL__ - /* Magic number indicating that a tag table is present */ #define ATAG_MAGIC 0xa2a25441 @@ -140,6 +138,4 @@ void chip_enable_sdram(void); #endif /* 
!__ASSEMBLY__ */ -#endif /* __KERNEL__ */ - #endif /* __ASM_AVR32_SETUP_H__ */ diff --git a/trunk/include/asm-cris/arch-v10/bitops.h b/trunk/include/asm-cris/arch-v10/bitops.h index be85f6de25d3..b73f5396e5a6 100644 --- a/trunk/include/asm-cris/arch-v10/bitops.h +++ b/trunk/include/asm-cris/arch-v10/bitops.h @@ -10,7 +10,7 @@ * number. They differ in that the first function also inverts all bits * in the input. */ -static inline unsigned long cris_swapnwbrlz(unsigned long w) +extern inline unsigned long cris_swapnwbrlz(unsigned long w) { /* Let's just say we return the result in the same register as the input. Saying we clobber the input but can return the result @@ -26,7 +26,7 @@ static inline unsigned long cris_swapnwbrlz(unsigned long w) return res; } -static inline unsigned long cris_swapwbrlz(unsigned long w) +extern inline unsigned long cris_swapwbrlz(unsigned long w) { unsigned res; __asm__ ("swapwbr %0 \n\t" @@ -40,7 +40,7 @@ static inline unsigned long cris_swapwbrlz(unsigned long w) * ffz = Find First Zero in word. Undefined if no zero exists, * so code should check against ~0UL first.. */ -static inline unsigned long ffz(unsigned long w) +extern inline unsigned long ffz(unsigned long w) { return cris_swapnwbrlz(w); } @@ -51,7 +51,7 @@ static inline unsigned long ffz(unsigned long w) * * Undefined if no bit exists, so code should check against 0 first. */ -static inline unsigned long __ffs(unsigned long word) +extern inline unsigned long __ffs(unsigned long word) { return cris_swapnwbrlz(~word); } @@ -65,7 +65,7 @@ static inline unsigned long __ffs(unsigned long word) * differs in spirit from the above ffz (man ffs). */ -static inline unsigned long kernel_ffs(unsigned long w) +extern inline unsigned long kernel_ffs(unsigned long w) { return w ? cris_swapwbrlz (w) + 1 : 0; } diff --git a/trunk/include/asm-cris/dma-mapping.h b/trunk/include/asm-cris/dma-mapping.h index 662cea70152d..cbf1a98f0129 100644 --- a/trunk/include/asm-cris/dma-mapping.h +++ b/trunk/include/asm-cris/dma-mapping.h @@ -156,10 +156,10 @@ dma_get_cache_alignment(void) return (1 << INTERNODE_CACHE_SHIFT); } -#define dma_is_consistent(d, h) (1) +#define dma_is_consistent(d) (1) static inline void -dma_cache_sync(struct device *dev, void *vaddr, size_t size, +dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction) { } diff --git a/trunk/include/asm-cris/semaphore-helper.h b/trunk/include/asm-cris/semaphore-helper.h index a8e1e6cb7cd0..dbd0f30b85b6 100644 --- a/trunk/include/asm-cris/semaphore-helper.h +++ b/trunk/include/asm-cris/semaphore-helper.h @@ -20,12 +20,12 @@ /* * These two _must_ execute atomically wrt each other. 
*/ -static inline void wake_one_more(struct semaphore * sem) +extern inline void wake_one_more(struct semaphore * sem) { atomic_inc(&sem->waking); } -static inline int waking_non_zero(struct semaphore *sem) +extern inline int waking_non_zero(struct semaphore *sem) { unsigned long flags; int ret = 0; @@ -40,7 +40,7 @@ static inline int waking_non_zero(struct semaphore *sem) return ret; } -static inline int waking_non_zero_interruptible(struct semaphore *sem, +extern inline int waking_non_zero_interruptible(struct semaphore *sem, struct task_struct *tsk) { int ret = 0; @@ -59,7 +59,7 @@ static inline int waking_non_zero_interruptible(struct semaphore *sem, return ret; } -static inline int waking_non_zero_trylock(struct semaphore *sem) +extern inline int waking_non_zero_trylock(struct semaphore *sem) { int ret = 1; unsigned long flags; diff --git a/trunk/include/asm-frv/dma-mapping.h b/trunk/include/asm-frv/dma-mapping.h index bcb2df68496e..e9fc1d47797e 100644 --- a/trunk/include/asm-frv/dma-mapping.h +++ b/trunk/include/asm-frv/dma-mapping.h @@ -172,10 +172,10 @@ int dma_get_cache_alignment(void) return 1 << L1_CACHE_SHIFT; } -#define dma_is_consistent(d, h) (1) +#define dma_is_consistent(d) (1) static inline -void dma_cache_sync(struct device *dev, void *vaddr, size_t size, +void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction) { flush_write_buffers(); diff --git a/trunk/include/asm-frv/highmem.h b/trunk/include/asm-frv/highmem.h index ff4d6cdeb152..0f390f41f816 100644 --- a/trunk/include/asm-frv/highmem.h +++ b/trunk/include/asm-frv/highmem.h @@ -115,7 +115,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type) { unsigned long paddr; - pagefault_disable(); + inc_preempt_count(); paddr = page_to_phys(page); switch (type) { @@ -170,7 +170,8 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type) default: BUG(); } - pagefault_enable(); + dec_preempt_count(); + preempt_check_resched(); } #endif /* !__ASSEMBLY__ */ diff --git a/trunk/include/asm-frv/param.h b/trunk/include/asm-frv/param.h index 365653b1726c..168381ebb41a 100644 --- a/trunk/include/asm-frv/param.h +++ b/trunk/include/asm-frv/param.h @@ -18,5 +18,6 @@ #endif #define MAXHOSTNAMELEN 64 /* max length of hostname */ +#define COMMAND_LINE_SIZE 512 #endif /* _ASM_PARAM_H */ diff --git a/trunk/include/asm-frv/setup.h b/trunk/include/asm-frv/setup.h index afd787ceede6..0d293b9a5857 100644 --- a/trunk/include/asm-frv/setup.h +++ b/trunk/include/asm-frv/setup.h @@ -12,10 +12,6 @@ #ifndef _ASM_SETUP_H #define _ASM_SETUP_H -#define COMMAND_LINE_SIZE 512 - -#ifdef __KERNEL__ - #include #ifndef __ASSEMBLY__ @@ -26,6 +22,4 @@ extern unsigned long __initdata num_mappedpages; #endif /* !__ASSEMBLY__ */ -#endif /* __KERNEL__ */ - #endif /* _ASM_SETUP_H */ diff --git a/trunk/include/asm-frv/unistd.h b/trunk/include/asm-frv/unistd.h index 584c0417ae4d..725e854928cf 100644 --- a/trunk/include/asm-frv/unistd.h +++ b/trunk/include/asm-frv/unistd.h @@ -320,6 +320,125 @@ #ifdef __KERNEL__ #define NR_syscalls 310 +#include + +/* + * process the return value of a syscall, consigning it to one of two possible fates + * - user-visible error numbers are in the range -1 - -4095: see + */ +#undef __syscall_return +#define __syscall_return(type, res) \ +do { \ + unsigned long __sr2 = (res); \ + if (__builtin_expect(__sr2 >= (unsigned long)(-MAX_ERRNO), 0)) { \ + errno = (-__sr2); \ + __sr2 = ~0UL; \ + } \ + return (type) __sr2; \ +} while (0) + +/* XXX - _foo needs to be __foo, while __NR_bar could be 
_NR_bar. */ + +#undef _syscall0 +#define _syscall0(type,name) \ +type name(void) \ +{ \ + register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \ + register unsigned long __sc0 __asm__ ("gr8"); \ + __asm__ __volatile__ ("tira gr0,#0" \ + : "=r" (__sc0) \ + : "r" (__scnum)); \ + __syscall_return(type, __sc0); \ +} + +#undef _syscall1 +#define _syscall1(type,name,type1,arg1) \ +type name(type1 arg1) \ +{ \ + register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \ + register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \ + __asm__ __volatile__ ("tira gr0,#0" \ + : "+r" (__sc0) \ + : "r" (__scnum)); \ + __syscall_return(type, __sc0); \ +} + +#undef _syscall2 +#define _syscall2(type,name,type1,arg1,type2,arg2) \ +type name(type1 arg1,type2 arg2) \ +{ \ + register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \ + register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \ + register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2; \ + __asm__ __volatile__ ("tira gr0,#0" \ + : "+r" (__sc0) \ + : "r" (__scnum), "r" (__sc1)); \ + __syscall_return(type, __sc0); \ +} + +#undef _syscall3 +#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ +type name(type1 arg1,type2 arg2,type3 arg3) \ +{ \ + register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \ + register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \ + register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2; \ + register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3; \ + __asm__ __volatile__ ("tira gr0,#0" \ + : "+r" (__sc0) \ + : "r" (__scnum), "r" (__sc1), "r" (__sc2)); \ + __syscall_return(type, __sc0); \ +} + +#undef _syscall4 +#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ +type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ +{ \ + register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \ + register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \ + register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2; \ + register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3; \ + register unsigned long __sc3 __asm__ ("gr11") = (unsigned long) arg4; \ + __asm__ __volatile__ ("tira gr0,#0" \ + : "+r" (__sc0) \ + : "r" (__scnum), "r" (__sc1), "r" (__sc2), "r" (__sc3)); \ + __syscall_return(type, __sc0); \ +} + +#undef _syscall5 +#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \ +type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ +{ \ + register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \ + register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \ + register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2; \ + register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3; \ + register unsigned long __sc3 __asm__ ("gr11") = (unsigned long) arg4; \ + register unsigned long __sc4 __asm__ ("gr12") = (unsigned long) arg5; \ + __asm__ __volatile__ ("tira gr0,#0" \ + : "+r" (__sc0) \ + : "r" (__scnum), "r" (__sc1), "r" (__sc2), \ + "r" (__sc3), "r" (__sc4)); \ + __syscall_return(type, __sc0); \ +} + +#undef _syscall6 +#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5, type6, arg6) \ +type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \ +{ \ + register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \ + register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \ + register 
unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2; \ + register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3; \ + register unsigned long __sc3 __asm__ ("gr11") = (unsigned long) arg4; \ + register unsigned long __sc4 __asm__ ("gr12") = (unsigned long) arg5; \ + register unsigned long __sc5 __asm__ ("gr13") = (unsigned long) arg6; \ + __asm__ __volatile__ ("tira gr0,#0" \ + : "+r" (__sc0) \ + : "r" (__scnum), "r" (__sc1), "r" (__sc2), \ + "r" (__sc3), "r" (__sc4), "r" (__sc5)); \ + __syscall_return(type, __sc0); \ +} #define __ARCH_WANT_IPC_PARSE_VERSION /* #define __ARCH_WANT_OLD_READDIR */ diff --git a/trunk/include/asm-generic/Kbuild b/trunk/include/asm-generic/Kbuild index fa14f8cd30c5..3c06be381701 100644 --- a/trunk/include/asm-generic/Kbuild +++ b/trunk/include/asm-generic/Kbuild @@ -1,3 +1,4 @@ +header-y += atomic.h header-y += errno-base.h header-y += errno.h header-y += fcntl.h diff --git a/trunk/include/asm-generic/Kbuild.asm b/trunk/include/asm-generic/Kbuild.asm index a37e95fe58d6..a84c3d88a189 100644 --- a/trunk/include/asm-generic/Kbuild.asm +++ b/trunk/include/asm-generic/Kbuild.asm @@ -14,7 +14,6 @@ unifdef-y += posix_types.h unifdef-y += ptrace.h unifdef-y += resource.h unifdef-y += sembuf.h -unifdef-y += setup.h unifdef-y += shmbuf.h unifdef-y += sigcontext.h unifdef-y += siginfo.h diff --git a/trunk/include/asm-generic/atomic.h b/trunk/include/asm-generic/atomic.h index b7e4a0467cb1..42a95d9a0641 100644 --- a/trunk/include/asm-generic/atomic.h +++ b/trunk/include/asm-generic/atomic.h @@ -66,7 +66,7 @@ static inline void atomic_long_sub(long i, atomic_long_t *l) atomic64_sub(i, v); } -#else /* BITS_PER_LONG == 64 */ +#else typedef atomic_t atomic_long_t; @@ -113,6 +113,5 @@ static inline void atomic_long_sub(long i, atomic_long_t *l) atomic_sub(i, v); } -#endif /* BITS_PER_LONG == 64 */ - -#endif /* _ASM_GENERIC_ATOMIC_H */ +#endif +#endif diff --git a/trunk/include/asm-generic/dma-mapping.h b/trunk/include/asm-generic/dma-mapping.h index 783ab9944d70..b541e48cc545 100644 --- a/trunk/include/asm-generic/dma-mapping.h +++ b/trunk/include/asm-generic/dma-mapping.h @@ -266,7 +266,7 @@ dma_error(dma_addr_t dma_addr) #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -#define dma_is_consistent(d, h) (1) +#define dma_is_consistent(d) (1) static inline int dma_get_cache_alignment(void) @@ -295,7 +295,7 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, } static inline void -dma_cache_sync(struct device *dev, void *vaddr, size_t size, +dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction) { /* could define this in terms of the dma_cache ... operations, diff --git a/trunk/include/asm-generic/futex.h b/trunk/include/asm-generic/futex.h index f422df0956a2..df893c160318 100644 --- a/trunk/include/asm-generic/futex.h +++ b/trunk/include/asm-generic/futex.h @@ -21,7 +21,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - pagefault_disable(); + inc_preempt_count(); switch (op) { case FUTEX_OP_SET: @@ -33,7 +33,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ret = -ENOSYS; } - pagefault_enable(); + dec_preempt_count(); if (!ret) { switch (cmp) { diff --git a/trunk/include/asm-h8300/delay.h b/trunk/include/asm-h8300/delay.h index 743beba70f82..cbccbbdd640f 100644 --- a/trunk/include/asm-h8300/delay.h +++ b/trunk/include/asm-h8300/delay.h @@ -9,7 +9,7 @@ * Delay routines, using a pre-computed "loops_per_second" value. */ -static inline void __delay(unsigned long loops) +extern __inline__ void __delay(unsigned long loops) { __asm__ __volatile__ ("1:\n\t" "dec.l #1,%0\n\t" @@ -27,7 +27,7 @@ static inline void __delay(unsigned long loops) extern unsigned long loops_per_jiffy; -static inline void udelay(unsigned long usecs) +extern __inline__ void udelay(unsigned long usecs) { usecs *= 4295; /* 2**32 / 1000000 */ usecs /= (loops_per_jiffy*HZ); diff --git a/trunk/include/asm-h8300/mmu_context.h b/trunk/include/asm-h8300/mmu_context.h index 5c165f7bee0e..855721a5dcc9 100644 --- a/trunk/include/asm-h8300/mmu_context.h +++ b/trunk/include/asm-h8300/mmu_context.h @@ -9,7 +9,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { } -static inline int +extern inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { // mm->context = virt_to_phys(mm->pgd); @@ -23,7 +23,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, str { } -static inline void activate_mm(struct mm_struct *prev_mm, +extern inline void activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm) { } diff --git a/trunk/include/asm-h8300/pci.h b/trunk/include/asm-h8300/pci.h index 0c771b05fdd5..5edad5b70fd5 100644 --- a/trunk/include/asm-h8300/pci.h +++ b/trunk/include/asm-h8300/pci.h @@ -10,12 +10,12 @@ #define pcibios_assign_all_busses() 0 #define pcibios_scan_all_fns(a, b) 0 -static inline void pcibios_set_master(struct pci_dev *dev) +extern inline void pcibios_set_master(struct pci_dev *dev) { /* No special bus mastering setup handling */ } -static inline void pcibios_penalize_isa_irq(int irq, int active) +extern inline void pcibios_penalize_isa_irq(int irq, int active) { /* We don't do dynamic PCI IRQ allocation */ } diff --git a/trunk/include/asm-h8300/tlbflush.h b/trunk/include/asm-h8300/tlbflush.h index 9a2c5c9fd700..bbdffbeeedef 100644 --- a/trunk/include/asm-h8300/tlbflush.h +++ b/trunk/include/asm-h8300/tlbflush.h @@ -47,12 +47,12 @@ static inline void flush_tlb_range(struct mm_struct *mm, BUG(); } -static inline void flush_tlb_kernel_page(unsigned long addr) +extern inline void flush_tlb_kernel_page(unsigned long addr) { BUG(); } -static inline void flush_tlb_pgtables(struct mm_struct *mm, +extern inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end) { BUG(); diff --git a/trunk/include/asm-h8300/unistd.h b/trunk/include/asm-h8300/unistd.h index 7ddd414f8d16..747788d629ae 100644 --- a/trunk/include/asm-h8300/unistd.h +++ b/trunk/include/asm-h8300/unistd.h @@ -295,6 +295,172 @@ #ifdef __KERNEL__ #define NR_syscalls 289 +#include + +/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see + */ + +#define __syscall_return(type, res) \ +do { \ + if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \ + /* avoid using res which is declared to be in register d0; \ + errno might expand to a function call and clobber it. 
*/ \ + int __err = -(res); \ + errno = __err; \ + res = -1; \ + } \ + return (type) (res); \ +} while (0) + +#define _syscall0(type, name) \ +type name(void) \ +{ \ + register long __res __asm__("er0"); \ + __asm__ __volatile__ ("mov.l %1,er0\n\t" \ + "trapa #0\n\t" \ + : "=r" (__res) \ + : "g" (__NR_##name) \ + : "cc", "memory"); \ + __syscall_return(type, __res); \ +} + +#define _syscall1(type, name, atype, a) \ +type name(atype a) \ +{ \ + register long __res __asm__("er0"); \ + register long _a __asm__("er1"); \ + _a = (long)a; \ + __asm__ __volatile__ ("mov.l %1,er0\n\t" \ + "trapa #0\n\t" \ + : "=r" (__res) \ + : "g" (__NR_##name), \ + "g" (_a) \ + : "cc", "memory"); \ + __syscall_return(type, __res); \ +} + +#define _syscall2(type, name, atype, a, btype, b) \ +type name(atype a, btype b) \ +{ \ + register long __res __asm__("er0"); \ + register long _a __asm__("er1"); \ + register long _b __asm__("er2"); \ + _a = (long)a; \ + _b = (long)b; \ + __asm__ __volatile__ ("mov.l %1,er0\n\t" \ + "trapa #0\n\t" \ + : "=r" (__res) \ + : "g" (__NR_##name), \ + "g" (_a), \ + "g" (_b) \ + : "cc", "memory"); \ + __syscall_return(type, __res); \ +} + +#define _syscall3(type, name, atype, a, btype, b, ctype, c) \ +type name(atype a, btype b, ctype c) \ +{ \ + register long __res __asm__("er0"); \ + register long _a __asm__("er1"); \ + register long _b __asm__("er2"); \ + register long _c __asm__("er3"); \ + _a = (long)a; \ + _b = (long)b; \ + _c = (long)c; \ + __asm__ __volatile__ ("mov.l %1,er0\n\t" \ + "trapa #0\n\t" \ + : "=r" (__res) \ + : "g" (__NR_##name), \ + "g" (_a), \ + "g" (_b), \ + "g" (_c) \ + : "cc", "memory"); \ + __syscall_return(type, __res); \ +} + +#define _syscall4(type, name, atype, a, btype, b, \ + ctype, c, dtype, d) \ +type name(atype a, btype b, ctype c, dtype d) \ +{ \ + register long __res __asm__("er0"); \ + register long _a __asm__("er1"); \ + register long _b __asm__("er2"); \ + register long _c __asm__("er3"); \ + register long _d __asm__("er4"); \ + _a = (long)a; \ + _b = (long)b; \ + _c = (long)c; \ + _d = (long)d; \ + __asm__ __volatile__ ("mov.l %1,er0\n\t" \ + "trapa #0\n\t" \ + : "=r" (__res) \ + : "g" (__NR_##name), \ + "g" (_a), \ + "g" (_b), \ + "g" (_c), \ + "g" (_d) \ + : "cc", "memory"); \ + __syscall_return(type, __res); \ +} + +#define _syscall5(type, name, atype, a, btype, b, \ + ctype, c, dtype, d, etype, e) \ +type name(atype a, btype b, ctype c, dtype d, etype e) \ +{ \ + register long __res __asm__("er0"); \ + register long _a __asm__("er1"); \ + register long _b __asm__("er2"); \ + register long _c __asm__("er3"); \ + register long _d __asm__("er4"); \ + register long _e __asm__("er5"); \ + _a = (long)a; \ + _b = (long)b; \ + _c = (long)c; \ + _d = (long)d; \ + _e = (long)e; \ + __asm__ __volatile__ ("mov.l %1,er0\n\t" \ + "trapa #0\n\t" \ + : "=r" (__res) \ + : "g" (__NR_##name), \ + "g" (_a), \ + "g" (_b), \ + "g" (_c), \ + "g" (_d), \ + "g" (_e) \ + : "cc", "memory"); \ + __syscall_return(type, __res); \ +} + +#define _syscall6(type, name, atype, a, btype, b, \ + ctype, c, dtype, d, etype, e, ftype, f) \ +type name(atype a, btype b, ctype c, dtype d, etype e, ftype f) \ +{ \ + register long __res __asm__("er0"); \ + register long _a __asm__("er1"); \ + register long _b __asm__("er2"); \ + register long _c __asm__("er3"); \ + register long _d __asm__("er4"); \ + register long _e __asm__("er5"); \ + register long _f __asm__("er6"); \ + _a = (long)a; \ + _b = (long)b; \ + _c = (long)c; \ + _d = (long)d; \ + _e = (long)e; \ + _f = (long)f; \ + 
__asm__ __volatile__ ("mov.l %1,er0\n\t" \ + "trapa #0\n\t" \ + : "=r" (__res) \ + : "g" (__NR_##name), \ + "g" (_a), \ + "g" (_b), \ + "g" (_c), \ + "g" (_d), \ + "g" (_e) \ + "g" (_f) \ + : "cc", "memory"); \ + __syscall_return(type, __res); \ +} #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR diff --git a/trunk/include/asm-i386/Kbuild b/trunk/include/asm-i386/Kbuild index 5ae93afc67e1..147e4ac1ebf0 100644 --- a/trunk/include/asm-i386/Kbuild +++ b/trunk/include/asm-i386/Kbuild @@ -7,4 +7,5 @@ header-y += ptrace-abi.h header-y += ucontext.h unifdef-y += mtrr.h +unifdef-y += setup.h unifdef-y += vm86.h diff --git a/trunk/include/asm-i386/atomic.h b/trunk/include/asm-i386/atomic.h index a6c024e2506f..51a166242522 100644 --- a/trunk/include/asm-i386/atomic.h +++ b/trunk/include/asm-i386/atomic.h @@ -14,7 +14,7 @@ * on us. We need to use _exactly_ the address the user gave us, * not some alias that contains the same information. */ -typedef struct { int counter; } atomic_t; +typedef struct { volatile int counter; } atomic_t; #define ATOMIC_INIT(i) { (i) } diff --git a/trunk/include/asm-i386/dma-mapping.h b/trunk/include/asm-i386/dma-mapping.h index 183eebeebbdc..81999a3ebe7c 100644 --- a/trunk/include/asm-i386/dma-mapping.h +++ b/trunk/include/asm-i386/dma-mapping.h @@ -156,10 +156,10 @@ dma_get_cache_alignment(void) return (1 << INTERNODE_CACHE_SHIFT); } -#define dma_is_consistent(d, h) (1) +#define dma_is_consistent(d) (1) static inline void -dma_cache_sync(struct device *dev, void *vaddr, size_t size, +dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction) { flush_write_buffers(); diff --git a/trunk/include/asm-i386/futex.h b/trunk/include/asm-i386/futex.h index 438ef0ec7101..946d97cfea23 100644 --- a/trunk/include/asm-i386/futex.h +++ b/trunk/include/asm-i386/futex.h @@ -56,7 +56,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - pagefault_disable(); + inc_preempt_count(); if (op == FUTEX_OP_SET) __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); @@ -88,7 +88,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) } } - pagefault_enable(); + dec_preempt_count(); if (!ret) { switch (cmp) { diff --git a/trunk/include/asm-i386/mmzone.h b/trunk/include/asm-i386/mmzone.h index 3503ad66945e..61b073322006 100644 --- a/trunk/include/asm-i386/mmzone.h +++ b/trunk/include/asm-i386/mmzone.h @@ -120,26 +120,13 @@ static inline int pfn_valid(int pfn) __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low_pages(x) \ __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0) -#define alloc_bootmem_node(pgdat, x) \ -({ \ - struct pglist_data __attribute__ ((unused)) \ - *__alloc_bootmem_node__pgdat = (pgdat); \ - __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, \ - __pa(MAX_DMA_ADDRESS)); \ -}) -#define alloc_bootmem_pages_node(pgdat, x) \ -({ \ - struct pglist_data __attribute__ ((unused)) \ - *__alloc_bootmem_node__pgdat = (pgdat); \ - __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \ - __pa(MAX_DMA_ADDRESS)) \ -}) -#define alloc_bootmem_low_pages_node(pgdat, x) \ -({ \ - struct pglist_data __attribute__ ((unused)) \ - *__alloc_bootmem_node__pgdat = (pgdat); \ - __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0); \ -}) +#define alloc_bootmem_node(ignore, x) \ + __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_pages_node(ignore, x) \ + __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_low_pages_node(ignore, x) \ + __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0) + #endif /* CONFIG_NEED_MULTIPLE_NODES */ #endif /* _ASM_MMZONE_H_ */ diff --git a/trunk/include/asm-i386/param.h b/trunk/include/asm-i386/param.h index 21b32466fcdc..745dc5bd0fbc 100644 --- a/trunk/include/asm-i386/param.h +++ b/trunk/include/asm-i386/param.h @@ -18,5 +18,6 @@ #endif #define MAXHOSTNAMELEN 64 /* max length of hostname */ +#define COMMAND_LINE_SIZE 256 #endif diff --git a/trunk/include/asm-i386/pgtable.h b/trunk/include/asm-i386/pgtable.h index bfee7ddfff53..7d398f493dde 100644 --- a/trunk/include/asm-i386/pgtable.h +++ b/trunk/include/asm-i386/pgtable.h @@ -34,14 +34,14 @@ struct vm_area_struct; #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) extern unsigned long empty_zero_page[1024]; extern pgd_t swapper_pg_dir[1024]; -extern struct kmem_cache *pgd_cache; -extern struct kmem_cache *pmd_cache; +extern kmem_cache_t *pgd_cache; +extern kmem_cache_t *pmd_cache; extern spinlock_t pgd_lock; extern struct page *pgd_list; -void pmd_ctor(void *, struct kmem_cache *, unsigned long); -void pgd_ctor(void *, struct kmem_cache *, unsigned long); -void pgd_dtor(void *, struct kmem_cache *, unsigned long); +void pmd_ctor(void *, kmem_cache_t *, unsigned long); +void pgd_ctor(void *, kmem_cache_t *, unsigned long); +void pgd_dtor(void *, kmem_cache_t *, unsigned long); void pgtable_cache_init(void); void paging_init(void); diff --git a/trunk/include/asm-i386/rwsem.h b/trunk/include/asm-i386/rwsem.h index 041906f3c6df..bc598d6388e3 100644 --- a/trunk/include/asm-i386/rwsem.h +++ b/trunk/include/asm-i386/rwsem.h @@ -75,8 +75,8 @@ struct rw_semaphore { #define __RWSEM_INITIALIZER(name) \ -{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ - LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } +{ 
RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \ + __RWSEM_DEP_MAP_INIT(name) } #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) diff --git a/trunk/include/asm-i386/setup.h b/trunk/include/asm-i386/setup.h index c5b504bfbaad..2734909eff84 100644 --- a/trunk/include/asm-i386/setup.h +++ b/trunk/include/asm-i386/setup.h @@ -6,8 +6,6 @@ #ifndef _i386_SETUP_H #define _i386_SETUP_H -#define COMMAND_LINE_SIZE 256 - #ifdef __KERNEL__ #include @@ -16,8 +14,10 @@ */ #define MAXMEM_PFN PFN_DOWN(MAXMEM) #define MAX_NONPAE_PFN (1 << 20) +#endif #define PARAM_SIZE 4096 +#define COMMAND_LINE_SIZE 256 #define OLD_CL_MAGIC_ADDR 0x90020 #define OLD_CL_MAGIC 0xA33F @@ -78,6 +78,4 @@ void __init add_memory_region(unsigned long long start, #endif /* __ASSEMBLY__ */ -#endif /* __KERNEL__ */ - #endif /* _i386_SETUP_H */ diff --git a/trunk/include/asm-i386/spinlock_types.h b/trunk/include/asm-i386/spinlock_types.h index 4da9345c1500..59efe849f351 100644 --- a/trunk/include/asm-i386/spinlock_types.h +++ b/trunk/include/asm-i386/spinlock_types.h @@ -6,13 +6,13 @@ #endif typedef struct { - unsigned int slock; + volatile unsigned int slock; } raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 1 } typedef struct { - unsigned int lock; + volatile unsigned int lock; } raw_rwlock_t; #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } diff --git a/trunk/include/asm-i386/suspend.h b/trunk/include/asm-i386/suspend.h index c1da5caafaf7..08be1e5009d4 100644 --- a/trunk/include/asm-i386/suspend.h +++ b/trunk/include/asm-i386/suspend.h @@ -6,7 +6,18 @@ #include #include -static inline int arch_prepare_suspend(void) { return 0; } +static inline int +arch_prepare_suspend(void) +{ + /* If you want to make non-PSE machine work, turn off paging + in swsusp_arch_suspend. swsusp_pg_dir should have identity mapping, so + it could work... */ + if (!cpu_has_pse) { + printk(KERN_ERR "PSE is required for swsusp.\n"); + return -EPERM; + } + return 0; +} /* image of the saved processor state */ struct saved_context { diff --git a/trunk/include/asm-i386/unistd.h b/trunk/include/asm-i386/unistd.h index 833fa1704ff9..beeeaf6b054a 100644 --- a/trunk/include/asm-i386/unistd.h +++ b/trunk/include/asm-i386/unistd.h @@ -329,6 +329,104 @@ #ifdef __KERNEL__ #define NR_syscalls 320 +#include + +/* + * user-visible error numbers are in the range -1 - -MAX_ERRNO: see + * + */ +#define __syscall_return(type, res) \ +do { \ + if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \ + errno = -(res); \ + res = -1; \ + } \ + return (type) (res); \ +} while (0) + +/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. 
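All of the __syscall_return() helpers defined in these unistd.h hunks implement the same convention: a raw syscall return value in the range -1 .. -MAX_ERRNO is a negated error number, which the wrapper stores into the user-visible errno and collapses to -1, while any other value is passed through unchanged as the real result. A minimal standalone sketch of that mapping follows; the function name and the MAX_ERRNO value of 4095 are illustrative assumptions, not taken from this patch.

#include <errno.h>

#define MAX_ERRNO 4095	/* assumed: the usual kernel-side errno limit */

/* Illustrative only: how the _syscallN wrappers turn a raw syscall
 * return value into a C return value plus errno. */
static long syscall_return_sketch(long res)
{
	if ((unsigned long)res >= (unsigned long)(-MAX_ERRNO)) {
		errno = -res;	/* e.g. -ENOENT becomes errno = ENOENT */
		return -1;
	}
	return res;		/* success: fd, byte count, offset, ... */
}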
*/ +#define _syscall0(type,name) \ +type name(void) \ +{ \ +long __res; \ +__asm__ volatile ("int $0x80" \ + : "=a" (__res) \ + : "0" (__NR_##name)); \ +__syscall_return(type,__res); \ +} + +#define _syscall1(type,name,type1,arg1) \ +type name(type1 arg1) \ +{ \ +long __res; \ +__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \ + : "=a" (__res) \ + : "0" (__NR_##name),"ri" ((long)(arg1)) : "memory"); \ +__syscall_return(type,__res); \ +} + +#define _syscall2(type,name,type1,arg1,type2,arg2) \ +type name(type1 arg1,type2 arg2) \ +{ \ +long __res; \ +__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \ + : "=a" (__res) \ + : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)) \ + : "memory"); \ +__syscall_return(type,__res); \ +} + +#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ +type name(type1 arg1,type2 arg2,type3 arg3) \ +{ \ +long __res; \ +__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \ + : "=a" (__res) \ + : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \ + "d" ((long)(arg3)) : "memory"); \ +__syscall_return(type,__res); \ +} + +#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ +type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ +{ \ +long __res; \ +__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \ + : "=a" (__res) \ + : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \ + "d" ((long)(arg3)),"S" ((long)(arg4)) : "memory"); \ +__syscall_return(type,__res); \ +} + +#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ + type5,arg5) \ +type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ +{ \ +long __res; \ +__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; movl %1,%%eax ; " \ + "int $0x80 ; pop %%ebx" \ + : "=a" (__res) \ + : "i" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \ + "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \ + : "memory"); \ +__syscall_return(type,__res); \ +} + +#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ + type5,arg5,type6,arg6) \ +type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \ +{ \ +long __res; \ + struct { long __a1; long __a6; } __s = { (long)arg1, (long)arg6 }; \ +__asm__ volatile ("push %%ebp ; push %%ebx ; movl 4(%2),%%ebp ; " \ + "movl 0(%2),%%ebx ; movl %1,%%eax ; int $0x80 ; " \ + "pop %%ebx ; pop %%ebp" \ + : "=a" (__res) \ + : "i" (__NR_##name),"0" ((long)(&__s)),"c" ((long)(arg2)), \ + "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \ + : "memory"); \ +__syscall_return(type,__res); \ +} #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR diff --git a/trunk/include/asm-ia64/Kbuild b/trunk/include/asm-ia64/Kbuild index 4a1e48b9f403..15818a18bc52 100644 --- a/trunk/include/asm-ia64/Kbuild +++ b/trunk/include/asm-ia64/Kbuild @@ -10,6 +10,7 @@ header-y += intrinsics.h header-y += perfmon_default_smpl.h header-y += ptrace_offsets.h header-y += rse.h +header-y += setup.h header-y += ucontext.h unifdef-y += perfmon.h diff --git a/trunk/include/asm-ia64/dma-mapping.h b/trunk/include/asm-ia64/dma-mapping.h index ebd5887f4b1a..99a8f8e1218c 100644 --- a/trunk/include/asm-ia64/dma-mapping.h +++ b/trunk/include/asm-ia64/dma-mapping.h @@ -50,8 +50,7 @@ dma_set_mask (struct device *dev, u64 mask) extern int dma_get_cache_alignment(void); static inline void -dma_cache_sync (struct device *dev, void *vaddr, size_t size, - enum dma_data_direction dir) +dma_cache_sync 
(void *vaddr, size_t size, enum dma_data_direction dir) { /* * IA-64 is cache-coherent, so this is mostly a no-op. However, we do need to @@ -60,6 +59,6 @@ dma_cache_sync (struct device *dev, void *vaddr, size_t size, mb(); } -#define dma_is_consistent(d, h) (1) /* all we do is coherent memory... */ +#define dma_is_consistent(dma_handle) (1) /* all we do is coherent memory... */ #endif /* _ASM_IA64_DMA_MAPPING_H */ diff --git a/trunk/include/asm-ia64/futex.h b/trunk/include/asm-ia64/futex.h index 8a98a2654139..07d77f3a8cbe 100644 --- a/trunk/include/asm-ia64/futex.h +++ b/trunk/include/asm-ia64/futex.h @@ -59,7 +59,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - pagefault_disable(); + inc_preempt_count(); switch (op) { case FUTEX_OP_SET: @@ -83,7 +83,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ret = -ENOSYS; } - pagefault_enable(); + dec_preempt_count(); if (!ret) { switch (cmp) { diff --git a/trunk/include/asm-ia64/pgalloc.h b/trunk/include/asm-ia64/pgalloc.h index 393e04c42a2c..9cb68e9b377e 100644 --- a/trunk/include/asm-ia64/pgalloc.h +++ b/trunk/include/asm-ia64/pgalloc.h @@ -60,7 +60,7 @@ static inline void *pgtable_quicklist_alloc(void) static inline void pgtable_quicklist_free(void *pgtable_entry) { #ifdef CONFIG_NUMA - int nid = page_to_nid(virt_to_page(pgtable_entry)); + unsigned long nid = page_to_nid(virt_to_page(pgtable_entry)); if (unlikely(nid != numa_node_id())) { free_page((unsigned long)pgtable_entry); diff --git a/trunk/include/asm-m32r/setup.h b/trunk/include/asm-m32r/setup.h index 6a0b32202d4e..52f4fa29abfc 100644 --- a/trunk/include/asm-m32r/setup.h +++ b/trunk/include/asm-m32r/setup.h @@ -1,11 +1,6 @@ /* * This is set up by the setup-routine at boot-time */ - -#define COMMAND_LINE_SIZE 512 - -#ifdef __KERNEL__ - #define PARAM ((unsigned char *)empty_zero_page) #define MOUNT_ROOT_RDONLY (*(unsigned long *) (PARAM+0x000)) @@ -23,6 +18,8 @@ #define SCREEN_INFO (*(struct screen_info *) (PARAM+0x200)) +#define COMMAND_LINE_SIZE (512) + #define RAMDISK_IMAGE_START_MASK (0x07FF) #define RAMDISK_PROMPT_FLAG (0x8000) #define RAMDISK_LOAD_FLAG (0x4000) @@ -30,5 +27,3 @@ extern unsigned long memory_start; extern unsigned long memory_end; -#endif /* __KERNEL__ */ - diff --git a/trunk/include/asm-m32r/unistd.h b/trunk/include/asm-m32r/unistd.h index 5b66bd3c6ed6..95aa34298d82 100644 --- a/trunk/include/asm-m32r/unistd.h +++ b/trunk/include/asm-m32r/unistd.h @@ -296,6 +296,117 @@ #ifdef __KERNEL__ #define NR_syscalls 285 +#include + +/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see + * + */ + +#include /* SYSCALL_* */ + +#define __syscall_return(type, res) \ +do { \ + if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \ + /* Avoid using "res" which is declared to be in register r0; \ + errno might expand to a function call and clobber it. 
*/ \ + int __err = -(res); \ + errno = __err; \ + res = -1; \ + } \ + return (type) (res); \ +} while (0) + +#define _syscall0(type,name) \ +type name(void) \ +{ \ +register long __scno __asm__ ("r7") = __NR_##name; \ +register long __res __asm__("r0"); \ +__asm__ __volatile__ (\ + "trap #" SYSCALL_VECTOR "|| nop"\ + : "=r" (__res) \ + : "r" (__scno) \ + : "memory"); \ +__syscall_return(type,__res); \ +} + +#define _syscall1(type,name,type1,arg1) \ +type name(type1 arg1) \ +{ \ +register long __scno __asm__ ("r7") = __NR_##name; \ +register long __res __asm__ ("r0") = (long)(arg1); \ +__asm__ __volatile__ (\ + "trap #" SYSCALL_VECTOR "|| nop"\ + : "=r" (__res) \ + : "r" (__scno), "0" (__res) \ + : "memory"); \ +__syscall_return(type,__res); \ +} + +#define _syscall2(type,name,type1,arg1,type2,arg2) \ +type name(type1 arg1,type2 arg2) \ +{ \ +register long __scno __asm__ ("r7") = __NR_##name; \ +register long __arg2 __asm__ ("r1") = (long)(arg2); \ +register long __res __asm__ ("r0") = (long)(arg1); \ +__asm__ __volatile__ (\ + "trap #" SYSCALL_VECTOR "|| nop"\ + : "=r" (__res) \ + : "r" (__scno), "0" (__res), "r" (__arg2) \ + : "memory"); \ +__syscall_return(type,__res); \ +} + +#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ +type name(type1 arg1,type2 arg2,type3 arg3) \ +{ \ +register long __scno __asm__ ("r7") = __NR_##name; \ +register long __arg3 __asm__ ("r2") = (long)(arg3); \ +register long __arg2 __asm__ ("r1") = (long)(arg2); \ +register long __res __asm__ ("r0") = (long)(arg1); \ +__asm__ __volatile__ (\ + "trap #" SYSCALL_VECTOR "|| nop"\ + : "=r" (__res) \ + : "r" (__scno), "0" (__res), "r" (__arg2), \ + "r" (__arg3) \ + : "memory"); \ +__syscall_return(type,__res); \ +} + +#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ +type name(type1 arg1,type2 arg2,type3 arg3,type4 arg4) \ +{ \ +register long __scno __asm__ ("r7") = __NR_##name; \ +register long __arg4 __asm__ ("r3") = (long)(arg4); \ +register long __arg3 __asm__ ("r2") = (long)(arg3); \ +register long __arg2 __asm__ ("r1") = (long)(arg2); \ +register long __res __asm__ ("r0") = (long)(arg1); \ +__asm__ __volatile__ (\ + "trap #" SYSCALL_VECTOR "|| nop"\ + : "=r" (__res) \ + : "r" (__scno), "0" (__res), "r" (__arg2), \ + "r" (__arg3), "r" (__arg4) \ + : "memory"); \ +__syscall_return(type,__res); \ +} + +#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ + type5,arg5) \ +type name(type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ +{ \ +register long __scno __asm__ ("r7") = __NR_##name; \ +register long __arg5 __asm__ ("r4") = (long)(arg5); \ +register long __arg4 __asm__ ("r3") = (long)(arg4); \ +register long __arg3 __asm__ ("r2") = (long)(arg3); \ +register long __arg2 __asm__ ("r1") = (long)(arg2); \ +register long __res __asm__ ("r0") = (long)(arg1); \ +__asm__ __volatile__ (\ + "trap #" SYSCALL_VECTOR "|| nop"\ + : "=r" (__res) \ + : "r" (__scno), "0" (__res), "r" (__arg2), \ + "r" (__arg3), "r" (__arg4), "r" (__arg5) \ + : "memory"); \ +__syscall_return(type,__res); \ +} #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_STAT64 diff --git a/trunk/include/asm-m68k/dma-mapping.h b/trunk/include/asm-m68k/dma-mapping.h index 00259ed6fc95..d90d841d3dfd 100644 --- a/trunk/include/asm-m68k/dma-mapping.h +++ b/trunk/include/asm-m68k/dma-mapping.h @@ -21,7 +21,7 @@ static inline int dma_get_cache_alignment(void) return 1 << L1_CACHE_SHIFT; } -static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr) +static inline int 
dma_is_consistent(dma_addr_t dma_addr) { return 0; } @@ -41,7 +41,7 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size, { dma_free_coherent(dev, size, addr, handle); } -static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, +static inline void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir) { /* we use coherent allocation, so not much to do here. */ diff --git a/trunk/include/asm-m68k/setup.h b/trunk/include/asm-m68k/setup.h index 2a8853cd6554..7facc9a46e74 100644 --- a/trunk/include/asm-m68k/setup.h +++ b/trunk/include/asm-m68k/setup.h @@ -41,12 +41,8 @@ #define MACH_Q40 10 #define MACH_SUN3X 11 -#define COMMAND_LINE_SIZE 256 - #ifdef __KERNEL__ -#define CL_SIZE COMMAND_LINE_SIZE - #ifndef __ASSEMBLY__ extern unsigned long m68k_machtype; #endif /* !__ASSEMBLY__ */ @@ -359,6 +355,8 @@ extern int m68k_is040or060; */ #define NUM_MEMINFO 4 +#define CL_SIZE 256 +#define COMMAND_LINE_SIZE CL_SIZE #ifndef __ASSEMBLY__ struct mem_info { diff --git a/trunk/include/asm-m68k/unistd.h b/trunk/include/asm-m68k/unistd.h index fdbb60e6a0d4..ad4348058c66 100644 --- a/trunk/include/asm-m68k/unistd.h +++ b/trunk/include/asm-m68k/unistd.h @@ -317,6 +317,103 @@ #ifdef __KERNEL__ #define NR_syscalls 311 +#include + +/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see + */ + +#define __syscall_return(type, res) \ +do { \ + if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \ + /* avoid using res which is declared to be in register d0; \ + errno might expand to a function call and clobber it. */ \ + int __err = -(res); \ + errno = __err; \ + res = -1; \ + } \ + return (type) (res); \ +} while (0) + +#define _syscall0(type,name) \ +type name(void) \ +{ \ +register long __res __asm__ ("%d0") = __NR_##name; \ +__asm__ __volatile__ ("trap #0" \ + : "+d" (__res) ); \ +__syscall_return(type,__res); \ +} + +#define _syscall1(type,name,atype,a) \ +type name(atype a) \ +{ \ +register long __res __asm__ ("%d0") = __NR_##name; \ +register long __a __asm__ ("%d1") = (long)(a); \ +__asm__ __volatile__ ("trap #0" \ + : "+d" (__res) \ + : "d" (__a) ); \ +__syscall_return(type,__res); \ +} + +#define _syscall2(type,name,atype,a,btype,b) \ +type name(atype a,btype b) \ +{ \ +register long __res __asm__ ("%d0") = __NR_##name; \ +register long __a __asm__ ("%d1") = (long)(a); \ +register long __b __asm__ ("%d2") = (long)(b); \ +__asm__ __volatile__ ("trap #0" \ + : "+d" (__res) \ + : "d" (__a), "d" (__b) \ + ); \ +__syscall_return(type,__res); \ +} + +#define _syscall3(type,name,atype,a,btype,b,ctype,c) \ +type name(atype a,btype b,ctype c) \ +{ \ +register long __res __asm__ ("%d0") = __NR_##name; \ +register long __a __asm__ ("%d1") = (long)(a); \ +register long __b __asm__ ("%d2") = (long)(b); \ +register long __c __asm__ ("%d3") = (long)(c); \ +__asm__ __volatile__ ("trap #0" \ + : "+d" (__res) \ + : "d" (__a), "d" (__b), \ + "d" (__c) \ + ); \ +__syscall_return(type,__res); \ +} + +#define _syscall4(type,name,atype,a,btype,b,ctype,c,dtype,d) \ +type name (atype a, btype b, ctype c, dtype d) \ +{ \ +register long __res __asm__ ("%d0") = __NR_##name; \ +register long __a __asm__ ("%d1") = (long)(a); \ +register long __b __asm__ ("%d2") = (long)(b); \ +register long __c __asm__ ("%d3") = (long)(c); \ +register long __d __asm__ ("%d4") = (long)(d); \ +__asm__ __volatile__ ("trap #0" \ + : "+d" (__res) \ + : "d" (__a), "d" (__b), \ + "d" (__c), "d" (__d) \ + ); \ +__syscall_return(type,__res); \ +} + +#define 
_syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \ +type name (atype a,btype b,ctype c,dtype d,etype e) \ +{ \ +register long __res __asm__ ("%d0") = __NR_##name; \ +register long __a __asm__ ("%d1") = (long)(a); \ +register long __b __asm__ ("%d2") = (long)(b); \ +register long __c __asm__ ("%d3") = (long)(c); \ +register long __d __asm__ ("%d4") = (long)(d); \ +register long __e __asm__ ("%d5") = (long)(e); \ +__asm__ __volatile__ ("trap #0" \ + : "+d" (__res) \ + : "d" (__a), "d" (__b), \ + "d" (__c), "d" (__d), "d" (__e) \ + ); \ +__syscall_return(type,__res); \ +} #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR diff --git a/trunk/include/asm-m68knommu/irq.h b/trunk/include/asm-m68knommu/irq.h index 7b8f874f8429..45e7a2fd1689 100644 --- a/trunk/include/asm-m68knommu/irq.h +++ b/trunk/include/asm-m68knommu/irq.h @@ -86,6 +86,5 @@ extern void (*mach_disable_irq)(unsigned int); #define enable_irq(x) do { } while (0) #define disable_irq(x) do { } while (0) #define disable_irq_nosync(x) disable_irq(x) -#define irq_canonicalize(irq) (irq) #endif /* _M68K_IRQ_H_ */ diff --git a/trunk/include/asm-m68knommu/rtc.h b/trunk/include/asm-m68knommu/rtc.h deleted file mode 100644 index eaf18ec83c8e..000000000000 --- a/trunk/include/asm-m68knommu/rtc.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/include/asm-m68knommu/setup.h b/trunk/include/asm-m68knommu/setup.h index fb86bb2a6078..d2b0fcce41b2 100644 --- a/trunk/include/asm-m68knommu/setup.h +++ b/trunk/include/asm-m68knommu/setup.h @@ -1,10 +1,5 @@ -#ifdef __KERNEL__ - #include /* We have a bigger command line buffer. */ #undef COMMAND_LINE_SIZE - -#endif /* __KERNEL__ */ - #define COMMAND_LINE_SIZE 512 diff --git a/trunk/include/asm-m68knommu/ucontext.h b/trunk/include/asm-m68knommu/ucontext.h index 713a27f901cd..5d570cedbb02 100644 --- a/trunk/include/asm-m68knommu/ucontext.h +++ b/trunk/include/asm-m68knommu/ucontext.h @@ -5,17 +5,21 @@ typedef int greg_t; #define NGREG 18 typedef greg_t gregset_t[NGREG]; +#ifdef CONFIG_FPU typedef struct fpregset { int f_pcr; int f_psr; int f_fpiaddr; int f_fpregs[8][3]; } fpregset_t; +#endif struct mcontext { int version; gregset_t gregs; +#ifdef CONFIG_FPU fpregset_t fpregs; +#endif }; #define MCONTEXT_VERSION 2 @@ -25,7 +29,9 @@ struct ucontext { struct ucontext *uc_link; stack_t uc_stack; struct mcontext uc_mcontext; +#ifdef CONFIG_FPU unsigned long uc_filler[80]; +#endif sigset_t uc_sigmask; /* mask last for extensibility */ }; diff --git a/trunk/include/asm-m68knommu/unistd.h b/trunk/include/asm-m68knommu/unistd.h index 82e03195f325..ebaf03197114 100644 --- a/trunk/include/asm-m68knommu/unistd.h +++ b/trunk/include/asm-m68knommu/unistd.h @@ -318,6 +318,156 @@ #ifdef __KERNEL__ #define NR_syscalls 311 +#include + +/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see + */ + +#define __syscall_return(type, res) \ +do { \ + if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \ + /* avoid using res which is declared to be in register d0; \ + errno might expand to a function call and clobber it. 
*/ \ + int __err = -(res); \ + errno = __err; \ + res = -1; \ + } \ + return (type) (res); \ +} while (0) + +#define _syscall0(type, name) \ +type name(void) \ +{ \ + long __res; \ + __asm__ __volatile__ ("movel %1, %%d0\n\t" \ + "trap #0\n\t" \ + "movel %%d0, %0" \ + : "=g" (__res) \ + : "i" (__NR_##name) \ + : "cc", "%d0"); \ + if ((unsigned long)(__res) >= (unsigned long)(-125)) { \ + errno = -__res; \ + __res = -1; \ + } \ + return (type)__res; \ +} + +#define _syscall1(type, name, atype, a) \ +type name(atype a) \ +{ \ + long __res; \ + __asm__ __volatile__ ("movel %2, %%d1\n\t" \ + "movel %1, %%d0\n\t" \ + "trap #0\n\t" \ + "movel %%d0, %0" \ + : "=g" (__res) \ + : "i" (__NR_##name), \ + "g" ((long)a) \ + : "cc", "%d0", "%d1"); \ + if ((unsigned long)(__res) >= (unsigned long)(-125)) { \ + errno = -__res; \ + __res = -1; \ + } \ + return (type)__res; \ +} + +#define _syscall2(type, name, atype, a, btype, b) \ +type name(atype a, btype b) \ +{ \ + long __res; \ + __asm__ __volatile__ ("movel %3, %%d2\n\t" \ + "movel %2, %%d1\n\t" \ + "movel %1, %%d0\n\t" \ + "trap #0\n\t" \ + "movel %%d0, %0" \ + : "=g" (__res) \ + : "i" (__NR_##name), \ + "a" ((long)a), \ + "g" ((long)b) \ + : "cc", "%d0", "%d1", "%d2"); \ + if ((unsigned long)(__res) >= (unsigned long)(-125)) { \ + errno = -__res; \ + __res = -1; \ + } \ + return (type)__res; \ +} + +#define _syscall3(type, name, atype, a, btype, b, ctype, c) \ +type name(atype a, btype b, ctype c) \ +{ \ + long __res; \ + __asm__ __volatile__ ("movel %4, %%d3\n\t" \ + "movel %3, %%d2\n\t" \ + "movel %2, %%d1\n\t" \ + "movel %1, %%d0\n\t" \ + "trap #0\n\t" \ + "movel %%d0, %0" \ + : "=g" (__res) \ + : "i" (__NR_##name), \ + "a" ((long)a), \ + "a" ((long)b), \ + "g" ((long)c) \ + : "cc", "%d0", "%d1", "%d2", "%d3"); \ + if ((unsigned long)(__res) >= (unsigned long)(-125)) { \ + errno = -__res; \ + __res = -1; \ + } \ + return (type)__res; \ +} + +#define _syscall4(type, name, atype, a, btype, b, ctype, c, dtype, d) \ +type name(atype a, btype b, ctype c, dtype d) \ +{ \ + long __res; \ + __asm__ __volatile__ ("movel %5, %%d4\n\t" \ + "movel %4, %%d3\n\t" \ + "movel %3, %%d2\n\t" \ + "movel %2, %%d1\n\t" \ + "movel %1, %%d0\n\t" \ + "trap #0\n\t" \ + "movel %%d0, %0" \ + : "=g" (__res) \ + : "i" (__NR_##name), \ + "a" ((long)a), \ + "a" ((long)b), \ + "a" ((long)c), \ + "g" ((long)d) \ + : "cc", "%d0", "%d1", "%d2", "%d3", \ + "%d4"); \ + if ((unsigned long)(__res) >= (unsigned long)(-125)) { \ + errno = -__res; \ + __res = -1; \ + } \ + return (type)__res; \ +} + +#define _syscall5(type, name, atype, a, btype, b, ctype, c, dtype, d, etype, e) \ +type name(atype a, btype b, ctype c, dtype d, etype e) \ +{ \ + long __res; \ + __asm__ __volatile__ ("movel %6, %%d5\n\t" \ + "movel %5, %%d4\n\t" \ + "movel %4, %%d3\n\t" \ + "movel %3, %%d2\n\t" \ + "movel %2, %%d1\n\t" \ + "movel %1, %%d0\n\t" \ + "trap #0\n\t" \ + "movel %%d0, %0" \ + : "=g" (__res) \ + : "i" (__NR_##name), \ + "a" ((long)a), \ + "a" ((long)b), \ + "a" ((long)c), \ + "a" ((long)d), \ + "g" ((long)e) \ + : "cc", "%d0", "%d1", "%d2", "%d3", \ + "%d4", "%d5"); \ + if ((unsigned long)(__res) >= (unsigned long)(-125)) { \ + errno = -__res; \ + __res = -1; \ + } \ + return (type)__res; \ +} #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR diff --git a/trunk/include/asm-mips/dma-mapping.h b/trunk/include/asm-mips/dma-mapping.h index 236d1a467cc7..43288634c38a 100644 --- a/trunk/include/asm-mips/dma-mapping.h +++ b/trunk/include/asm-mips/dma-mapping.h @@ -63,9 +63,9 @@ 
dma_get_cache_alignment(void) return 128; } -extern int dma_is_consistent(struct device *dev, dma_addr_t dma_addr); +extern int dma_is_consistent(dma_addr_t dma_addr); -extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, +extern void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction); #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY diff --git a/trunk/include/asm-mips/futex.h b/trunk/include/asm-mips/futex.h index 47e5679c2353..927a216bd530 100644 --- a/trunk/include/asm-mips/futex.h +++ b/trunk/include/asm-mips/futex.h @@ -88,7 +88,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - pagefault_disable(); + inc_preempt_count(); switch (op) { case FUTEX_OP_SET: @@ -115,7 +115,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ret = -ENOSYS; } - pagefault_enable(); + dec_preempt_count(); if (!ret) { switch (cmp) { diff --git a/trunk/include/asm-mips/highmem.h b/trunk/include/asm-mips/highmem.h index f8c8182f7f2e..c976bfaaba83 100644 --- a/trunk/include/asm-mips/highmem.h +++ b/trunk/include/asm-mips/highmem.h @@ -21,7 +21,6 @@ #include #include -#include #include /* undef for production */ @@ -71,16 +70,11 @@ static inline void *kmap(struct page *page) static inline void *kmap_atomic(struct page *page, enum km_type type) { - pagefault_disable(); return page_address(page); } -static inline void kunmap_atomic(void *kvaddr, enum km_type type) -{ - pagefault_enable(); -} - -#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx)) +static inline void kunmap_atomic(void *kvaddr, enum km_type type) { } +#define kmap_atomic_pfn(pfn, idx) page_address(pfn_to_page(pfn)) #define kmap_atomic_to_page(ptr) virt_to_page(ptr) diff --git a/trunk/include/asm-mips/i8259.h b/trunk/include/asm-mips/i8259.h index 4df8d8b118c0..0214abe3f0af 100644 --- a/trunk/include/asm-mips/i8259.h +++ b/trunk/include/asm-mips/i8259.h @@ -19,31 +19,10 @@ #include -/* i8259A PIC registers */ -#define PIC_MASTER_CMD 0x20 -#define PIC_MASTER_IMR 0x21 -#define PIC_MASTER_ISR PIC_MASTER_CMD -#define PIC_MASTER_POLL PIC_MASTER_ISR -#define PIC_MASTER_OCW3 PIC_MASTER_ISR -#define PIC_SLAVE_CMD 0xa0 -#define PIC_SLAVE_IMR 0xa1 - -/* i8259A PIC related value */ -#define PIC_CASCADE_IR 2 -#define MASTER_ICW4_DEFAULT 0x01 -#define SLAVE_ICW4_DEFAULT 0x01 -#define PIC_ICW4_AEOI 2 - extern spinlock_t i8259A_lock; -extern void init_8259A(int auto_eoi); -extern void enable_8259A_irq(unsigned int irq); -extern void disable_8259A_irq(unsigned int irq); - extern void init_i8259_irqs(void); -#define I8259A_IRQ_BASE 0 - /* * Do the traditional i8259 interrupt polling thing. This is for the few * cases where no better interrupt acknowledge method is available and we @@ -56,15 +35,15 @@ static inline int i8259_irq(void) spin_lock(&i8259A_lock); /* Perform an interrupt acknowledge cycle on controller 1. */ - outb(0x0C, PIC_MASTER_CMD); /* prepare for poll */ - irq = inb(PIC_MASTER_CMD) & 7; - if (irq == PIC_CASCADE_IR) { + outb(0x0C, 0x20); /* prepare for poll */ + irq = inb(0x20) & 7; + if (irq == 2) { /* * Interrupt is cascaded so perform interrupt * acknowledge on controller 2. */ - outb(0x0C, PIC_SLAVE_CMD); /* prepare for poll */ - irq = (inb(PIC_SLAVE_CMD) & 7) + 8; + outb(0x0C, 0xA0); /* prepare for poll */ + irq = (inb(0xA0) & 7) + 8; } if (unlikely(irq == 7)) { @@ -75,14 +54,14 @@ static inline int i8259_irq(void) * significant bit is not set then there is no valid * interrupt. 
*/ - outb(0x0B, PIC_MASTER_ISR); /* ISR register */ - if(~inb(PIC_MASTER_ISR) & 0x80) + outb(0x0B, 0x20); /* ISR register */ + if(~inb(0x20) & 0x80) irq = -1; } spin_unlock(&i8259A_lock); - return likely(irq >= 0) ? irq + I8259A_IRQ_BASE : irq; + return irq; } #endif /* _ASM_I8259_H */ diff --git a/trunk/include/asm-mips/pgtable-32.h b/trunk/include/asm-mips/pgtable-32.h index 2fbd47eba32d..d20f2e9b28be 100644 --- a/trunk/include/asm-mips/pgtable-32.h +++ b/trunk/include/asm-mips/pgtable-32.h @@ -156,9 +156,9 @@ pfn_pte(unsigned long pfn, pgprot_t prot) #define __pte_offset(address) \ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define pte_offset(dir, address) \ - ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) -#define pte_offset_kernel(dir, address) \ - ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) + ((pte_t *) (pmd_page_vaddr(*dir)) + __pte_offset(address)) +#define pte_offset_kernel(dir, address) \ + ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) #define pte_offset_map(dir, address) \ ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) diff --git a/trunk/include/asm-mips/pgtable-64.h b/trunk/include/asm-mips/pgtable-64.h index a5b18710b6a4..b9b1e86493ee 100644 --- a/trunk/include/asm-mips/pgtable-64.h +++ b/trunk/include/asm-mips/pgtable-64.h @@ -212,9 +212,9 @@ static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address) #define __pte_offset(address) \ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define pte_offset(dir, address) \ - ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) + ((pte_t *) (pmd_page_vaddr(*dir)) + __pte_offset(address)) #define pte_offset_kernel(dir, address) \ - ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) + ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) #define pte_offset_map(dir, address) \ ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) #define pte_offset_map_nested(dir, address) \ diff --git a/trunk/include/asm-mips/setup.h b/trunk/include/asm-mips/setup.h index 70009a902639..737fa4a6912e 100644 --- a/trunk/include/asm-mips/setup.h +++ b/trunk/include/asm-mips/setup.h @@ -1,6 +1,8 @@ +#ifdef __KERNEL__ #ifndef _MIPS_SETUP_H #define _MIPS_SETUP_H #define COMMAND_LINE_SIZE 256 #endif /* __SETUP_H */ +#endif /* __KERNEL__ */ diff --git a/trunk/include/asm-mips/unistd.h b/trunk/include/asm-mips/unistd.h index 696cff39a1d3..ec56aa52f669 100644 --- a/trunk/include/asm-mips/unistd.h +++ b/trunk/include/asm-mips/unistd.h @@ -933,6 +933,268 @@ #ifndef __ASSEMBLY__ +/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */ +#define _syscall0(type,name) \ +type name(void) \ +{ \ + register unsigned long __a3 asm("$7"); \ + unsigned long __v0; \ + \ + __asm__ volatile ( \ + ".set\tnoreorder\n\t" \ + "li\t$2, %2\t\t\t# " #name "\n\t" \ + "syscall\n\t" \ + "move\t%0, $2\n\t" \ + ".set\treorder" \ + : "=&r" (__v0), "=r" (__a3) \ + : "i" (__NR_##name) \ + : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ + "memory"); \ + \ + if (__a3 == 0) \ + return (type) __v0; \ + errno = __v0; \ + return (type) -1; \ +} + +/* + * DANGER: This macro isn't usable for the pipe(2) call + * which has a unusual return convention. 
+ */ +#define _syscall1(type,name,atype,a) \ +type name(atype a) \ +{ \ + register unsigned long __a0 asm("$4") = (unsigned long) a; \ + register unsigned long __a3 asm("$7"); \ + unsigned long __v0; \ + \ + __asm__ volatile ( \ + ".set\tnoreorder\n\t" \ + "li\t$2, %3\t\t\t# " #name "\n\t" \ + "syscall\n\t" \ + "move\t%0, $2\n\t" \ + ".set\treorder" \ + : "=&r" (__v0), "=r" (__a3) \ + : "r" (__a0), "i" (__NR_##name) \ + : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ + "memory"); \ + \ + if (__a3 == 0) \ + return (type) __v0; \ + errno = __v0; \ + return (type) -1; \ +} + +#define _syscall2(type,name,atype,a,btype,b) \ +type name(atype a, btype b) \ +{ \ + register unsigned long __a0 asm("$4") = (unsigned long) a; \ + register unsigned long __a1 asm("$5") = (unsigned long) b; \ + register unsigned long __a3 asm("$7"); \ + unsigned long __v0; \ + \ + __asm__ volatile ( \ + ".set\tnoreorder\n\t" \ + "li\t$2, %4\t\t\t# " #name "\n\t" \ + "syscall\n\t" \ + "move\t%0, $2\n\t" \ + ".set\treorder" \ + : "=&r" (__v0), "=r" (__a3) \ + : "r" (__a0), "r" (__a1), "i" (__NR_##name) \ + : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ + "memory"); \ + \ + if (__a3 == 0) \ + return (type) __v0; \ + errno = __v0; \ + return (type) -1; \ +} + +#define _syscall3(type,name,atype,a,btype,b,ctype,c) \ +type name(atype a, btype b, ctype c) \ +{ \ + register unsigned long __a0 asm("$4") = (unsigned long) a; \ + register unsigned long __a1 asm("$5") = (unsigned long) b; \ + register unsigned long __a2 asm("$6") = (unsigned long) c; \ + register unsigned long __a3 asm("$7"); \ + unsigned long __v0; \ + \ + __asm__ volatile ( \ + ".set\tnoreorder\n\t" \ + "li\t$2, %5\t\t\t# " #name "\n\t" \ + "syscall\n\t" \ + "move\t%0, $2\n\t" \ + ".set\treorder" \ + : "=&r" (__v0), "=r" (__a3) \ + : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name) \ + : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ + "memory"); \ + \ + if (__a3 == 0) \ + return (type) __v0; \ + errno = __v0; \ + return (type) -1; \ +} + +#define _syscall4(type,name,atype,a,btype,b,ctype,c,dtype,d) \ +type name(atype a, btype b, ctype c, dtype d) \ +{ \ + register unsigned long __a0 asm("$4") = (unsigned long) a; \ + register unsigned long __a1 asm("$5") = (unsigned long) b; \ + register unsigned long __a2 asm("$6") = (unsigned long) c; \ + register unsigned long __a3 asm("$7") = (unsigned long) d; \ + unsigned long __v0; \ + \ + __asm__ volatile ( \ + ".set\tnoreorder\n\t" \ + "li\t$2, %5\t\t\t# " #name "\n\t" \ + "syscall\n\t" \ + "move\t%0, $2\n\t" \ + ".set\treorder" \ + : "=&r" (__v0), "+r" (__a3) \ + : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name) \ + : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ + "memory"); \ + \ + if (__a3 == 0) \ + return (type) __v0; \ + errno = __v0; \ + return (type) -1; \ +} + +#if (_MIPS_SIM == _MIPS_SIM_ABI32) + +/* + * Using those means your brain needs more than an oil change ;-) + */ + +#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \ +type name(atype a, btype b, ctype c, dtype d, etype e) \ +{ \ + register unsigned long __a0 asm("$4") = (unsigned long) a; \ + register unsigned long __a1 asm("$5") = (unsigned long) b; \ + register unsigned long __a2 asm("$6") = (unsigned long) c; \ + register unsigned long __a3 asm("$7") = (unsigned long) d; \ + unsigned long __v0; \ + \ + __asm__ volatile ( \ + ".set\tnoreorder\n\t" \ + "lw\t$2, %6\n\t" \ + "subu\t$29, 32\n\t" \ + "sw\t$2, 16($29)\n\t" \ + "li\t$2, 
%5\t\t\t# " #name "\n\t" \ + "syscall\n\t" \ + "move\t%0, $2\n\t" \ + "addiu\t$29, 32\n\t" \ + ".set\treorder" \ + : "=&r" (__v0), "+r" (__a3) \ + : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name), \ + "m" ((unsigned long)e) \ + : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ + "memory"); \ + \ + if (__a3 == 0) \ + return (type) __v0; \ + errno = __v0; \ + return (type) -1; \ +} + +#define _syscall6(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e,ftype,f) \ +type name(atype a, btype b, ctype c, dtype d, etype e, ftype f) \ +{ \ + register unsigned long __a0 asm("$4") = (unsigned long) a; \ + register unsigned long __a1 asm("$5") = (unsigned long) b; \ + register unsigned long __a2 asm("$6") = (unsigned long) c; \ + register unsigned long __a3 asm("$7") = (unsigned long) d; \ + unsigned long __v0; \ + \ + __asm__ volatile ( \ + ".set\tnoreorder\n\t" \ + "lw\t$2, %6\n\t" \ + "lw\t$8, %7\n\t" \ + "subu\t$29, 32\n\t" \ + "sw\t$2, 16($29)\n\t" \ + "sw\t$8, 20($29)\n\t" \ + "li\t$2, %5\t\t\t# " #name "\n\t" \ + "syscall\n\t" \ + "move\t%0, $2\n\t" \ + "addiu\t$29, 32\n\t" \ + ".set\treorder" \ + : "=&r" (__v0), "+r" (__a3) \ + : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name), \ + "m" ((unsigned long)e), "m" ((unsigned long)f) \ + : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ + "memory"); \ + \ + if (__a3 == 0) \ + return (type) __v0; \ + errno = __v0; \ + return (type) -1; \ +} + +#endif /* (_MIPS_SIM == _MIPS_SIM_ABI32) */ + +#if (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64) + +#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \ +type name (atype a,btype b,ctype c,dtype d,etype e) \ +{ \ + register unsigned long __a0 asm("$4") = (unsigned long) a; \ + register unsigned long __a1 asm("$5") = (unsigned long) b; \ + register unsigned long __a2 asm("$6") = (unsigned long) c; \ + register unsigned long __a3 asm("$7") = (unsigned long) d; \ + register unsigned long __a4 asm("$8") = (unsigned long) e; \ + unsigned long __v0; \ + \ + __asm__ volatile ( \ + ".set\tnoreorder\n\t" \ + "li\t$2, %6\t\t\t# " #name "\n\t" \ + "syscall\n\t" \ + "move\t%0, $2\n\t" \ + ".set\treorder" \ + : "=&r" (__v0), "+r" (__a3) \ + : "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4), "i" (__NR_##name) \ + : "$2", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ + "memory"); \ + \ + if (__a3 == 0) \ + return (type) __v0; \ + errno = __v0; \ + return (type) -1; \ +} + +#define _syscall6(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e,ftype,f) \ +type name (atype a,btype b,ctype c,dtype d,etype e,ftype f) \ +{ \ + register unsigned long __a0 asm("$4") = (unsigned long) a; \ + register unsigned long __a1 asm("$5") = (unsigned long) b; \ + register unsigned long __a2 asm("$6") = (unsigned long) c; \ + register unsigned long __a3 asm("$7") = (unsigned long) d; \ + register unsigned long __a4 asm("$8") = (unsigned long) e; \ + register unsigned long __a5 asm("$9") = (unsigned long) f; \ + unsigned long __v0; \ + \ + __asm__ volatile ( \ + ".set\tnoreorder\n\t" \ + "li\t$2, %7\t\t\t# " #name "\n\t" \ + "syscall\n\t" \ + "move\t%0, $2\n\t" \ + ".set\treorder" \ + : "=&r" (__v0), "+r" (__a3) \ + : "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4), "r" (__a5), \ + "i" (__NR_##name) \ + : "$2", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ + "memory"); \ + \ + if (__a3 == 0) \ + return (type) __v0; \ + errno = __v0; \ + return (type) -1; \ +} + +#endif /* (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64) 
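The MIPS macros above all end with the same test because of the MIPS syscall convention they encode: the kernel returns the result (or a positive error number) in $2/$v0 and flags failure by leaving $7/$a3 non-zero, which is also why the DANGER note excludes pipe(2), a call that historically returns its two descriptors in $v0/$v1. A small sketch of that result handling, with an illustrative function name:

#include <errno.h>

/* Illustrative only: the tail of every MIPS _syscallN macro above. */
static long mips_syscall_result_sketch(unsigned long v0, unsigned long a3)
{
	if (a3 == 0)
		return (long)v0;	/* success: $v0 is the return value */
	errno = (int)v0;		/* failure: $v0 holds a positive errno */
	return -1;
}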
*/ + + #define __ARCH_OMIT_COMPAT_SYS_GETDENTS64 #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR diff --git a/trunk/include/asm-parisc/dma-mapping.h b/trunk/include/asm-parisc/dma-mapping.h index 66f0b408c669..1e387e1dad30 100644 --- a/trunk/include/asm-parisc/dma-mapping.h +++ b/trunk/include/asm-parisc/dma-mapping.h @@ -191,13 +191,13 @@ dma_get_cache_alignment(void) } static inline int -dma_is_consistent(struct device *dev, dma_addr_t dma_addr) +dma_is_consistent(dma_addr_t dma_addr) { return (hppa_dma_ops->dma_sync_single_for_cpu == NULL); } static inline void -dma_cache_sync(struct device *dev, void *vaddr, size_t size, +dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction) { if(hppa_dma_ops->dma_sync_single_for_cpu) diff --git a/trunk/include/asm-parisc/futex.h b/trunk/include/asm-parisc/futex.h index dbee6e60aa81..d84bbb283fd1 100644 --- a/trunk/include/asm-parisc/futex.h +++ b/trunk/include/asm-parisc/futex.h @@ -21,7 +21,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - pagefault_disable(); + inc_preempt_count(); switch (op) { case FUTEX_OP_SET: @@ -33,7 +33,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ret = -ENOSYS; } - pagefault_enable(); + dec_preempt_count(); if (!ret) { switch (cmp) { diff --git a/trunk/include/asm-powerpc/dma-mapping.h b/trunk/include/asm-powerpc/dma-mapping.h index 7c7de87bd8ae..7e38b5fddada 100644 --- a/trunk/include/asm-powerpc/dma-mapping.h +++ b/trunk/include/asm-powerpc/dma-mapping.h @@ -342,9 +342,9 @@ static inline int dma_mapping_error(dma_addr_t dma_addr) #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) #ifdef CONFIG_NOT_COHERENT_CACHE -#define dma_is_consistent(d, h) (0) +#define dma_is_consistent(d) (0) #else -#define dma_is_consistent(d, h) (1) +#define dma_is_consistent(d) (1) #endif static inline int dma_get_cache_alignment(void) @@ -378,7 +378,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev, dma_sync_single_for_device(dev, dma_handle, offset + size, direction); } -static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, +static inline void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction) { BUG_ON(direction == DMA_NONE); diff --git a/trunk/include/asm-powerpc/elf.h b/trunk/include/asm-powerpc/elf.h index d36426c01b6b..b5436642a109 100644 --- a/trunk/include/asm-powerpc/elf.h +++ b/trunk/include/asm-powerpc/elf.h @@ -124,10 +124,12 @@ typedef elf_greg_t32 elf_gregset_t32[ELF_NGREG]; # define ELF_DATA ELFDATA2MSB typedef elf_greg_t64 elf_greg_t; typedef elf_gregset_t64 elf_gregset_t; +# define elf_addr_t unsigned long #else /* Assumption: ELF_ARCH == EM_PPC and ELF_CLASS == ELFCLASS32 */ typedef elf_greg_t32 elf_greg_t; typedef elf_gregset_t32 elf_gregset_t; +# define elf_addr_t __u32 #endif /* ELF_ARCH */ /* Floating point registers */ diff --git a/trunk/include/asm-powerpc/futex.h b/trunk/include/asm-powerpc/futex.h index 3f3673fd3ff3..936422e54891 100644 --- a/trunk/include/asm-powerpc/futex.h +++ b/trunk/include/asm-powerpc/futex.h @@ -43,7 +43,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - pagefault_disable(); + inc_preempt_count(); switch (op) { case FUTEX_OP_SET: @@ -65,7 +65,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ret = -ENOSYS; } - pagefault_enable(); + dec_preempt_count(); if (!ret) { switch (cmp) { diff --git a/trunk/include/asm-powerpc/pgalloc.h b/trunk/include/asm-powerpc/pgalloc.h index b0830db68f8a..ae63db7b3e7d 100644 --- a/trunk/include/asm-powerpc/pgalloc.h +++ b/trunk/include/asm-powerpc/pgalloc.h @@ -11,7 +11,7 @@ #include #include -extern struct kmem_cache *pgtable_cache[]; +extern kmem_cache_t *pgtable_cache[]; #ifdef CONFIG_PPC_64K_PAGES #define PTE_CACHE_NUM 0 diff --git a/trunk/include/asm-powerpc/setup.h b/trunk/include/asm-powerpc/setup.h index 817fac0a0714..3d9740aae018 100644 --- a/trunk/include/asm-powerpc/setup.h +++ b/trunk/include/asm-powerpc/setup.h @@ -1,6 +1,9 @@ #ifndef _ASM_POWERPC_SETUP_H #define _ASM_POWERPC_SETUP_H +#ifdef __KERNEL__ + #define COMMAND_LINE_SIZE 512 +#endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_SETUP_H */ diff --git a/trunk/include/asm-powerpc/unistd.h b/trunk/include/asm-powerpc/unistd.h index 0ae954e3d258..04b6c17cc59b 100644 --- a/trunk/include/asm-powerpc/unistd.h +++ b/trunk/include/asm-powerpc/unistd.h @@ -334,6 +334,115 @@ #ifndef __ASSEMBLY__ +/* On powerpc a system call basically clobbers the same registers like a + * function call, with the exception of LR (which is needed for the + * "sc; bnslr" sequence) and CR (where only CR0.SO is clobbered to signal + * an error return status). + */ + +#define __syscall_nr(nr, type, name, args...) \ + unsigned long __sc_ret, __sc_err; \ + { \ + register unsigned long __sc_0 __asm__ ("r0"); \ + register unsigned long __sc_3 __asm__ ("r3"); \ + register unsigned long __sc_4 __asm__ ("r4"); \ + register unsigned long __sc_5 __asm__ ("r5"); \ + register unsigned long __sc_6 __asm__ ("r6"); \ + register unsigned long __sc_7 __asm__ ("r7"); \ + register unsigned long __sc_8 __asm__ ("r8"); \ + \ + __sc_loadargs_##nr(name, args); \ + __asm__ __volatile__ \ + ("sc \n\t" \ + "mfcr %0 " \ + : "=&r" (__sc_0), \ + "=&r" (__sc_3), "=&r" (__sc_4), \ + "=&r" (__sc_5), "=&r" (__sc_6), \ + "=&r" (__sc_7), "=&r" (__sc_8) \ + : __sc_asm_input_##nr \ + : "cr0", "ctr", "memory", \ + "r9", "r10","r11", "r12"); \ + __sc_ret = __sc_3; \ + __sc_err = __sc_0; \ + } \ + if (__sc_err & 0x10000000) \ + { \ + errno = __sc_ret; \ + __sc_ret = -1; \ + } \ + return (type) __sc_ret + +#define __sc_loadargs_0(name, dummy...) 
\ + __sc_0 = __NR_##name +#define __sc_loadargs_1(name, arg1) \ + __sc_loadargs_0(name); \ + __sc_3 = (unsigned long) (arg1) +#define __sc_loadargs_2(name, arg1, arg2) \ + __sc_loadargs_1(name, arg1); \ + __sc_4 = (unsigned long) (arg2) +#define __sc_loadargs_3(name, arg1, arg2, arg3) \ + __sc_loadargs_2(name, arg1, arg2); \ + __sc_5 = (unsigned long) (arg3) +#define __sc_loadargs_4(name, arg1, arg2, arg3, arg4) \ + __sc_loadargs_3(name, arg1, arg2, arg3); \ + __sc_6 = (unsigned long) (arg4) +#define __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5) \ + __sc_loadargs_4(name, arg1, arg2, arg3, arg4); \ + __sc_7 = (unsigned long) (arg5) +#define __sc_loadargs_6(name, arg1, arg2, arg3, arg4, arg5, arg6) \ + __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5); \ + __sc_8 = (unsigned long) (arg6) + +#define __sc_asm_input_0 "0" (__sc_0) +#define __sc_asm_input_1 __sc_asm_input_0, "1" (__sc_3) +#define __sc_asm_input_2 __sc_asm_input_1, "2" (__sc_4) +#define __sc_asm_input_3 __sc_asm_input_2, "3" (__sc_5) +#define __sc_asm_input_4 __sc_asm_input_3, "4" (__sc_6) +#define __sc_asm_input_5 __sc_asm_input_4, "5" (__sc_7) +#define __sc_asm_input_6 __sc_asm_input_5, "6" (__sc_8) + +#define _syscall0(type,name) \ +type name(void) \ +{ \ + __syscall_nr(0, type, name); \ +} + +#define _syscall1(type,name,type1,arg1) \ +type name(type1 arg1) \ +{ \ + __syscall_nr(1, type, name, arg1); \ +} + +#define _syscall2(type,name,type1,arg1,type2,arg2) \ +type name(type1 arg1, type2 arg2) \ +{ \ + __syscall_nr(2, type, name, arg1, arg2); \ +} + +#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ +type name(type1 arg1, type2 arg2, type3 arg3) \ +{ \ + __syscall_nr(3, type, name, arg1, arg2, arg3); \ +} + +#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ +type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ +{ \ + __syscall_nr(4, type, name, arg1, arg2, arg3, arg4); \ +} + +#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \ +type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ +{ \ + __syscall_nr(5, type, name, arg1, arg2, arg3, arg4, arg5); \ +} +#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \ +type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \ +{ \ + __syscall_nr(6, type, name, arg1, arg2, arg3, arg4, arg5, arg6); \ +} + + #include #include #include diff --git a/trunk/include/asm-ppc/highmem.h b/trunk/include/asm-ppc/highmem.h index f7b21ee302b4..1d2c4ef81c22 100644 --- a/trunk/include/asm-ppc/highmem.h +++ b/trunk/include/asm-ppc/highmem.h @@ -79,7 +79,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type) unsigned long vaddr; /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ - pagefault_disable(); + inc_preempt_count(); if (!PageHighMem(page)) return page_address(page); @@ -101,7 +101,8 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type) unsigned int idx = type + KM_TYPE_NR*smp_processor_id(); if (vaddr < KMAP_FIX_BEGIN) { // FIXME - pagefault_enable(); + dec_preempt_count(); + preempt_check_resched(); return; } @@ -114,7 +115,8 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type) pte_clear(&init_mm, vaddr, kmap_pte+idx); flush_tlb_page(NULL, vaddr); #endif - pagefault_enable(); + dec_preempt_count(); + preempt_check_resched(); } static inline struct page *kmap_atomic_to_page(void *ptr) diff --git a/trunk/include/asm-s390/setup.h 
b/trunk/include/asm-s390/setup.h index 9574fe80a046..7664bacdd832 100644 --- a/trunk/include/asm-s390/setup.h +++ b/trunk/include/asm-s390/setup.h @@ -8,13 +8,12 @@ #ifndef _ASM_S390_SETUP_H #define _ASM_S390_SETUP_H -#define COMMAND_LINE_SIZE 896 - #ifdef __KERNEL__ #include #define PARMAREA 0x10400 +#define COMMAND_LINE_SIZE 896 #define MEMORY_CHUNKS 16 /* max 0x7fff */ #define IPL_PARMBLOCK_ORIGIN 0x2000 diff --git a/trunk/include/asm-s390/unistd.h b/trunk/include/asm-s390/unistd.h index fb6fef97d739..71d3c21b84f0 100644 --- a/trunk/include/asm-s390/unistd.h +++ b/trunk/include/asm-s390/unistd.h @@ -345,6 +345,160 @@ #ifdef __KERNEL__ +#include + +#define __syscall_return(type, res) \ +do { \ + if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \ + errno = -(res); \ + res = -1; \ + } \ + return (type) (res); \ +} while (0) + +#define _svc_clobber "1", "cc", "memory" + +#define _syscall0(type,name) \ +type name(void) { \ + register long __svcres asm("2"); \ + long __res; \ + asm volatile( \ + " .if %1 < 256\n" \ + " svc %b1\n" \ + " .else\n" \ + " la %%r1,%1\n" \ + " svc 0\n" \ + " .endif" \ + : "=d" (__svcres) \ + : "i" (__NR_##name) \ + : _svc_clobber); \ + __res = __svcres; \ + __syscall_return(type,__res); \ +} + +#define _syscall1(type,name,type1,arg1) \ +type name(type1 arg1) { \ + register type1 __arg1 asm("2") = arg1; \ + register long __svcres asm("2"); \ + long __res; \ + asm volatile( \ + " .if %1 < 256\n" \ + " svc %b1\n" \ + " .else\n" \ + " la %%r1,%1\n" \ + " svc 0\n" \ + " .endif" \ + : "=d" (__svcres) \ + : "i" (__NR_##name), \ + "0" (__arg1) \ + : _svc_clobber); \ + __res = __svcres; \ + __syscall_return(type,__res); \ +} + +#define _syscall2(type,name,type1,arg1,type2,arg2) \ +type name(type1 arg1, type2 arg2) { \ + register type1 __arg1 asm("2") = arg1; \ + register type2 __arg2 asm("3") = arg2; \ + register long __svcres asm("2"); \ + long __res; \ + asm volatile( \ + " .if %1 < 256\n" \ + " svc %b1\n" \ + " .else\n" \ + " la %%r1,%1\n" \ + " svc 0\n" \ + " .endif" \ + : "=d" (__svcres) \ + : "i" (__NR_##name), \ + "0" (__arg1), \ + "d" (__arg2) \ + : _svc_clobber ); \ + __res = __svcres; \ + __syscall_return(type,__res); \ +} + +#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ +type name(type1 arg1, type2 arg2, type3 arg3) { \ + register type1 __arg1 asm("2") = arg1; \ + register type2 __arg2 asm("3") = arg2; \ + register type3 __arg3 asm("4") = arg3; \ + register long __svcres asm("2"); \ + long __res; \ + asm volatile( \ + " .if %1 < 256\n" \ + " svc %b1\n" \ + " .else\n" \ + " la %%r1,%1\n" \ + " svc 0\n" \ + " .endif" \ + : "=d" (__svcres) \ + : "i" (__NR_##name), \ + "0" (__arg1), \ + "d" (__arg2), \ + "d" (__arg3) \ + : _svc_clobber); \ + __res = __svcres; \ + __syscall_return(type,__res); \ +} + +#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3, \ + type4,name4) \ +type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ + register type1 __arg1 asm("2") = arg1; \ + register type2 __arg2 asm("3") = arg2; \ + register type3 __arg3 asm("4") = arg3; \ + register type4 __arg4 asm("5") = arg4; \ + register long __svcres asm("2"); \ + long __res; \ + asm volatile( \ + " .if %1 < 256\n" \ + " svc %b1\n" \ + " .else\n" \ + " la %%r1,%1\n" \ + " svc 0\n" \ + " .endif" \ + : "=d" (__svcres) \ + : "i" (__NR_##name), \ + "0" (__arg1), \ + "d" (__arg2), \ + "d" (__arg3), \ + "d" (__arg4) \ + : _svc_clobber); \ + __res = __svcres; \ + __syscall_return(type,__res); \ +} + +#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3, \ 
+ type4,name4,type5,name5) \ +type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ + type5 arg5) { \ + register type1 __arg1 asm("2") = arg1; \ + register type2 __arg2 asm("3") = arg2; \ + register type3 __arg3 asm("4") = arg3; \ + register type4 __arg4 asm("5") = arg4; \ + register type5 __arg5 asm("6") = arg5; \ + register long __svcres asm("2"); \ + long __res; \ + asm volatile( \ + " .if %1 < 256\n" \ + " svc %b1\n" \ + " .else\n" \ + " la %%r1,%1\n" \ + " svc 0\n" \ + " .endif" \ + : "=d" (__svcres) \ + : "i" (__NR_##name), \ + "0" (__arg1), \ + "d" (__arg2), \ + "d" (__arg3), \ + "d" (__arg4), \ + "d" (__arg5) \ + : _svc_clobber); \ + __res = __svcres; \ + __syscall_return(type,__res); \ +} + #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_SYS_ALARM diff --git a/trunk/include/asm-sh/atomic.h b/trunk/include/asm-sh/atomic.h index 28305c3cbddf..8bdc1ba56f73 100644 --- a/trunk/include/asm-sh/atomic.h +++ b/trunk/include/asm-sh/atomic.h @@ -28,11 +28,11 @@ static inline void atomic_add(int i, atomic_t *v) unsigned long tmp; __asm__ __volatile__ ( -"1: movli.l @%2, %0 ! atomic_add \n" -" add %1, %0 \n" -" movco.l %0, @%2 \n" +"1: movli.l @%3, %0 ! atomic_add \n" +" add %2, %0 \n" +" movco.l %0, @%3 \n" " bf 1b \n" - : "=&z" (tmp) + : "=&z" (tmp), "=r" (&v->counter) : "r" (i), "r" (&v->counter) : "t"); #else @@ -50,11 +50,11 @@ static inline void atomic_sub(int i, atomic_t *v) unsigned long tmp; __asm__ __volatile__ ( -"1: movli.l @%2, %0 ! atomic_sub \n" -" sub %1, %0 \n" -" movco.l %0, @%2 \n" +"1: movli.l @%3, %0 ! atomic_sub \n" +" sub %2, %0 \n" +" movco.l %0, @%3 \n" " bf 1b \n" - : "=&z" (tmp) + : "=&z" (tmp), "=r" (&v->counter) : "r" (i), "r" (&v->counter) : "t"); #else @@ -80,12 +80,12 @@ static inline int atomic_add_return(int i, atomic_t *v) #ifdef CONFIG_CPU_SH4A __asm__ __volatile__ ( -"1: movli.l @%2, %0 ! atomic_add_return \n" -" add %1, %0 \n" -" movco.l %0, @%2 \n" +"1: movli.l @%3, %0 ! atomic_add_return \n" +" add %2, %0 \n" +" movco.l %0, @%3 \n" " bf 1b \n" " synco \n" - : "=&z" (temp) + : "=&z" (temp), "=r" (&v->counter) : "r" (i), "r" (&v->counter) : "t"); #else @@ -109,12 +109,12 @@ static inline int atomic_sub_return(int i, atomic_t *v) #ifdef CONFIG_CPU_SH4A __asm__ __volatile__ ( -"1: movli.l @%2, %0 ! atomic_sub_return \n" -" sub %1, %0 \n" -" movco.l %0, @%2 \n" +"1: movli.l @%3, %0 ! atomic_sub_return \n" +" sub %2, %0 \n" +" movco.l %0, @%3 \n" " bf 1b \n" " synco \n" - : "=&z" (temp) + : "=&z" (temp), "=r" (&v->counter) : "r" (i), "r" (&v->counter) : "t"); #else @@ -186,11 +186,11 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) unsigned long tmp; __asm__ __volatile__ ( -"1: movli.l @%2, %0 ! atomic_clear_mask \n" -" and %1, %0 \n" -" movco.l %0, @%2 \n" +"1: movli.l @%3, %0 ! atomic_clear_mask \n" +" and %2, %0 \n" +" movco.l %0, @%3 \n" " bf 1b \n" - : "=&z" (tmp) + : "=&z" (tmp), "=r" (&v->counter) : "r" (~mask), "r" (&v->counter) : "t"); #else @@ -208,11 +208,11 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v) unsigned long tmp; __asm__ __volatile__ ( -"1: movli.l @%2, %0 ! atomic_set_mask \n" -" or %1, %0 \n" -" movco.l %0, @%2 \n" +"1: movli.l @%3, %0 ! 
atomic_set_mask \n" +" or %2, %0 \n" +" movco.l %0, @%3 \n" " bf 1b \n" - : "=&z" (tmp) + : "=&z" (tmp), "=r" (&v->counter) : "r" (mask), "r" (&v->counter) : "t"); #else diff --git a/trunk/include/asm-sh/bugs.h b/trunk/include/asm-sh/bugs.h index 795047da5e17..beeea40f549e 100644 --- a/trunk/include/asm-sh/bugs.h +++ b/trunk/include/asm-sh/bugs.h @@ -23,20 +23,16 @@ static void __init check_bugs(void) cpu_data->loops_per_jiffy = loops_per_jiffy; switch (cpu_data->type) { - case CPU_SH7604 ... CPU_SH7619: + case CPU_SH7604: *p++ = '2'; break; - case CPU_SH7206: - *p++ = '2'; - *p++ = 'a'; - break; case CPU_SH7705 ... CPU_SH7300: *p++ = '3'; break; case CPU_SH7750 ... CPU_SH4_501: *p++ = '4'; break; - case CPU_SH7770 ... CPU_SH7785: + case CPU_SH7770 ... CPU_SH7781: *p++ = '4'; *p++ = 'a'; break; diff --git a/trunk/include/asm-sh/clock.h b/trunk/include/asm-sh/clock.h index 1df92807f8c5..fdfb75b30f0d 100644 --- a/trunk/include/asm-sh/clock.h +++ b/trunk/include/asm-sh/clock.h @@ -4,7 +4,6 @@ #include #include #include -#include struct clk; @@ -19,7 +18,7 @@ struct clk_ops { struct clk { struct list_head node; const char *name; - int id; + struct module *owner; struct clk *parent; @@ -41,13 +40,22 @@ void arch_init_clk_ops(struct clk_ops **, int type); int clk_init(void); int __clk_enable(struct clk *); +int clk_enable(struct clk *); + void __clk_disable(struct clk *); +void clk_disable(struct clk *); +int clk_set_rate(struct clk *, unsigned long rate); +unsigned long clk_get_rate(struct clk *); void clk_recalc_rate(struct clk *); +struct clk *clk_get(const char *id); +void clk_put(struct clk *); + int clk_register(struct clk *); void clk_unregister(struct clk *); int show_clocks(struct seq_file *m); #endif /* __ASM_SH_CLOCK_H */ + diff --git a/trunk/include/asm-sh/cpu-sh2/cache.h b/trunk/include/asm-sh/cpu-sh2/cache.h index 20b9796842dc..cd96402e8562 100644 --- a/trunk/include/asm-sh/cpu-sh2/cache.h +++ b/trunk/include/asm-sh/cpu-sh2/cache.h @@ -12,7 +12,6 @@ #define L1_CACHE_SHIFT 4 -#if defined(CONFIG_CPU_SUBTYPE_SH7604) #define CCR 0xfffffe92 /* Address of Cache Control Register */ #define CCR_CACHE_CE 0x01 /* Cache enable */ @@ -28,26 +27,5 @@ #define CCR_CACHE_ORA CCR_CACHE_TW #define CCR_CACHE_WT 0x00 /* SH-2 is _always_ write-through */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7619) -#define CCR1 0xffffffec -#define CCR CCR1 - -#define CCR_CACHE_CE 0x01 /* Cache enable */ -#define CCR_CACHE_WT 0x06 /* CCR[bit1=1,bit2=1] */ - /* 0x00000000-0x7fffffff: Write-through */ - /* 0x80000000-0x9fffffff: Write-back */ - /* 0xc0000000-0xdfffffff: Write-through */ -#define CCR_CACHE_CB 0x00 /* CCR[bit1=0,bit2=0] */ - /* 0x00000000-0x7fffffff: Write-back */ - /* 0x80000000-0x9fffffff: Write-through */ - /* 0xc0000000-0xdfffffff: Write-back */ -#define CCR_CACHE_CF 0x08 /* Cache invalidate */ - -#define CACHE_OC_ADDRESS_ARRAY 0xf0000000 -#define CACHE_OC_DATA_ARRAY 0xf1000000 - -#define CCR_CACHE_ENABLE CCR_CACHE_CE -#define CCR_CACHE_INVALIDATE CCR_CACHE_CF -#endif #endif /* __ASM_CPU_SH2_CACHE_H */ diff --git a/trunk/include/asm-sh/cpu-sh2/freq.h b/trunk/include/asm-sh/cpu-sh2/freq.h deleted file mode 100644 index 31de475da70b..000000000000 --- a/trunk/include/asm-sh/cpu-sh2/freq.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * include/asm-sh/cpu-sh2/freq.h - * - * Copyright (C) 2006 Yoshinori Sato - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- */ -#ifndef __ASM_CPU_SH2_FREQ_H -#define __ASM_CPU_SH2_FREQ_H - -#if defined(CONFIG_CPU_SUBTYPE_SH7619) -#define FREQCR 0xf815ff80 -#endif - -#endif /* __ASM_CPU_SH2_FREQ_H */ - diff --git a/trunk/include/asm-sh/cpu-sh2/mmu_context.h b/trunk/include/asm-sh/cpu-sh2/mmu_context.h deleted file mode 100644 index beeb299e01ec..000000000000 --- a/trunk/include/asm-sh/cpu-sh2/mmu_context.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * include/asm-sh/cpu-sh2/mmu_context.h - * - * Copyright (C) 2003 Paul Mundt - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ -#ifndef __ASM_CPU_SH2_MMU_CONTEXT_H -#define __ASM_CPU_SH2_MMU_CONTEXT_H - -/* No MMU */ - -#endif /* __ASM_CPU_SH2_MMU_CONTEXT_H */ - diff --git a/trunk/include/asm-sh/cpu-sh2/timer.h b/trunk/include/asm-sh/cpu-sh2/timer.h deleted file mode 100644 index a39c241e8195..000000000000 --- a/trunk/include/asm-sh/cpu-sh2/timer.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_CPU_SH2_TIMER_H -#define __ASM_CPU_SH2_TIMER_H - -/* Nothing needed yet */ - -#endif /* __ASM_CPU_SH2_TIMER_H */ diff --git a/trunk/include/asm-sh/cpu-sh2a/addrspace.h b/trunk/include/asm-sh/cpu-sh2a/addrspace.h deleted file mode 100644 index 3d2e9aa21522..000000000000 --- a/trunk/include/asm-sh/cpu-sh2a/addrspace.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/include/asm-sh/cpu-sh2a/cache.h b/trunk/include/asm-sh/cpu-sh2a/cache.h deleted file mode 100644 index 3e4b9e480982..000000000000 --- a/trunk/include/asm-sh/cpu-sh2a/cache.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * include/asm-sh/cpu-sh2a/cache.h - * - * Copyright (C) 2004 Paul Mundt - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ -#ifndef __ASM_CPU_SH2A_CACHE_H -#define __ASM_CPU_SH2A_CACHE_H - -#define L1_CACHE_SHIFT 4 - -#define CCR1 0xfffc1000 -#define CCR2 0xfffc1004 - -/* CCR1 behaves more like the traditional CCR */ -#define CCR CCR1 - -/* - * Most of the SH-2A CCR1 definitions resemble the SH-4 ones. All others not - * listed here are reserved. 
- */ -#define CCR_CACHE_CB 0x0000 /* Hack */ -#define CCR_CACHE_OCE 0x0001 -#define CCR_CACHE_WT 0x0002 -#define CCR_CACHE_OCI 0x0008 /* OCF */ -#define CCR_CACHE_ICE 0x0100 -#define CCR_CACHE_ICI 0x0800 /* ICF */ - -#define CACHE_IC_ADDRESS_ARRAY 0xf0000000 -#define CACHE_OC_ADDRESS_ARRAY 0xf0800000 - -#define CCR_CACHE_ENABLE (CCR_CACHE_OCE | CCR_CACHE_ICE) -#define CCR_CACHE_INVALIDATE (CCR_CACHE_OCI | CCR_CACHE_ICI) - -#endif /* __ASM_CPU_SH2A_CACHE_H */ - diff --git a/trunk/include/asm-sh/cpu-sh2a/cacheflush.h b/trunk/include/asm-sh/cpu-sh2a/cacheflush.h deleted file mode 100644 index fa3186c73350..000000000000 --- a/trunk/include/asm-sh/cpu-sh2a/cacheflush.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/include/asm-sh/cpu-sh2a/dma.h b/trunk/include/asm-sh/cpu-sh2a/dma.h deleted file mode 100644 index 0d5ad85c1de8..000000000000 --- a/trunk/include/asm-sh/cpu-sh2a/dma.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/include/asm-sh/cpu-sh2a/freq.h b/trunk/include/asm-sh/cpu-sh2a/freq.h deleted file mode 100644 index e518fff6d10f..000000000000 --- a/trunk/include/asm-sh/cpu-sh2a/freq.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * include/asm-sh/cpu-sh2a/freq.h - * - * Copyright (C) 2006 Yoshinori Sato - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ -#ifndef __ASM_CPU_SH2A_FREQ_H -#define __ASM_CPU_SH2A_FREQ_H - -#if defined(CONFIG_CPU_SUBTYPE_SH7206) -#define FREQCR 0xfffe0010 -#endif - -#endif /* __ASM_CPU_SH2A_FREQ_H */ - diff --git a/trunk/include/asm-sh/cpu-sh2a/mmu_context.h b/trunk/include/asm-sh/cpu-sh2a/mmu_context.h deleted file mode 100644 index cd2387f7db9e..000000000000 --- a/trunk/include/asm-sh/cpu-sh2a/mmu_context.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/include/asm-sh/cpu-sh2a/timer.h b/trunk/include/asm-sh/cpu-sh2a/timer.h deleted file mode 100644 index fee504adf11e..000000000000 --- a/trunk/include/asm-sh/cpu-sh2a/timer.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/include/asm-sh/cpu-sh2a/ubc.h b/trunk/include/asm-sh/cpu-sh2a/ubc.h deleted file mode 100644 index cf28062b96a2..000000000000 --- a/trunk/include/asm-sh/cpu-sh2a/ubc.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/include/asm-sh/cpu-sh2a/watchdog.h b/trunk/include/asm-sh/cpu-sh2a/watchdog.h deleted file mode 100644 index c1b3e2488478..000000000000 --- a/trunk/include/asm-sh/cpu-sh2a/watchdog.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/include/asm-sh/dma-mapping.h b/trunk/include/asm-sh/dma-mapping.h index 37ab0c131a4d..56cd4b977232 100644 --- a/trunk/include/asm-sh/dma-mapping.h +++ b/trunk/include/asm-sh/dma-mapping.h @@ -53,7 +53,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size, consistent_free(vaddr, size); } -static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, +static inline void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir) { consistent_sync(vaddr, size, (int)dir); diff --git a/trunk/include/asm-sh/dma.h b/trunk/include/asm-sh/dma.h index faf3051cd429..d9daa028689f 100644 --- a/trunk/include/asm-sh/dma.h +++ b/trunk/include/asm-sh/dma.h @@ -14,7 +14,9 @@ #include #include #include +#include #include +#include /* The maximum address that we can perform a DMA transfer to on this platform */ /* Don't define MAX_DMA_ADDRESS; it's useless on the SuperH and any @@ -44,21 +46,16 @@ * DMAC (dma_info) flags */ enum { - 
DMAC_CHANNELS_CONFIGURED = 0x01, - DMAC_CHANNELS_TEI_CAPABLE = 0x02, /* Transfer end interrupt */ + DMAC_CHANNELS_CONFIGURED = 0x00, + DMAC_CHANNELS_TEI_CAPABLE = 0x01, }; /* * DMA channel capabilities / flags */ enum { - DMA_CONFIGURED = 0x01, - - /* - * Transfer end interrupt, inherited from DMAC. - * wait_queue used in dma_wait_for_completion. - */ - DMA_TEI_CAPABLE = 0x02, + DMA_TEI_CAPABLE = 0x01, + DMA_CONFIGURED = 0x02, }; extern spinlock_t dma_spin_lock; @@ -71,31 +68,28 @@ struct dma_ops { int (*get_residue)(struct dma_channel *chan); int (*xfer)(struct dma_channel *chan); - int (*configure)(struct dma_channel *chan, unsigned long flags); - int (*extend)(struct dma_channel *chan, unsigned long op, void *param); + void (*configure)(struct dma_channel *chan, unsigned long flags); }; struct dma_channel { - char dev_id[16]; /* unique name per DMAC of channel */ + char dev_id[16]; - unsigned int chan; /* DMAC channel number */ + unsigned int chan; /* Physical channel number */ unsigned int vchan; /* Virtual channel number */ - unsigned int mode; unsigned int count; unsigned long sar; unsigned long dar; - const char **caps; - unsigned long flags; atomic_t busy; + struct semaphore sem; wait_queue_head_t wait_queue; struct sys_device dev; - void *priv_data; + char *name; }; struct dma_info { @@ -109,12 +103,6 @@ struct dma_info { struct dma_channel *channels; struct list_head list; - int first_channel_nr; -}; - -struct dma_chan_caps { - int ch_num; - const char **caplist; }; #define to_dma_channel(channel) container_of(channel, struct dma_channel, dev) @@ -133,8 +121,6 @@ extern int dma_xfer(unsigned int chan, unsigned long from, #define dma_read_page(chan, from, to) \ dma_read(chan, from, to, PAGE_SIZE) -extern int request_dma_bycap(const char **dmac, const char **caps, - const char *dev_id); extern int request_dma(unsigned int chan, const char *dev_id); extern void free_dma(unsigned int chan); extern int get_dma_residue(unsigned int chan); @@ -145,10 +131,6 @@ extern void dma_configure_channel(unsigned int chan, unsigned long flags); extern int register_dmac(struct dma_info *info); extern void unregister_dmac(struct dma_info *info); -extern struct dma_info *get_dma_info_by_name(const char *dmac_name); - -extern int dma_extend(unsigned int chan, unsigned long op, void *param); -extern int register_chan_caps(const char *dmac, struct dma_chan_caps *capslist); #ifdef CONFIG_SYSFS /* arch/sh/drivers/dma/dma-sysfs.c */ diff --git a/trunk/include/asm-sh/elf.h b/trunk/include/asm-sh/elf.h index 43ca244564b1..fc050fd7645e 100644 --- a/trunk/include/asm-sh/elf.h +++ b/trunk/include/asm-sh/elf.h @@ -74,7 +74,7 @@ typedef struct user_fpu_struct elf_fpregset_t; #define ELF_ARCH EM_SH #define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE PAGE_SIZE +#define ELF_EXEC_PAGESIZE 4096 /* This is the location that an ET_DYN program is loaded if exec'ed. Typical use of this is to invoke "./ld.so someprog" to test out a new version of diff --git a/trunk/include/asm-sh/entry-macros.S b/trunk/include/asm-sh/entry-macros.S deleted file mode 100644 index 500030eae7aa..000000000000 --- a/trunk/include/asm-sh/entry-macros.S +++ /dev/null @@ -1,33 +0,0 @@ -! 
entry.S macro define - - .macro cli - stc sr, r0 - or #0xf0, r0 - ldc r0, sr - .endm - - .macro sti - mov #0xf0, r11 - extu.b r11, r11 - not r11, r11 - stc sr, r10 - and r11, r10 -#ifdef CONFIG_HAS_SR_RB - stc k_g_imask, r11 - or r11, r10 -#endif - ldc r10, sr - .endm - - .macro get_current_thread_info, ti, tmp -#ifdef CONFIG_HAS_SR_RB - stc r7_bank, \ti -#else - mov #((THREAD_SIZE - 1) >> 10) ^ 0xff, \tmp - shll8 \tmp - shll2 \tmp - mov r15, \ti - and \tmp, \ti -#endif - .endm - diff --git a/trunk/include/asm-sh/irq-sh73180.h b/trunk/include/asm-sh/irq-sh73180.h new file mode 100644 index 000000000000..b28af9a69d72 --- /dev/null +++ b/trunk/include/asm-sh/irq-sh73180.h @@ -0,0 +1,314 @@ +#ifndef __ASM_SH_IRQ_SH73180_H +#define __ASM_SH_IRQ_SH73180_H + +/* + * linux/include/asm-sh/irq-sh73180.h + * + * Copyright (C) 2004 Takashi SHUDO + */ + +#undef INTC_IPRA +#undef INTC_IPRB +#undef INTC_IPRC +#undef INTC_IPRD + +#undef DMTE0_IRQ +#undef DMTE1_IRQ +#undef DMTE2_IRQ +#undef DMTE3_IRQ +#undef DMTE4_IRQ +#undef DMTE5_IRQ +#undef DMTE6_IRQ +#undef DMTE7_IRQ +#undef DMAE_IRQ +#undef DMA_IPR_ADDR +#undef DMA_IPR_POS +#undef DMA_PRIORITY + +#undef INTC_IMCR0 +#undef INTC_IMCR1 +#undef INTC_IMCR2 +#undef INTC_IMCR3 +#undef INTC_IMCR4 +#undef INTC_IMCR5 +#undef INTC_IMCR6 +#undef INTC_IMCR7 +#undef INTC_IMCR8 +#undef INTC_IMCR9 +#undef INTC_IMCR10 + + +#define INTC_IPRA 0xA4080000UL +#define INTC_IPRB 0xA4080004UL +#define INTC_IPRC 0xA4080008UL +#define INTC_IPRD 0xA408000CUL +#define INTC_IPRE 0xA4080010UL +#define INTC_IPRF 0xA4080014UL +#define INTC_IPRG 0xA4080018UL +#define INTC_IPRH 0xA408001CUL +#define INTC_IPRI 0xA4080020UL +#define INTC_IPRJ 0xA4080024UL +#define INTC_IPRK 0xA4080028UL + +#define INTC_IMR0 0xA4080080UL +#define INTC_IMR1 0xA4080084UL +#define INTC_IMR2 0xA4080088UL +#define INTC_IMR3 0xA408008CUL +#define INTC_IMR4 0xA4080090UL +#define INTC_IMR5 0xA4080094UL +#define INTC_IMR6 0xA4080098UL +#define INTC_IMR7 0xA408009CUL +#define INTC_IMR8 0xA40800A0UL +#define INTC_IMR9 0xA40800A4UL +#define INTC_IMR10 0xA40800A8UL +#define INTC_IMR11 0xA40800ACUL + +#define INTC_IMCR0 0xA40800C0UL +#define INTC_IMCR1 0xA40800C4UL +#define INTC_IMCR2 0xA40800C8UL +#define INTC_IMCR3 0xA40800CCUL +#define INTC_IMCR4 0xA40800D0UL +#define INTC_IMCR5 0xA40800D4UL +#define INTC_IMCR6 0xA40800D8UL +#define INTC_IMCR7 0xA40800DCUL +#define INTC_IMCR8 0xA40800E0UL +#define INTC_IMCR9 0xA40800E4UL +#define INTC_IMCR10 0xA40800E8UL +#define INTC_IMCR11 0xA40800ECUL + +#define INTC_ICR0 0xA4140000UL +#define INTC_ICR1 0xA414001CUL + +#define INTMSK0 0xa4140044 +#define INTMSKCLR0 0xa4140064 +#define INTC_INTPRI0 0xa4140010 + +/* + NOTE: + + *_IRQ = (INTEVT2 - 0x200)/0x20 +*/ + +/* TMU0 */ +#define TMU0_IRQ 16 +#define TMU0_IPR_ADDR INTC_IPRA +#define TMU0_IPR_POS 3 +#define TMU0_PRIORITY 2 + +#define TIMER_IRQ 16 +#define TIMER_IPR_ADDR INTC_IPRA +#define TIMER_IPR_POS 3 +#define TIMER_PRIORITY 2 + +/* TMU1 */ +#define TMU1_IRQ 17 +#define TMU1_IPR_ADDR INTC_IPRA +#define TMU1_IPR_POS 2 +#define TMU1_PRIORITY 2 + +/* TMU2 */ +#define TMU2_IRQ 18 +#define TMU2_IPR_ADDR INTC_IPRA +#define TMU2_IPR_POS 1 +#define TMU2_PRIORITY 2 + +/* LCDC */ +#define LCDC_IRQ 28 +#define LCDC_IPR_ADDR INTC_IPRB +#define LCDC_IPR_POS 2 +#define LCDC_PRIORITY 2 + +/* VIO (Video I/O) */ +#define CEU_IRQ 52 +#define BEU_IRQ 53 +#define VEU_IRQ 54 +#define VOU_IRQ 55 +#define VIO_IPR_ADDR INTC_IPRE +#define VIO_IPR_POS 2 +#define VIO_PRIORITY 2 + +/* MFI (Multi Functional Interface) */ +#define MFI_IRQ 56 +#define 
MFI_IPR_ADDR INTC_IPRE +#define MFI_IPR_POS 1 +#define MFI_PRIORITY 2 + +/* VPU (Video Processing Unit) */ +#define VPU_IRQ 60 +#define VPU_IPR_ADDR INTC_IPRE +#define VPU_IPR_POS 0 +#define VPU_PRIORITY 2 + +/* 3DG */ +#define TDG_IRQ 63 +#define TDG_IPR_ADDR INTC_IPRJ +#define TDG_IPR_POS 2 +#define TDG_PRIORITY 2 + +/* DMAC(1) */ +#define DMTE0_IRQ 48 +#define DMTE1_IRQ 49 +#define DMTE2_IRQ 50 +#define DMTE3_IRQ 51 +#define DMA1_IPR_ADDR INTC_IPRE +#define DMA1_IPR_POS 3 +#define DMA1_PRIORITY 7 + +/* DMAC(2) */ +#define DMTE4_IRQ 76 +#define DMTE5_IRQ 77 +#define DMA2_IPR_ADDR INTC_IPRF +#define DMA2_IPR_POS 2 +#define DMA2_PRIORITY 7 + +/* SCIF0 */ +#define SCIF_ERI_IRQ 80 +#define SCIF_RXI_IRQ 81 +#define SCIF_BRI_IRQ 82 +#define SCIF_TXI_IRQ 83 +#define SCIF_IPR_ADDR INTC_IPRG +#define SCIF_IPR_POS 3 +#define SCIF_PRIORITY 3 + +/* SIOF0 */ +#define SIOF0_IRQ 84 +#define SIOF0_IPR_ADDR INTC_IPRH +#define SIOF0_IPR_POS 3 +#define SIOF0_PRIORITY 3 + +/* FLCTL (Flash Memory Controller) */ +#define FLSTE_IRQ 92 +#define FLTEND_IRQ 93 +#define FLTRQ0_IRQ 94 +#define FLTRQ1_IRQ 95 +#define FLCTL_IPR_ADDR INTC_IPRH +#define FLCTL_IPR_POS 1 +#define FLCTL_PRIORITY 3 + +/* IIC(0) (IIC Bus Interface) */ +#define IIC0_ALI_IRQ 96 +#define IIC0_TACKI_IRQ 97 +#define IIC0_WAITI_IRQ 98 +#define IIC0_DTEI_IRQ 99 +#define IIC0_IPR_ADDR INTC_IPRH +#define IIC0_IPR_POS 0 +#define IIC0_PRIORITY 3 + +/* IIC(1) (IIC Bus Interface) */ +#define IIC1_ALI_IRQ 44 +#define IIC1_TACKI_IRQ 45 +#define IIC1_WAITI_IRQ 46 +#define IIC1_DTEI_IRQ 47 +#define IIC1_IPR_ADDR INTC_IPRG +#define IIC1_IPR_POS 0 +#define IIC1_PRIORITY 3 + +/* SIO0 */ +#define SIO0_IRQ 88 +#define SIO0_IPR_ADDR INTC_IPRI +#define SIO0_IPR_POS 3 +#define SIO0_PRIORITY 3 + +/* SDHI */ +#define SDHI_SDHII0_IRQ 100 +#define SDHI_SDHII1_IRQ 101 +#define SDHI_SDHII2_IRQ 102 +#define SDHI_SDHII3_IRQ 103 +#define SDHI_IPR_ADDR INTC_IPRK +#define SDHI_IPR_POS 0 +#define SDHI_PRIORITY 3 + +/* SIU (Sound Interface Unit) */ +#define SIU_IRQ 108 +#define SIU_IPR_ADDR INTC_IPRJ +#define SIU_IPR_POS 1 +#define SIU_PRIORITY 3 + +#define PORT_PACR 0xA4050100UL +#define PORT_PBCR 0xA4050102UL +#define PORT_PCCR 0xA4050104UL +#define PORT_PDCR 0xA4050106UL +#define PORT_PECR 0xA4050108UL +#define PORT_PFCR 0xA405010AUL +#define PORT_PGCR 0xA405010CUL +#define PORT_PHCR 0xA405010EUL +#define PORT_PJCR 0xA4050110UL +#define PORT_PKCR 0xA4050112UL +#define PORT_PLCR 0xA4050114UL +#define PORT_SCPCR 0xA4050116UL +#define PORT_PMCR 0xA4050118UL +#define PORT_PNCR 0xA405011AUL +#define PORT_PQCR 0xA405011CUL +#define PORT_PRCR 0xA405011EUL +#define PORT_PTCR 0xA405014CUL +#define PORT_PUCR 0xA405014EUL +#define PORT_PVCR 0xA4050150UL + +#define PORT_PSELA 0xA4050140UL +#define PORT_PSELB 0xA4050142UL +#define PORT_PSELC 0xA4050144UL +#define PORT_PSELE 0xA4050158UL + +#define PORT_HIZCRA 0xA4050146UL +#define PORT_HIZCRB 0xA4050148UL +#define PORT_DRVCR 0xA405014AUL + +#define PORT_PADR 0xA4050120UL +#define PORT_PBDR 0xA4050122UL +#define PORT_PCDR 0xA4050124UL +#define PORT_PDDR 0xA4050126UL +#define PORT_PEDR 0xA4050128UL +#define PORT_PFDR 0xA405012AUL +#define PORT_PGDR 0xA405012CUL +#define PORT_PHDR 0xA405012EUL +#define PORT_PJDR 0xA4050130UL +#define PORT_PKDR 0xA4050132UL +#define PORT_PLDR 0xA4050134UL +#define PORT_SCPDR 0xA4050136UL +#define PORT_PMDR 0xA4050138UL +#define PORT_PNDR 0xA405013AUL +#define PORT_PQDR 0xA405013CUL +#define PORT_PRDR 0xA405013EUL +#define PORT_PTDR 0xA405016CUL +#define PORT_PUDR 0xA405016EUL +#define PORT_PVDR 0xA4050170UL 
+ +#define IRQ0_IRQ 32 +#define IRQ1_IRQ 33 +#define IRQ2_IRQ 34 +#define IRQ3_IRQ 35 +#define IRQ4_IRQ 36 +#define IRQ5_IRQ 37 +#define IRQ6_IRQ 38 +#define IRQ7_IRQ 39 + +#define INTPRI00 0xA4140010UL + +#define IRQ0_IPR_ADDR INTPRI00 +#define IRQ1_IPR_ADDR INTPRI00 +#define IRQ2_IPR_ADDR INTPRI00 +#define IRQ3_IPR_ADDR INTPRI00 +#define IRQ4_IPR_ADDR INTPRI00 +#define IRQ5_IPR_ADDR INTPRI00 +#define IRQ6_IPR_ADDR INTPRI00 +#define IRQ7_IPR_ADDR INTPRI00 + +#define IRQ0_IPR_POS 7 +#define IRQ1_IPR_POS 6 +#define IRQ2_IPR_POS 5 +#define IRQ3_IPR_POS 4 +#define IRQ4_IPR_POS 3 +#define IRQ5_IPR_POS 2 +#define IRQ6_IPR_POS 1 +#define IRQ7_IPR_POS 0 + +#define IRQ0_PRIORITY 1 +#define IRQ1_PRIORITY 1 +#define IRQ2_PRIORITY 1 +#define IRQ3_PRIORITY 1 +#define IRQ4_PRIORITY 1 +#define IRQ5_PRIORITY 1 +#define IRQ6_PRIORITY 1 +#define IRQ7_PRIORITY 1 + +#endif /* __ASM_SH_IRQ_SH73180_H */ diff --git a/trunk/include/asm-sh/irq-sh7343.h b/trunk/include/asm-sh/irq-sh7343.h new file mode 100644 index 000000000000..5d15419b53b0 --- /dev/null +++ b/trunk/include/asm-sh/irq-sh7343.h @@ -0,0 +1,317 @@ +#ifndef __ASM_SH_IRQ_SH7343_H +#define __ASM_SH_IRQ_SH7343_H + +/* + * linux/include/asm-sh/irq-sh7343.h + * + * Copyright (C) 2006 Kenati Technologies Inc. + * Andre Mccurdy + * Ranjit Deshpande + */ + +#undef INTC_IPRA +#undef INTC_IPRB +#undef INTC_IPRC +#undef INTC_IPRD + +#undef DMTE0_IRQ +#undef DMTE1_IRQ +#undef DMTE2_IRQ +#undef DMTE3_IRQ +#undef DMTE4_IRQ +#undef DMTE5_IRQ +#undef DMTE6_IRQ +#undef DMTE7_IRQ +#undef DMAE_IRQ +#undef DMA_IPR_ADDR +#undef DMA_IPR_POS +#undef DMA_PRIORITY + +#undef INTC_IMCR0 +#undef INTC_IMCR1 +#undef INTC_IMCR2 +#undef INTC_IMCR3 +#undef INTC_IMCR4 +#undef INTC_IMCR5 +#undef INTC_IMCR6 +#undef INTC_IMCR7 +#undef INTC_IMCR8 +#undef INTC_IMCR9 +#undef INTC_IMCR10 + + +#define INTC_IPRA 0xA4080000UL +#define INTC_IPRB 0xA4080004UL +#define INTC_IPRC 0xA4080008UL +#define INTC_IPRD 0xA408000CUL +#define INTC_IPRE 0xA4080010UL +#define INTC_IPRF 0xA4080014UL +#define INTC_IPRG 0xA4080018UL +#define INTC_IPRH 0xA408001CUL +#define INTC_IPRI 0xA4080020UL +#define INTC_IPRJ 0xA4080024UL +#define INTC_IPRK 0xA4080028UL +#define INTC_IPRL 0xA408002CUL + +#define INTC_IMR0 0xA4080080UL +#define INTC_IMR1 0xA4080084UL +#define INTC_IMR2 0xA4080088UL +#define INTC_IMR3 0xA408008CUL +#define INTC_IMR4 0xA4080090UL +#define INTC_IMR5 0xA4080094UL +#define INTC_IMR6 0xA4080098UL +#define INTC_IMR7 0xA408009CUL +#define INTC_IMR8 0xA40800A0UL +#define INTC_IMR9 0xA40800A4UL +#define INTC_IMR10 0xA40800A8UL +#define INTC_IMR11 0xA40800ACUL + +#define INTC_IMCR0 0xA40800C0UL +#define INTC_IMCR1 0xA40800C4UL +#define INTC_IMCR2 0xA40800C8UL +#define INTC_IMCR3 0xA40800CCUL +#define INTC_IMCR4 0xA40800D0UL +#define INTC_IMCR5 0xA40800D4UL +#define INTC_IMCR6 0xA40800D8UL +#define INTC_IMCR7 0xA40800DCUL +#define INTC_IMCR8 0xA40800E0UL +#define INTC_IMCR9 0xA40800E4UL +#define INTC_IMCR10 0xA40800E8UL +#define INTC_IMCR11 0xA40800ECUL + +#define INTC_ICR0 0xA4140000UL +#define INTC_ICR1 0xA414001CUL + +#define INTMSK0 0xa4140044 +#define INTMSKCLR0 0xa4140064 +#define INTC_INTPRI0 0xa4140010 + +/* + NOTE: + + *_IRQ = (INTEVT2 - 0x200)/0x20 +*/ + +/* TMU0 */ +#define TMU0_IRQ 16 +#define TMU0_IPR_ADDR INTC_IPRA +#define TMU0_IPR_POS 3 +#define TMU0_PRIORITY 2 + +#define TIMER_IRQ 16 +#define TIMER_IPR_ADDR INTC_IPRA +#define TIMER_IPR_POS 3 +#define TIMER_PRIORITY 2 + +/* TMU1 */ +#define TMU1_IRQ 17 +#define TMU1_IPR_ADDR INTC_IPRA +#define TMU1_IPR_POS 2 +#define TMU1_PRIORITY 2 + 
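For reference, the IRQ numbers in these SH-Mobile interrupt headers follow the NOTE above, IRQ = (INTEVT2 - 0x200)/0x20; working backwards, TMU0_IRQ 16 corresponds to INTEVT2 code 0x400. A minimal host-side sketch, illustrative only and not part of the patch, that checks this arithmetic against the TMU defines just listed:

#include <assert.h>

/* Illustrative only: the INTEVT2 <-> IRQ conversion stated in the NOTE
 * comment above.  The 0x400/0x420/0x440 codes are derived from the TMU
 * defines here by applying the formula in reverse.
 */
static unsigned int intevt2_to_irq(unsigned long intevt2)
{
	return (unsigned int)((intevt2 - 0x200) / 0x20);
}

static unsigned long irq_to_intevt2(unsigned int irq)
{
	return 0x200 + 0x20UL * irq;
}

int main(void)
{
	assert(intevt2_to_irq(0x400) == 16);	/* TMU0_IRQ */
	assert(intevt2_to_irq(0x420) == 17);	/* TMU1_IRQ */
	assert(irq_to_intevt2(18) == 0x440);	/* TMU2_IRQ */
	return 0;
}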
+/* TMU2 */ +#define TMU2_IRQ 18 +#define TMU2_IPR_ADDR INTC_IPRA +#define TMU2_IPR_POS 1 +#define TMU2_PRIORITY 2 + +/* LCDC */ +#define LCDC_IRQ 28 +#define LCDC_IPR_ADDR INTC_IPRB +#define LCDC_IPR_POS 2 +#define LCDC_PRIORITY 2 + +/* VIO (Video I/O) */ +#define CEU_IRQ 52 +#define BEU_IRQ 53 +#define VEU_IRQ 54 +#define VOU_IRQ 55 +#define VIO_IPR_ADDR INTC_IPRE +#define VIO_IPR_POS 2 +#define VIO_PRIORITY 2 + +/* MFI (Multi Functional Interface) */ +#define MFI_IRQ 56 +#define MFI_IPR_ADDR INTC_IPRE +#define MFI_IPR_POS 1 +#define MFI_PRIORITY 2 + +/* VPU (Video Processing Unit) */ +#define VPU_IRQ 60 +#define VPU_IPR_ADDR INTC_IPRE +#define VPU_IPR_POS 0 +#define VPU_PRIORITY 2 + +/* 3DG */ +#define TDG_IRQ 63 +#define TDG_IPR_ADDR INTC_IPRJ +#define TDG_IPR_POS 2 +#define TDG_PRIORITY 2 + +/* DMAC(1) */ +#define DMTE0_IRQ 48 +#define DMTE1_IRQ 49 +#define DMTE2_IRQ 50 +#define DMTE3_IRQ 51 +#define DMA1_IPR_ADDR INTC_IPRE +#define DMA1_IPR_POS 3 +#define DMA1_PRIORITY 7 + +/* DMAC(2) */ +#define DMTE4_IRQ 76 +#define DMTE5_IRQ 77 +#define DMA2_IPR_ADDR INTC_IPRF +#define DMA2_IPR_POS 2 +#define DMA2_PRIORITY 7 + +/* SCIF0 */ +#define SCIF_ERI_IRQ 80 +#define SCIF_RXI_IRQ 81 +#define SCIF_BRI_IRQ 82 +#define SCIF_TXI_IRQ 83 +#define SCIF_IPR_ADDR INTC_IPRG +#define SCIF_IPR_POS 3 +#define SCIF_PRIORITY 3 + +/* SIOF0 */ +#define SIOF0_IRQ 84 +#define SIOF0_IPR_ADDR INTC_IPRH +#define SIOF0_IPR_POS 3 +#define SIOF0_PRIORITY 3 + +/* FLCTL (Flash Memory Controller) */ +#define FLSTE_IRQ 92 +#define FLTEND_IRQ 93 +#define FLTRQ0_IRQ 94 +#define FLTRQ1_IRQ 95 +#define FLCTL_IPR_ADDR INTC_IPRH +#define FLCTL_IPR_POS 1 +#define FLCTL_PRIORITY 3 + +/* IIC(0) (IIC Bus Interface) */ +#define IIC0_ALI_IRQ 96 +#define IIC0_TACKI_IRQ 97 +#define IIC0_WAITI_IRQ 98 +#define IIC0_DTEI_IRQ 99 +#define IIC0_IPR_ADDR INTC_IPRH +#define IIC0_IPR_POS 0 +#define IIC0_PRIORITY 3 + +/* IIC(1) (IIC Bus Interface) */ +#define IIC1_ALI_IRQ 44 +#define IIC1_TACKI_IRQ 45 +#define IIC1_WAITI_IRQ 46 +#define IIC1_DTEI_IRQ 47 +#define IIC1_IPR_ADDR INTC_IPRI +#define IIC1_IPR_POS 0 +#define IIC1_PRIORITY 3 + +/* SIO0 */ +#define SIO0_IRQ 88 +#define SIO0_IPR_ADDR INTC_IPRI +#define SIO0_IPR_POS 3 +#define SIO0_PRIORITY 3 + +/* SDHI */ +#define SDHI_SDHII0_IRQ 100 +#define SDHI_SDHII1_IRQ 101 +#define SDHI_SDHII2_IRQ 102 +#define SDHI_SDHII3_IRQ 103 +#define SDHI_IPR_ADDR INTC_IPRK +#define SDHI_IPR_POS 0 +#define SDHI_PRIORITY 3 + +/* SIU (Sound Interface Unit) */ +#define SIU_IRQ 108 +#define SIU_IPR_ADDR INTC_IPRJ +#define SIU_IPR_POS 1 +#define SIU_PRIORITY 3 + +#define PORT_PACR 0xA4050100UL +#define PORT_PBCR 0xA4050102UL +#define PORT_PCCR 0xA4050104UL +#define PORT_PDCR 0xA4050106UL +#define PORT_PECR 0xA4050108UL +#define PORT_PFCR 0xA405010AUL +#define PORT_PGCR 0xA405010CUL +#define PORT_PHCR 0xA405010EUL +#define PORT_PJCR 0xA4050110UL +#define PORT_PKCR 0xA4050112UL +#define PORT_PLCR 0xA4050114UL +#define PORT_SCPCR 0xA4050116UL +#define PORT_PMCR 0xA4050118UL +#define PORT_PNCR 0xA405011AUL +#define PORT_PQCR 0xA405011CUL +#define PORT_PRCR 0xA405011EUL +#define PORT_PTCR 0xA405014CUL +#define PORT_PUCR 0xA405014EUL +#define PORT_PVCR 0xA4050150UL + +#define PORT_PSELA 0xA4050140UL +#define PORT_PSELB 0xA4050142UL +#define PORT_PSELC 0xA4050144UL +#define PORT_PSELE 0xA4050158UL + +#define PORT_HIZCRA 0xA4050146UL +#define PORT_HIZCRB 0xA4050148UL +#define PORT_DRVCR 0xA405014AUL + +#define PORT_PADR 0xA4050120UL +#define PORT_PBDR 0xA4050122UL +#define PORT_PCDR 0xA4050124UL +#define PORT_PDDR 
0xA4050126UL +#define PORT_PEDR 0xA4050128UL +#define PORT_PFDR 0xA405012AUL +#define PORT_PGDR 0xA405012CUL +#define PORT_PHDR 0xA405012EUL +#define PORT_PJDR 0xA4050130UL +#define PORT_PKDR 0xA4050132UL +#define PORT_PLDR 0xA4050134UL +#define PORT_SCPDR 0xA4050136UL +#define PORT_PMDR 0xA4050138UL +#define PORT_PNDR 0xA405013AUL +#define PORT_PQDR 0xA405013CUL +#define PORT_PRDR 0xA405013EUL +#define PORT_PTDR 0xA405016CUL +#define PORT_PUDR 0xA405016EUL +#define PORT_PVDR 0xA4050170UL + +#define IRQ0_IRQ 32 +#define IRQ1_IRQ 33 +#define IRQ2_IRQ 34 +#define IRQ3_IRQ 35 +#define IRQ4_IRQ 36 +#define IRQ5_IRQ 37 +#define IRQ6_IRQ 38 +#define IRQ7_IRQ 39 + +#define INTPRI00 0xA4140010UL + +#define IRQ0_IPR_ADDR INTPRI00 +#define IRQ1_IPR_ADDR INTPRI00 +#define IRQ2_IPR_ADDR INTPRI00 +#define IRQ3_IPR_ADDR INTPRI00 +#define IRQ4_IPR_ADDR INTPRI00 +#define IRQ5_IPR_ADDR INTPRI00 +#define IRQ6_IPR_ADDR INTPRI00 +#define IRQ7_IPR_ADDR INTPRI00 + +#define IRQ0_IPR_POS 7 +#define IRQ1_IPR_POS 6 +#define IRQ2_IPR_POS 5 +#define IRQ3_IPR_POS 4 +#define IRQ4_IPR_POS 3 +#define IRQ5_IPR_POS 2 +#define IRQ6_IPR_POS 1 +#define IRQ7_IPR_POS 0 + +#define IRQ0_PRIORITY 1 +#define IRQ1_PRIORITY 1 +#define IRQ2_PRIORITY 1 +#define IRQ3_PRIORITY 1 +#define IRQ4_PRIORITY 1 +#define IRQ5_PRIORITY 1 +#define IRQ6_PRIORITY 1 +#define IRQ7_PRIORITY 1 + +#endif /* __ASM_SH_IRQ_SH7343_H */ diff --git a/trunk/include/asm-sh/irq-sh7780.h b/trunk/include/asm-sh/irq-sh7780.h new file mode 100644 index 000000000000..19912ae6a7f7 --- /dev/null +++ b/trunk/include/asm-sh/irq-sh7780.h @@ -0,0 +1,311 @@ +#ifndef __ASM_SH_IRQ_SH7780_H +#define __ASM_SH_IRQ_SH7780_H + +/* + * linux/include/asm-sh/irq-sh7780.h + * + * Copyright (C) 2004 Takashi SHUDO + */ +#define INTC_BASE 0xffd00000 +#define INTC_ICR0 (INTC_BASE+0x0) +#define INTC_ICR1 (INTC_BASE+0x1c) +#define INTC_INTPRI (INTC_BASE+0x10) +#define INTC_INTREQ (INTC_BASE+0x24) +#define INTC_INTMSK0 (INTC_BASE+0x44) +#define INTC_INTMSK1 (INTC_BASE+0x48) +#define INTC_INTMSK2 (INTC_BASE+0x40080) +#define INTC_INTMSKCLR0 (INTC_BASE+0x64) +#define INTC_INTMSKCLR1 (INTC_BASE+0x68) +#define INTC_INTMSKCLR2 (INTC_BASE+0x40084) +#define INTC_NMIFCR (INTC_BASE+0xc0) +#define INTC_USERIMASK (INTC_BASE+0x30000) + +#define INTC_INT2PRI0 (INTC_BASE+0x40000) +#define INTC_INT2PRI1 (INTC_BASE+0x40004) +#define INTC_INT2PRI2 (INTC_BASE+0x40008) +#define INTC_INT2PRI3 (INTC_BASE+0x4000c) +#define INTC_INT2PRI4 (INTC_BASE+0x40010) +#define INTC_INT2PRI5 (INTC_BASE+0x40014) +#define INTC_INT2PRI6 (INTC_BASE+0x40018) +#define INTC_INT2PRI7 (INTC_BASE+0x4001c) +#define INTC_INT2A0 (INTC_BASE+0x40030) +#define INTC_INT2A1 (INTC_BASE+0x40034) +#define INTC_INT2MSKR (INTC_BASE+0x40038) +#define INTC_INT2MSKCR (INTC_BASE+0x4003c) +#define INTC_INT2B0 (INTC_BASE+0x40040) +#define INTC_INT2B1 (INTC_BASE+0x40044) +#define INTC_INT2B2 (INTC_BASE+0x40048) +#define INTC_INT2B3 (INTC_BASE+0x4004c) +#define INTC_INT2B4 (INTC_BASE+0x40050) +#define INTC_INT2B5 (INTC_BASE+0x40054) +#define INTC_INT2B6 (INTC_BASE+0x40058) +#define INTC_INT2B7 (INTC_BASE+0x4005c) +#define INTC_INT2GPIC (INTC_BASE+0x40090) +/* + NOTE: + *_IRQ = (INTEVT2 - 0x200)/0x20 +*/ +/* IRQ 0-7 line external int*/ +#define IRQ0_IRQ 2 +#define IRQ0_IPR_ADDR INTC_INTPRI +#define IRQ0_IPR_POS 7 +#define IRQ0_PRIORITY 2 + +#define IRQ1_IRQ 4 +#define IRQ1_IPR_ADDR INTC_INTPRI +#define IRQ1_IPR_POS 6 +#define IRQ1_PRIORITY 2 + +#define IRQ2_IRQ 6 +#define IRQ2_IPR_ADDR INTC_INTPRI +#define IRQ2_IPR_POS 5 +#define IRQ2_PRIORITY 2 + +#define 
IRQ3_IRQ 8 +#define IRQ3_IPR_ADDR INTC_INTPRI +#define IRQ3_IPR_POS 4 +#define IRQ3_PRIORITY 2 + +#define IRQ4_IRQ 10 +#define IRQ4_IPR_ADDR INTC_INTPRI +#define IRQ4_IPR_POS 3 +#define IRQ4_PRIORITY 2 + +#define IRQ5_IRQ 12 +#define IRQ5_IPR_ADDR INTC_INTPRI +#define IRQ5_IPR_POS 2 +#define IRQ5_PRIORITY 2 + +#define IRQ6_IRQ 14 +#define IRQ6_IPR_ADDR INTC_INTPRI +#define IRQ6_IPR_POS 1 +#define IRQ6_PRIORITY 2 + +#define IRQ7_IRQ 0 +#define IRQ7_IPR_ADDR INTC_INTPRI +#define IRQ7_IPR_POS 0 +#define IRQ7_PRIORITY 2 + +/* TMU */ +/* ch0 */ +#define TMU_IRQ 28 +#define TMU_IPR_ADDR INTC_INT2PRI0 +#define TMU_IPR_POS 3 +#define TMU_PRIORITY 2 + +#define TIMER_IRQ 28 +#define TIMER_IPR_ADDR INTC_INT2PRI0 +#define TIMER_IPR_POS 3 +#define TIMER_PRIORITY 2 + +/* ch 1*/ +#define TMU_CH1_IRQ 29 +#define TMU_CH1_IPR_ADDR INTC_INT2PRI0 +#define TMU_CH1_IPR_POS 2 +#define TMU_CH1_PRIORITY 2 + +#define TIMER1_IRQ 29 +#define TIMER1_IPR_ADDR INTC_INT2PRI0 +#define TIMER1_IPR_POS 2 +#define TIMER1_PRIORITY 2 + +/* ch 2*/ +#define TMU_CH2_IRQ 30 +#define TMU_CH2_IPR_ADDR INTC_INT2PRI0 +#define TMU_CH2_IPR_POS 1 +#define TMU_CH2_PRIORITY 2 +/* ch 2 Input capture */ +#define TMU_CH2IC_IRQ 31 +#define TMU_CH2IC_IPR_ADDR INTC_INT2PRI0 +#define TMU_CH2IC_IPR_POS 0 +#define TMU_CH2IC_PRIORITY 2 +/* ch 3 */ +#define TMU_CH3_IRQ 96 +#define TMU_CH3_IPR_ADDR INTC_INT2PRI1 +#define TMU_CH3_IPR_POS 3 +#define TMU_CH3_PRIORITY 2 +/* ch 4 */ +#define TMU_CH4_IRQ 97 +#define TMU_CH4_IPR_ADDR INTC_INT2PRI1 +#define TMU_CH4_IPR_POS 2 +#define TMU_CH4_PRIORITY 2 +/* ch 5*/ +#define TMU_CH5_IRQ 98 +#define TMU_CH5_IPR_ADDR INTC_INT2PRI1 +#define TMU_CH5_IPR_POS 1 +#define TMU_CH5_PRIORITY 2 + +/* SCIF0 */ +#define SCIF0_ERI_IRQ 40 +#define SCIF0_RXI_IRQ 41 +#define SCIF0_BRI_IRQ 42 +#define SCIF0_TXI_IRQ 43 +#define SCIF0_IPR_ADDR INTC_INT2PRI2 +#define SCIF0_IPR_POS 3 +#define SCIF0_PRIORITY 3 + +/* SCIF1 */ +#define SCIF1_ERI_IRQ 76 +#define SCIF1_RXI_IRQ 77 +#define SCIF1_BRI_IRQ 78 +#define SCIF1_TXI_IRQ 79 +#define SCIF1_IPR_ADDR INTC_INT2PRI2 +#define SCIF1_IPR_POS 2 +#define SCIF1_PRIORITY 3 + +#define WDT_IRQ 27 +#define WDT_IPR_ADDR INTC_INT2PRI2 +#define WDT_IPR_POS 1 +#define WDT_PRIORITY 2 + +/* DMAC(0) */ +#define DMINT0_IRQ 34 +#define DMINT1_IRQ 35 +#define DMINT2_IRQ 36 +#define DMINT3_IRQ 37 +#define DMINT4_IRQ 44 +#define DMINT5_IRQ 45 +#define DMINT6_IRQ 46 +#define DMINT7_IRQ 47 +#define DMAE_IRQ 38 +#define DMA0_IPR_ADDR INTC_INT2PRI3 +#define DMA0_IPR_POS 2 +#define DMA0_PRIORITY 7 + +/* DMAC(1) */ +#define DMINT8_IRQ 92 +#define DMINT9_IRQ 93 +#define DMINT10_IRQ 94 +#define DMINT11_IRQ 95 +#define DMA1_IPR_ADDR INTC_INT2PRI3 +#define DMA1_IPR_POS 1 +#define DMA1_PRIORITY 7 + +#define DMTE0_IRQ DMINT0_IRQ +#define DMTE4_IRQ DMINT4_IRQ +#define DMA_IPR_ADDR DMA0_IPR_ADDR +#define DMA_IPR_POS DMA0_IPR_POS +#define DMA_PRIORITY DMA0_PRIORITY + +/* CMT */ +#define CMT_IRQ 56 +#define CMT_IPR_ADDR INTC_INT2PRI4 +#define CMT_IPR_POS 3 +#define CMT_PRIORITY 0 + +/* HAC */ +#define HAC_IRQ 60 +#define HAC_IPR_ADDR INTC_INT2PRI4 +#define HAC_IPR_POS 2 +#define CMT_PRIORITY 0 + +/* PCIC(0) */ +#define PCIC0_IRQ 64 +#define PCIC0_IPR_ADDR INTC_INT2PRI4 +#define PCIC0_IPR_POS 1 +#define PCIC0_PRIORITY 2 + +/* PCIC(1) */ +#define PCIC1_IRQ 65 +#define PCIC1_IPR_ADDR INTC_INT2PRI4 +#define PCIC1_IPR_POS 0 +#define PCIC1_PRIORITY 2 + +/* PCIC(2) */ +#define PCIC2_IRQ 66 +#define PCIC2_IPR_ADDR INTC_INT2PRI5 +#define PCIC2_IPR_POS 3 +#define PCIC2_PRIORITY 2 + +/* PCIC(3) */ +#define PCIC3_IRQ 67 +#define 
PCIC3_IPR_ADDR INTC_INT2PRI5 +#define PCIC3_IPR_POS 2 +#define PCIC3_PRIORITY 2 + +/* PCIC(4) */ +#define PCIC4_IRQ 68 +#define PCIC4_IPR_ADDR INTC_INT2PRI5 +#define PCIC4_IPR_POS 1 +#define PCIC4_PRIORITY 2 + +/* PCIC(5) */ +#define PCICERR_IRQ 69 +#define PCICPWD3_IRQ 70 +#define PCICPWD2_IRQ 71 +#define PCICPWD1_IRQ 72 +#define PCICPWD0_IRQ 73 +#define PCIC5_IPR_ADDR INTC_INT2PRI5 +#define PCIC5_IPR_POS 0 +#define PCIC5_PRIORITY 2 + +/* SIOF */ +#define SIOF_IRQ 80 +#define SIOF_IPR_ADDR INTC_INT2PRI6 +#define SIOF_IPR_POS 3 +#define SIOF_PRIORITY 3 + +/* HSPI */ +#define HSPI_IRQ 84 +#define HSPI_IPR_ADDR INTC_INT2PRI6 +#define HSPI_IPR_POS 2 +#define HSPI_PRIORITY 3 + +/* MMCIF */ +#define MMCIF_FSTAT_IRQ 88 +#define MMCIF_TRAN_IRQ 89 +#define MMCIF_ERR_IRQ 90 +#define MMCIF_FRDY_IRQ 91 +#define MMCIF_IPR_ADDR INTC_INT2PRI6 +#define MMCIF_IPR_POS 1 +#define HSPI_PRIORITY 3 + +/* SSI */ +#define SSI_IRQ 100 +#define SSI_IPR_ADDR INTC_INT2PRI6 +#define SSI_IPR_POS 0 +#define SSI_PRIORITY 3 + +/* FLCTL */ +#define FLCTL_FLSTE_IRQ 104 +#define FLCTL_FLTEND_IRQ 105 +#define FLCTL_FLTRQ0_IRQ 106 +#define FLCTL_FLTRQ1_IRQ 107 +#define FLCTL_IPR_ADDR INTC_INT2PRI7 +#define FLCTL_IPR_POS 3 +#define FLCTL_PRIORITY 3 + +/* GPIO */ +#define GPIO0_IRQ 108 +#define GPIO1_IRQ 109 +#define GPIO2_IRQ 110 +#define GPIO3_IRQ 111 +#define GPIO_IPR_ADDR INTC_INT2PRI7 +#define GPIO_IPR_POS 2 +#define GPIO_PRIORITY 3 + +#define INTC_TMU0_MSK 0 +#define INTC_TMU3_MSK 1 +#define INTC_RTC_MSK 2 +#define INTC_SCIF0_MSK 3 +#define INTC_SCIF1_MSK 4 +#define INTC_WDT_MSK 5 +#define INTC_HUID_MSK 7 +#define INTC_DMAC0_MSK 8 +#define INTC_DMAC1_MSK 9 +#define INTC_CMT_MSK 12 +#define INTC_HAC_MSK 13 +#define INTC_PCIC0_MSK 14 +#define INTC_PCIC1_MSK 15 +#define INTC_PCIC2_MSK 16 +#define INTC_PCIC3_MSK 17 +#define INTC_PCIC4_MSK 18 +#define INTC_PCIC5_MSK 19 +#define INTC_SIOF_MSK 20 +#define INTC_HSPI_MSK 21 +#define INTC_MMCIF_MSK 22 +#define INTC_SSI_MSK 23 +#define INTC_FLCTL_MSK 24 +#define INTC_GPIO_MSK 25 + +#endif /* __ASM_SH_IRQ_SH7780_H */ diff --git a/trunk/include/asm-sh/irq.h b/trunk/include/asm-sh/irq.h index fd576088e47e..6cd3e9e2a76a 100644 --- a/trunk/include/asm-sh/irq.h +++ b/trunk/include/asm-sh/irq.h @@ -1,9 +1,233 @@ #ifndef __ASM_SH_IRQ_H #define __ASM_SH_IRQ_H +/* + * + * linux/include/asm-sh/irq.h + * + * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi + * Copyright (C) 2000 Kazumoto Kojima + * Copyright (C) 2003 Paul Mundt + * + */ + #include #include /* for pt_regs */ +#ifndef CONFIG_CPU_SUBTYPE_SH7780 + +#define INTC_DMAC0_MSK 0 + +#if defined(CONFIG_CPU_SH3) +#define INTC_IPRA 0xfffffee2UL +#define INTC_IPRB 0xfffffee4UL +#elif defined(CONFIG_CPU_SH4) +#define INTC_IPRA 0xffd00004UL +#define INTC_IPRB 0xffd00008UL +#define INTC_IPRC 0xffd0000cUL +#define INTC_IPRD 0xffd00010UL +#endif + +#define TIMER_IRQ 16 +#define TIMER_IPR_ADDR INTC_IPRA +#define TIMER_IPR_POS 3 +#define TIMER_PRIORITY 2 + +#define TIMER1_IRQ 17 +#define TIMER1_IPR_ADDR INTC_IPRA +#define TIMER1_IPR_POS 2 +#define TIMER1_PRIORITY 4 + +#define RTC_IRQ 22 +#define RTC_IPR_ADDR INTC_IPRA +#define RTC_IPR_POS 0 +#define RTC_PRIORITY TIMER_PRIORITY + +#if defined(CONFIG_CPU_SH3) +#define DMTE0_IRQ 48 +#define DMTE1_IRQ 49 +#define DMTE2_IRQ 50 +#define DMTE3_IRQ 51 +#define DMA_IPR_ADDR INTC_IPRE +#define DMA_IPR_POS 3 +#define DMA_PRIORITY 7 +#if defined(CONFIG_CPU_SUBTYPE_SH7300) +/* TMU2 */ +#define TIMER2_IRQ 18 +#define TIMER2_IPR_ADDR INTC_IPRA +#define TIMER2_IPR_POS 1 +#define TIMER2_PRIORITY 2 + +/* WDT */ 
+#define WDT_IRQ 27 +#define WDT_IPR_ADDR INTC_IPRB +#define WDT_IPR_POS 3 +#define WDT_PRIORITY 2 + +/* SIM (SIM Card Module) */ +#define SIM_ERI_IRQ 23 +#define SIM_RXI_IRQ 24 +#define SIM_TXI_IRQ 25 +#define SIM_TEND_IRQ 26 +#define SIM_IPR_ADDR INTC_IPRB +#define SIM_IPR_POS 1 +#define SIM_PRIORITY 2 + +/* VIO (Video I/O) */ +#define VIO_IRQ 52 +#define VIO_IPR_ADDR INTC_IPRE +#define VIO_IPR_POS 2 +#define VIO_PRIORITY 2 + +/* MFI (Multi Functional Interface) */ +#define MFI_IRQ 56 +#define MFI_IPR_ADDR INTC_IPRE +#define MFI_IPR_POS 1 +#define MFI_PRIORITY 2 + +/* VPU (Video Processing Unit) */ +#define VPU_IRQ 60 +#define VPU_IPR_ADDR INTC_IPRE +#define VPU_IPR_POS 0 +#define VPU_PRIORITY 2 + +/* KEY (Key Scan Interface) */ +#define KEY_IRQ 79 +#define KEY_IPR_ADDR INTC_IPRF +#define KEY_IPR_POS 3 +#define KEY_PRIORITY 2 + +/* CMT (Compare Match Timer) */ +#define CMT_IRQ 104 +#define CMT_IPR_ADDR INTC_IPRF +#define CMT_IPR_POS 0 +#define CMT_PRIORITY 2 + +/* DMAC(1) */ +#define DMTE0_IRQ 48 +#define DMTE1_IRQ 49 +#define DMTE2_IRQ 50 +#define DMTE3_IRQ 51 +#define DMA1_IPR_ADDR INTC_IPRE +#define DMA1_IPR_POS 3 +#define DMA1_PRIORITY 7 + +/* DMAC(2) */ +#define DMTE4_IRQ 76 +#define DMTE5_IRQ 77 +#define DMA2_IPR_ADDR INTC_IPRF +#define DMA2_IPR_POS 2 +#define DMA2_PRIORITY 7 + +/* SIOF0 */ +#define SIOF0_IRQ 84 +#define SIOF0_IPR_ADDR INTC_IPRH +#define SIOF0_IPR_POS 3 +#define SIOF0_PRIORITY 3 + +/* FLCTL (Flash Memory Controller) */ +#define FLSTE_IRQ 92 +#define FLTEND_IRQ 93 +#define FLTRQ0_IRQ 94 +#define FLTRQ1_IRQ 95 +#define FLCTL_IPR_ADDR INTC_IPRH +#define FLCTL_IPR_POS 1 +#define FLCTL_PRIORITY 3 + +/* IIC (IIC Bus Interface) */ +#define IIC_ALI_IRQ 96 +#define IIC_TACKI_IRQ 97 +#define IIC_WAITI_IRQ 98 +#define IIC_DTEI_IRQ 99 +#define IIC_IPR_ADDR INTC_IPRH +#define IIC_IPR_POS 0 +#define IIC_PRIORITY 3 + +/* SIO0 */ +#define SIO0_IRQ 88 +#define SIO0_IPR_ADDR INTC_IPRI +#define SIO0_IPR_POS 3 +#define SIO0_PRIORITY 3 + +/* SIU (Sound Interface Unit) */ +#define SIU_IRQ 108 +#define SIU_IPR_ADDR INTC_IPRJ +#define SIU_IPR_POS 1 +#define SIU_PRIORITY 3 + +#endif +#elif defined(CONFIG_CPU_SH4) +#define DMTE0_IRQ 34 +#define DMTE1_IRQ 35 +#define DMTE2_IRQ 36 +#define DMTE3_IRQ 37 +#define DMTE4_IRQ 44 /* 7751R only */ +#define DMTE5_IRQ 45 /* 7751R only */ +#define DMTE6_IRQ 46 /* 7751R only */ +#define DMTE7_IRQ 47 /* 7751R only */ +#define DMAE_IRQ 38 +#define DMA_IPR_ADDR INTC_IPRC +#define DMA_IPR_POS 2 +#define DMA_PRIORITY 7 +#endif + +#if defined (CONFIG_CPU_SUBTYPE_SH7707) || defined (CONFIG_CPU_SUBTYPE_SH7708) || \ + defined (CONFIG_CPU_SUBTYPE_SH7709) || defined (CONFIG_CPU_SUBTYPE_SH7750) || \ + defined (CONFIG_CPU_SUBTYPE_SH7751) || defined (CONFIG_CPU_SUBTYPE_SH7706) +#define SCI_ERI_IRQ 23 +#define SCI_RXI_IRQ 24 +#define SCI_TXI_IRQ 25 +#define SCI_IPR_ADDR INTC_IPRB +#define SCI_IPR_POS 1 +#define SCI_PRIORITY 3 +#endif + +#if defined(CONFIG_CPU_SUBTYPE_SH7300) +#define SCIF0_IRQ 80 +#define SCIF0_IPR_ADDR INTC_IPRG +#define SCIF0_IPR_POS 3 +#define SCIF0_PRIORITY 3 +#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \ + defined(CONFIG_CPU_SUBTYPE_SH7706) || \ + defined(CONFIG_CPU_SUBTYPE_SH7707) || \ + defined(CONFIG_CPU_SUBTYPE_SH7709) +#define SCIF_ERI_IRQ 56 +#define SCIF_RXI_IRQ 57 +#define SCIF_BRI_IRQ 58 +#define SCIF_TXI_IRQ 59 +#define SCIF_IPR_ADDR INTC_IPRE +#define SCIF_IPR_POS 1 +#define SCIF_PRIORITY 3 + +#define IRDA_ERI_IRQ 52 +#define IRDA_RXI_IRQ 53 +#define IRDA_BRI_IRQ 54 +#define IRDA_TXI_IRQ 55 +#define IRDA_IPR_ADDR INTC_IPRE +#define 
IRDA_IPR_POS 2 +#define IRDA_PRIORITY 3 +#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || defined(CONFIG_CPU_SUBTYPE_SH7751) || \ + defined(CONFIG_CPU_SUBTYPE_ST40STB1) || defined(CONFIG_CPU_SUBTYPE_SH4_202) +#define SCIF_ERI_IRQ 40 +#define SCIF_RXI_IRQ 41 +#define SCIF_BRI_IRQ 42 +#define SCIF_TXI_IRQ 43 +#define SCIF_IPR_ADDR INTC_IPRC +#define SCIF_IPR_POS 1 +#define SCIF_PRIORITY 3 +#if defined(CONFIG_CPU_SUBTYPE_ST40STB1) +#define SCIF1_ERI_IRQ 23 +#define SCIF1_RXI_IRQ 24 +#define SCIF1_BRI_IRQ 25 +#define SCIF1_TXI_IRQ 26 +#define SCIF1_IPR_ADDR INTC_IPRB +#define SCIF1_IPR_POS 1 +#define SCIF1_PRIORITY 3 +#endif /* ST40STB1 */ + +#endif /* 775x / SH4-202 / ST40STB1 */ +#endif /* 7780 */ + /* NR_IRQS is made from three components: * 1. ONCHIP_NR_IRQS - number of IRLS + on-chip peripherial modules * 2. PINT_NR_IRQS - number of PINT interrupts @@ -41,10 +265,6 @@ # define ONCHIP_NR_IRQS 109 #elif defined(CONFIG_CPU_SUBTYPE_SH7780) # define ONCHIP_NR_IRQS 111 -#elif defined(CONFIG_CPU_SUBTYPE_SH7206) -# define ONCHIP_NR_IRQS 256 -#elif defined(CONFIG_CPU_SUBTYPE_SH7619) -# define ONCHIP_NR_IRQS 128 #elif defined(CONFIG_SH_UNKNOWN) /* Most be last */ # define ONCHIP_NR_IRQS 144 #endif @@ -92,11 +312,9 @@ /* NR_IRQS. 1+2+3 */ #define NR_IRQS (ONCHIP_NR_IRQS + PINT_NR_IRQS + OFFCHIP_NR_IRQS) -/* - * Convert back and forth between INTEVT and IRQ values. - */ -#define evt2irq(evt) (((evt) >> 5) - 16) -#define irq2evt(irq) (((irq) + 16) << 5) +extern void disable_irq(unsigned int); +extern void disable_irq_nosync(unsigned int); +extern void enable_irq(unsigned int); /* * Simple Mask Register Support @@ -109,36 +327,362 @@ extern unsigned short *irq_mask_register; */ void init_IRQ_pint(void); -/* - * The shift value is now the number of bits to shift, not the number of - * bits/4. This is to make it easier to read the value directly from the - * datasheets. The IPR address, addr, will be set from ipr_idx via the - * map_ipridx_to_addr function. - */ struct ipr_data { unsigned int irq; - int ipr_idx; /* Index for the IPR registered */ - int shift; /* Number of bits to shift the data */ - int priority; /* The priority */ unsigned int addr; /* Address of Interrupt Priority Register */ + int shift; /* Shifts of the 16-bit data */ + int priority; /* The priority */ }; /* - * Given an IPR IDX, map the value to an IPR register address. + * Function for "on chip support modules". */ -unsigned int map_ipridx_to_addr(int idx); +extern void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs); +extern void make_imask_irq(unsigned int irq); -/* - * Enable individual interrupt mode for external IPR IRQs. - */ -void ipr_irq_enable_irlm(void); +#if defined(CONFIG_CPU_SUBTYPE_SH7300) +#undef INTC_IPRA +#undef INTC_IPRB +#define INTC_IPRA 0xA414FEE2UL +#define INTC_IPRB 0xA414FEE4UL +#define INTC_IPRC 0xA4140016UL +#define INTC_IPRD 0xA4140018UL +#define INTC_IPRE 0xA414001AUL +#define INTC_IPRF 0xA4080000UL +#define INTC_IPRG 0xA4080002UL +#define INTC_IPRH 0xA4080004UL +#define INTC_IPRI 0xA4080006UL +#define INTC_IPRJ 0xA4080008UL -/* - * Function for "on chip support modules". 
- */ -void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs); -void make_imask_irq(unsigned int irq); -void init_IRQ_ipr(void); +#define INTC_IMR0 0xA4080040UL +#define INTC_IMR1 0xA4080042UL +#define INTC_IMR2 0xA4080044UL +#define INTC_IMR3 0xA4080046UL +#define INTC_IMR4 0xA4080048UL +#define INTC_IMR5 0xA408004AUL +#define INTC_IMR6 0xA408004CUL +#define INTC_IMR7 0xA408004EUL +#define INTC_IMR8 0xA4080050UL +#define INTC_IMR9 0xA4080052UL +#define INTC_IMR10 0xA4080054UL + +#define INTC_IMCR0 0xA4080060UL +#define INTC_IMCR1 0xA4080062UL +#define INTC_IMCR2 0xA4080064UL +#define INTC_IMCR3 0xA4080066UL +#define INTC_IMCR4 0xA4080068UL +#define INTC_IMCR5 0xA408006AUL +#define INTC_IMCR6 0xA408006CUL +#define INTC_IMCR7 0xA408006EUL +#define INTC_IMCR8 0xA4080070UL +#define INTC_IMCR9 0xA4080072UL +#define INTC_IMCR10 0xA4080074UL + +#define INTC_ICR0 0xA414FEE0UL +#define INTC_ICR1 0xA4140010UL + +#define INTC_IRR0 0xA4140004UL + +#define PORT_PACR 0xA4050100UL +#define PORT_PBCR 0xA4050102UL +#define PORT_PCCR 0xA4050104UL +#define PORT_PDCR 0xA4050106UL +#define PORT_PECR 0xA4050108UL +#define PORT_PFCR 0xA405010AUL +#define PORT_PGCR 0xA405010CUL +#define PORT_PHCR 0xA405010EUL +#define PORT_PJCR 0xA4050110UL +#define PORT_PKCR 0xA4050112UL +#define PORT_PLCR 0xA4050114UL +#define PORT_SCPCR 0xA4050116UL +#define PORT_PMCR 0xA4050118UL +#define PORT_PNCR 0xA405011AUL +#define PORT_PQCR 0xA405011CUL + +#define PORT_PSELA 0xA4050140UL +#define PORT_PSELB 0xA4050142UL +#define PORT_PSELC 0xA4050144UL + +#define PORT_HIZCRA 0xA4050146UL +#define PORT_HIZCRB 0xA4050148UL +#define PORT_DRVCR 0xA4050150UL + +#define PORT_PADR 0xA4050120UL +#define PORT_PBDR 0xA4050122UL +#define PORT_PCDR 0xA4050124UL +#define PORT_PDDR 0xA4050126UL +#define PORT_PEDR 0xA4050128UL +#define PORT_PFDR 0xA405012AUL +#define PORT_PGDR 0xA405012CUL +#define PORT_PHDR 0xA405012EUL +#define PORT_PJDR 0xA4050130UL +#define PORT_PKDR 0xA4050132UL +#define PORT_PLDR 0xA4050134UL +#define PORT_SCPDR 0xA4050136UL +#define PORT_PMDR 0xA4050138UL +#define PORT_PNDR 0xA405013AUL +#define PORT_PQDR 0xA405013CUL + +#define IRQ0_IRQ 32 +#define IRQ1_IRQ 33 +#define IRQ2_IRQ 34 +#define IRQ3_IRQ 35 +#define IRQ4_IRQ 36 +#define IRQ5_IRQ 37 + +#define IRQ0_IPR_ADDR INTC_IPRC +#define IRQ1_IPR_ADDR INTC_IPRC +#define IRQ2_IPR_ADDR INTC_IPRC +#define IRQ3_IPR_ADDR INTC_IPRC +#define IRQ4_IPR_ADDR INTC_IPRD +#define IRQ5_IPR_ADDR INTC_IPRD + +#define IRQ0_IPR_POS 0 +#define IRQ1_IPR_POS 1 +#define IRQ2_IPR_POS 2 +#define IRQ3_IPR_POS 3 +#define IRQ4_IPR_POS 0 +#define IRQ5_IPR_POS 1 + +#define IRQ0_PRIORITY 1 +#define IRQ1_PRIORITY 1 +#define IRQ2_PRIORITY 1 +#define IRQ3_PRIORITY 1 +#define IRQ4_PRIORITY 1 +#define IRQ5_PRIORITY 1 + +extern int ipr_irq_demux(int irq); +#define __irq_demux(irq) ipr_irq_demux(irq) + +#elif defined(CONFIG_CPU_SUBTYPE_SH7604) +#define INTC_IPRA 0xfffffee2UL +#define INTC_IPRB 0xfffffe60UL + +#define INTC_VCRA 0xfffffe62UL +#define INTC_VCRB 0xfffffe64UL +#define INTC_VCRC 0xfffffe66UL +#define INTC_VCRD 0xfffffe68UL + +#define INTC_VCRWDT 0xfffffee4UL +#define INTC_VCRDIV 0xffffff0cUL +#define INTC_VCRDMA0 0xffffffa0UL +#define INTC_VCRDMA1 0xffffffa8UL + +#define INTC_ICR 0xfffffee0UL +#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \ + defined(CONFIG_CPU_SUBTYPE_SH7706) || \ + defined(CONFIG_CPU_SUBTYPE_SH7707) || \ + defined(CONFIG_CPU_SUBTYPE_SH7709) || \ + defined(CONFIG_CPU_SUBTYPE_SH7710) +#define INTC_IRR0 0xa4000004UL +#define INTC_IRR1 0xa4000006UL +#define INTC_IRR2 0xa4000008UL + 
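The make_ipr_irq() interface declared above is table-driven: each struct ipr_data entry is { irq, addr, shift, priority }, and in this layout the shift field is the 4-bit field position within the 16-bit IPR register (the *_IPR_POS values), not a raw bit count. A hypothetical registration sketch, assuming a non-SH7780 configuration so the TIMER_*/RTC_* macros above are available; the example_* names are illustrative, not taken from the tree:

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/irq.h>

/* Hypothetical table: { irq, IPR register address, 4-bit field position,
 * priority }, matching struct ipr_data as declared in this header.
 */
static struct ipr_data example_ipr_map[] = {
	{ TIMER_IRQ,  TIMER_IPR_ADDR,  TIMER_IPR_POS,  TIMER_PRIORITY  },
	{ TIMER1_IRQ, TIMER1_IPR_ADDR, TIMER1_IPR_POS, TIMER1_PRIORITY },
	{ RTC_IRQ,    RTC_IPR_ADDR,    RTC_IPR_POS,    RTC_PRIORITY    },
};

static void __init example_init_irq(void)
{
	make_ipr_irq(example_ipr_map, ARRAY_SIZE(example_ipr_map));
}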
+#define INTC_ICR0 0xfffffee0UL +#define INTC_ICR1 0xa4000010UL +#define INTC_ICR2 0xa4000012UL +#define INTC_INTER 0xa4000014UL + +#define INTC_IPRC 0xa4000016UL +#define INTC_IPRD 0xa4000018UL +#define INTC_IPRE 0xa400001aUL +#if defined(CONFIG_CPU_SUBTYPE_SH7707) +#define INTC_IPRF 0xa400001cUL +#elif defined(CONFIG_CPU_SUBTYPE_SH7705) +#define INTC_IPRF 0xa4080000UL +#define INTC_IPRG 0xa4080002UL +#define INTC_IPRH 0xa4080004UL +#elif defined(CONFIG_CPU_SUBTYPE_SH7710) +/* Interrupt Controller Registers */ +#undef INTC_IPRA +#undef INTC_IPRB +#define INTC_IPRA 0xA414FEE2UL +#define INTC_IPRB 0xA414FEE4UL +#define INTC_IPRF 0xA4080000UL +#define INTC_IPRG 0xA4080002UL +#define INTC_IPRH 0xA4080004UL +#define INTC_IPRI 0xA4080006UL + +#undef INTC_ICR0 +#undef INTC_ICR1 +#define INTC_ICR0 0xA414FEE0UL +#define INTC_ICR1 0xA4140010UL + +#define INTC_IRR0 0xa4000004UL +#define INTC_IRR1 0xa4000006UL +#define INTC_IRR2 0xa4000008UL +#define INTC_IRR3 0xa400000AUL +#define INTC_IRR4 0xa400000CUL +#define INTC_IRR5 0xa4080020UL +#define INTC_IRR7 0xa4080024UL +#define INTC_IRR8 0xa4080026UL + +/* Interrupt numbers */ +#define TIMER2_IRQ 18 +#define TIMER2_IPR_ADDR INTC_IPRA +#define TIMER2_IPR_POS 1 +#define TIMER2_PRIORITY 2 + +/* WDT */ +#define WDT_IRQ 27 +#define WDT_IPR_ADDR INTC_IPRB +#define WDT_IPR_POS 3 +#define WDT_PRIORITY 2 + +#define SCIF0_ERI_IRQ 52 +#define SCIF0_RXI_IRQ 53 +#define SCIF0_BRI_IRQ 54 +#define SCIF0_TXI_IRQ 55 +#define SCIF0_IPR_ADDR INTC_IPRE +#define SCIF0_IPR_POS 2 +#define SCIF0_PRIORITY 3 + +#define DMTE4_IRQ 76 +#define DMTE5_IRQ 77 +#define DMA2_IPR_ADDR INTC_IPRF +#define DMA2_IPR_POS 2 +#define DMA2_PRIORITY 7 + +#define IPSEC_IRQ 79 +#define IPSEC_IPR_ADDR INTC_IPRF +#define IPSEC_IPR_POS 3 +#define IPSEC_PRIORITY 3 + +/* EDMAC */ +#define EDMAC0_IRQ 80 +#define EDMAC0_IPR_ADDR INTC_IPRG +#define EDMAC0_IPR_POS 3 +#define EDMAC0_PRIORITY 3 + +#define EDMAC1_IRQ 81 +#define EDMAC1_IPR_ADDR INTC_IPRG +#define EDMAC1_IPR_POS 2 +#define EDMAC1_PRIORITY 3 + +#define EDMAC2_IRQ 82 +#define EDMAC2_IPR_ADDR INTC_IPRG +#define EDMAC2_IPR_POS 1 +#define EDMAC2_PRIORITY 3 + +/* SIOF */ +#define SIOF0_ERI_IRQ 96 +#define SIOF0_TXI_IRQ 97 +#define SIOF0_RXI_IRQ 98 +#define SIOF0_CCI_IRQ 99 +#define SIOF0_IPR_ADDR INTC_IPRH +#define SIOF0_IPR_POS 0 +#define SIOF0_PRIORITY 7 + +#define SIOF1_ERI_IRQ 100 +#define SIOF1_TXI_IRQ 101 +#define SIOF1_RXI_IRQ 102 +#define SIOF1_CCI_IRQ 103 +#define SIOF1_IPR_ADDR INTC_IPRI +#define SIOF1_IPR_POS 1 +#define SIOF1_PRIORITY 7 +#endif /* CONFIG_CPU_SUBTYPE_SH7710 */ + +#if defined(CONFIG_CPU_SUBTYPE_SH7710) +#define PORT_PACR 0xa4050100UL +#define PORT_PBCR 0xa4050102UL +#define PORT_PCCR 0xa4050104UL +#define PORT_PETCR 0xa4050106UL +#define PORT_PADR 0xa4050120UL +#define PORT_PBDR 0xa4050122UL +#define PORT_PCDR 0xa4050124UL +#else +#define PORT_PACR 0xa4000100UL +#define PORT_PBCR 0xa4000102UL +#define PORT_PCCR 0xa4000104UL +#define PORT_PFCR 0xa400010aUL +#define PORT_PADR 0xa4000120UL +#define PORT_PBDR 0xa4000122UL +#define PORT_PCDR 0xa4000124UL +#define PORT_PFDR 0xa400012aUL +#endif + +#define IRQ0_IRQ 32 +#define IRQ1_IRQ 33 +#define IRQ2_IRQ 34 +#define IRQ3_IRQ 35 +#define IRQ4_IRQ 36 +#define IRQ5_IRQ 37 + +#define IRQ0_IPR_ADDR INTC_IPRC +#define IRQ1_IPR_ADDR INTC_IPRC +#define IRQ2_IPR_ADDR INTC_IPRC +#define IRQ3_IPR_ADDR INTC_IPRC +#define IRQ4_IPR_ADDR INTC_IPRD +#define IRQ5_IPR_ADDR INTC_IPRD + +#define IRQ0_IPR_POS 0 +#define IRQ1_IPR_POS 1 +#define IRQ2_IPR_POS 2 +#define IRQ3_IPR_POS 3 +#define 
IRQ4_IPR_POS 0 +#define IRQ5_IPR_POS 1 + +#define IRQ0_PRIORITY 1 +#define IRQ1_PRIORITY 1 +#define IRQ2_PRIORITY 1 +#define IRQ3_PRIORITY 1 +#define IRQ4_PRIORITY 1 +#define IRQ5_PRIORITY 1 + +#define PINT0_IRQ 40 +#define PINT8_IRQ 41 + +#define PINT0_IPR_ADDR INTC_IPRD +#define PINT8_IPR_ADDR INTC_IPRD + +#define PINT0_IPR_POS 3 +#define PINT8_IPR_POS 2 +#define PINT0_PRIORITY 2 +#define PINT8_PRIORITY 2 + +extern int ipr_irq_demux(int irq); +#define __irq_demux(irq) ipr_irq_demux(irq) +#endif /* CONFIG_CPU_SUBTYPE_SH7707 || CONFIG_CPU_SUBTYPE_SH7709 */ + +#if defined(CONFIG_CPU_SUBTYPE_SH7750) || defined(CONFIG_CPU_SUBTYPE_SH7751) || \ + defined(CONFIG_CPU_SUBTYPE_ST40STB1) || defined(CONFIG_CPU_SUBTYPE_SH4_202) +#define INTC_ICR 0xffd00000 +#define INTC_ICR_NMIL (1<<15) +#define INTC_ICR_MAI (1<<14) +#define INTC_ICR_NMIB (1<<9) +#define INTC_ICR_NMIE (1<<8) +#define INTC_ICR_IRLM (1<<7) +#endif + +#ifdef CONFIG_CPU_SUBTYPE_SH7780 +#include +#endif + +/* SH with INTC2-style interrupts */ +#ifdef CONFIG_CPU_HAS_INTC2_IRQ +#if defined(CONFIG_CPU_SUBTYPE_ST40STB1) +#define INTC2_BASE 0xfe080000 +#define INTC2_FIRST_IRQ 64 +#define INTC2_INTREQ_OFFSET 0x20 +#define INTC2_INTMSK_OFFSET 0x40 +#define INTC2_INTMSKCLR_OFFSET 0x60 +#define NR_INTC2_IRQS 25 +#elif defined(CONFIG_CPU_SUBTYPE_SH7760) +#define INTC2_BASE 0xfe080000 +#define INTC2_FIRST_IRQ 48 /* INTEVT 0x800 */ +#define INTC2_INTREQ_OFFSET 0x20 +#define INTC2_INTMSK_OFFSET 0x40 +#define INTC2_INTMSKCLR_OFFSET 0x60 +#define NR_INTC2_IRQS 64 +#elif defined(CONFIG_CPU_SUBTYPE_SH7780) +#define INTC2_BASE 0xffd40000 +#define INTC2_FIRST_IRQ 21 +#define INTC2_INTMSK_OFFSET (0x38) +#define INTC2_INTMSKCLR_OFFSET (0x3c) +#define NR_INTC2_IRQS 60 +#endif + +#define INTC2_INTPRI_OFFSET 0x00 struct intc2_data { unsigned short irq; @@ -149,14 +693,20 @@ struct intc2_data { void make_intc2_irq(struct intc2_data *, unsigned int nr_irqs); void init_IRQ_intc2(void); +#endif + +extern int shmse_irq_demux(int irq); static inline int generic_irq_demux(int irq) { return irq; } +#ifndef __irq_demux +#define __irq_demux(irq) (irq) +#endif #define irq_canonicalize(irq) (irq) -#define irq_demux(irq) sh_mv.mv_irq_demux(irq) +#define irq_demux(irq) __irq_demux(sh_mv.mv_irq_demux(irq)) #ifdef CONFIG_4KSTACKS extern void irq_ctx_init(int cpu); @@ -167,4 +717,12 @@ extern void irq_ctx_exit(int cpu); # define irq_ctx_exit(cpu) do { } while (0) #endif +#if defined(CONFIG_CPU_SUBTYPE_SH73180) +#include +#endif + +#if defined(CONFIG_CPU_SUBTYPE_SH7343) +#include +#endif + #endif /* __ASM_SH_IRQ_H */ diff --git a/trunk/include/asm-sh/irqflags.h b/trunk/include/asm-sh/irqflags.h deleted file mode 100644 index 9dedc1b693e3..000000000000 --- a/trunk/include/asm-sh/irqflags.h +++ /dev/null @@ -1,123 +0,0 @@ -#ifndef __ASM_SH_IRQFLAGS_H -#define __ASM_SH_IRQFLAGS_H - -static inline void raw_local_irq_enable(void) -{ - unsigned long __dummy0, __dummy1; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "and %1, %0\n\t" -#ifdef CONFIG_CPU_HAS_SR_RB - "stc r6_bank, %1\n\t" - "or %1, %0\n\t" -#endif - "ldc %0, sr\n\t" - : "=&r" (__dummy0), "=r" (__dummy1) - : "1" (~0x000000f0) - : "memory" - ); -} - -static inline void raw_local_irq_disable(void) -{ - unsigned long flags; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "or #0xf0, %0\n\t" - "ldc %0, sr\n\t" - : "=&z" (flags) - : /* no inputs */ - : "memory" - ); -} - -static inline void set_bl_bit(void) -{ - unsigned long __dummy0, __dummy1; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "or %2, %0\n\t" - "and %3, %0\n\t" - 
"ldc %0, sr\n\t" - : "=&r" (__dummy0), "=r" (__dummy1) - : "r" (0x10000000), "r" (0xffffff0f) - : "memory" - ); -} - -static inline void clear_bl_bit(void) -{ - unsigned long __dummy0, __dummy1; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "and %2, %0\n\t" - "ldc %0, sr\n\t" - : "=&r" (__dummy0), "=r" (__dummy1) - : "1" (~0x10000000) - : "memory" - ); -} - -static inline unsigned long __raw_local_save_flags(void) -{ - unsigned long flags; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "and #0xf0, %0\n\t" - : "=&z" (flags) - : /* no inputs */ - : "memory" - ); - - return flags; -} - -#define raw_local_save_flags(flags) \ - do { (flags) = __raw_local_save_flags(); } while (0) - -static inline int raw_irqs_disabled_flags(unsigned long flags) -{ - return (flags != 0); -} - -static inline int raw_irqs_disabled(void) -{ - unsigned long flags = __raw_local_save_flags(); - - return raw_irqs_disabled_flags(flags); -} - -static inline unsigned long __raw_local_irq_save(void) -{ - unsigned long flags, __dummy; - - __asm__ __volatile__ ( - "stc sr, %1\n\t" - "mov %1, %0\n\t" - "or #0xf0, %0\n\t" - "ldc %0, sr\n\t" - "mov %1, %0\n\t" - "and #0xf0, %0\n\t" - : "=&z" (flags), "=&r" (__dummy) - : /* no inputs */ - : "memory" - ); - - return flags; -} - -#define raw_local_irq_save(flags) \ - do { (flags) = __raw_local_irq_save(); } while (0) - -static inline void raw_local_irq_restore(unsigned long flags) -{ - if ((flags & 0xf0) != 0xf0) - raw_local_irq_enable(); -} - -#endif /* __ASM_SH_IRQFLAGS_H */ diff --git a/trunk/include/asm-sh/mmu_context.h b/trunk/include/asm-sh/mmu_context.h index 46f04e23bd45..c7088efe579a 100644 --- a/trunk/include/asm-sh/mmu_context.h +++ b/trunk/include/asm-sh/mmu_context.h @@ -10,6 +10,7 @@ #include #include +#include #include #include @@ -41,8 +42,10 @@ extern unsigned long mmu_context_cache; /* * Get MMU context if needed. */ -static inline void get_mmu_context(struct mm_struct *mm) +static __inline__ void +get_mmu_context(struct mm_struct *mm) { + extern void flush_tlb_all(void); unsigned long mc = mmu_context_cache; /* Check if we have old version of context. */ @@ -58,7 +61,6 @@ static inline void get_mmu_context(struct mm_struct *mm) * Flush all TLB and start new cycle. */ flush_tlb_all(); - /* * Fix version; Note that we avoid version #0 * to distingush NO_CONTEXT. @@ -73,10 +75,11 @@ static inline void get_mmu_context(struct mm_struct *mm) * Initialize the context related info for a new mm_struct * instance. */ -static inline int init_new_context(struct task_struct *tsk, +static __inline__ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { mm->context.id = NO_CONTEXT; + return 0; } @@ -84,12 +87,12 @@ static inline int init_new_context(struct task_struct *tsk, * Destroy context related info for an mm_struct that is about * to be put to rest. */ -static inline void destroy_context(struct mm_struct *mm) +static __inline__ void destroy_context(struct mm_struct *mm) { /* Do nothing */ } -static inline void set_asid(unsigned long asid) +static __inline__ void set_asid(unsigned long asid) { unsigned long __dummy; @@ -102,7 +105,7 @@ static inline void set_asid(unsigned long asid) "r" (0xffffff00)); } -static inline unsigned long get_asid(void) +static __inline__ unsigned long get_asid(void) { unsigned long asid; @@ -117,29 +120,24 @@ static inline unsigned long get_asid(void) * After we have set current->mm to a new value, this activates * the context for the new mm so we see the new mappings. 
*/ -static inline void activate_context(struct mm_struct *mm) +static __inline__ void activate_context(struct mm_struct *mm) { get_mmu_context(mm); set_asid(mm->context.id & MMU_CONTEXT_ASID_MASK); } -/* MMU_TTB is used for optimizing the fault handling. */ -static inline void set_TTB(pgd_t *pgd) -{ - ctrl_outl((unsigned long)pgd, MMU_TTB); -} - -static inline pgd_t *get_TTB(void) -{ - return (pgd_t *)ctrl_inl(MMU_TTB); -} - -static inline void switch_mm(struct mm_struct *prev, - struct mm_struct *next, - struct task_struct *tsk) +/* MMU_TTB can be used for optimizing the fault handling. + (Currently not used) */ +static __inline__ void switch_mm(struct mm_struct *prev, + struct mm_struct *next, + struct task_struct *tsk) { if (likely(prev != next)) { - set_TTB(next->pgd); + unsigned long __pgdir = (unsigned long)next->pgd; + + __asm__ __volatile__("mov.l %0, %1" + : /* no output */ + : "r" (__pgdir), "m" (__m(MMU_TTB))); activate_context(next); } } @@ -149,7 +147,7 @@ static inline void switch_mm(struct mm_struct *prev, #define activate_mm(prev, next) \ switch_mm((prev),(next),NULL) -static inline void +static __inline__ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { } diff --git a/trunk/include/asm-sh/page.h b/trunk/include/asm-sh/page.h index 380fd62dd05a..ca8b26d90475 100644 --- a/trunk/include/asm-sh/page.h +++ b/trunk/include/asm-sh/page.h @@ -13,16 +13,9 @@ [ P4 control ] 0xE0000000 */ + /* PAGE_SHIFT determines the page size */ -#if defined(CONFIG_PAGE_SIZE_4KB) -# define PAGE_SHIFT 12 -#elif defined(CONFIG_PAGE_SIZE_8KB) -# define PAGE_SHIFT 13 -#elif defined(CONFIG_PAGE_SIZE_64KB) -# define PAGE_SHIFT 16 -#else -# error "Bogus kernel page size?" -#endif +#define PAGE_SHIFT 12 #ifdef __ASSEMBLY__ #define PAGE_SIZE (1 << PAGE_SHIFT) @@ -35,14 +28,8 @@ #if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) #define HPAGE_SHIFT 16 -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) -#define HPAGE_SHIFT 18 #elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) #define HPAGE_SHIFT 20 -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) -#define HPAGE_SHIFT 22 -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB) -#define HPAGE_SHIFT 26 #endif #ifdef CONFIG_HUGETLB_PAGE @@ -82,25 +69,15 @@ extern void __copy_user_page(void *to, void *from, void *orig_to); /* * These are used to make use of C type-checking.. 
*/ -#ifdef CONFIG_X2TLB -typedef struct { unsigned long pte_low, pte_high; } pte_t; -typedef struct { unsigned long long pgprot; } pgprot_t; -#define pte_val(x) \ - ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) -#define __pte(x) \ - ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; }) -#else -typedef struct { unsigned long pte_low; } pte_t; -typedef struct { unsigned long pgprot; } pgprot_t; -#define pte_val(x) ((x).pte_low) -#define __pte(x) ((pte_t) { (x) } ) -#endif - +typedef struct { unsigned long pte; } pte_t; typedef struct { unsigned long pgd; } pgd_t; +typedef struct { unsigned long pgprot; } pgprot_t; +#define pte_val(x) ((x).pte) #define pgd_val(x) ((x).pgd) #define pgprot_val(x) ((x).pgprot) +#define __pte(x) ((pte_t) { (x) } ) #define __pgd(x) ((pgd_t) { (x) } ) #define __pgprot(x) ((pgprot_t) { (x) } ) diff --git a/trunk/include/asm-sh/pgalloc.h b/trunk/include/asm-sh/pgalloc.h index 888e4529e6fe..e841465ab4d2 100644 --- a/trunk/include/asm-sh/pgalloc.h +++ b/trunk/include/asm-sh/pgalloc.h @@ -1,16 +1,13 @@ #ifndef __ASM_SH_PGALLOC_H #define __ASM_SH_PGALLOC_H -static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, - pte_t *pte) -{ - set_pmd(pmd, __pmd((unsigned long)pte)); -} +#define pmd_populate_kernel(mm, pmd, pte) \ + set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))) static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte) { - set_pmd(pmd, __pmd((unsigned long)page_address(pte))); + set_pmd(pmd, __pmd(_PAGE_TABLE + page_to_phys(pte))); } /* @@ -18,16 +15,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, */ static inline pgd_t *pgd_alloc(struct mm_struct *mm) { - pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT); - - if (pgd) { - memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); - memcpy(pgd + USER_PTRS_PER_PGD, - swapper_pg_dir + USER_PTRS_PER_PGD, - (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); - } - - return pgd; + return (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO); } static inline void pgd_free(pgd_t *pgd) diff --git a/trunk/include/asm-sh/pgtable-2level.h b/trunk/include/asm-sh/pgtable-2level.h new file mode 100644 index 000000000000..b525db6f61c6 --- /dev/null +++ b/trunk/include/asm-sh/pgtable-2level.h @@ -0,0 +1,70 @@ +#ifndef __ASM_SH_PGTABLE_2LEVEL_H +#define __ASM_SH_PGTABLE_2LEVEL_H + +/* + * traditional two-level paging structure: + */ + +#define PGDIR_SHIFT 22 +#define PTRS_PER_PGD 1024 + +/* + * this is two-level, so we don't really have any + * PMD directory physically. + */ +#define PMD_SHIFT 22 +#define PTRS_PER_PMD 1 + +#define PTRS_PER_PTE 1024 + +#ifndef __ASSEMBLY__ +#define pte_ERROR(e) \ + printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) +#define pmd_ERROR(e) \ + printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) +#define pgd_ERROR(e) \ + printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) + +/* + * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the pgd is never bad, and a pmd always exists (as it's folded + * into the pgd entry) + */ +static inline int pgd_none(pgd_t pgd) { return 0; } +static inline int pgd_bad(pgd_t pgd) { return 0; } +static inline int pgd_present(pgd_t pgd) { return 1; } +static inline void pgd_clear (pgd_t * pgdp) { } + +/* + * Certain architectures need to do special things when PTEs + * within a page table are directly modified. Thus, the following + * hook is made available. 
+ */ +#define set_pte(pteptr, pteval) (*(pteptr) = pteval) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) + +/* + * (pmds are folded into pgds so this doesn't get actually called, + * but the define is needed for a generic inline function.) + */ +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval) +#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval) + +#define pgd_page_vaddr(pgd) \ +((unsigned long) __va(pgd_val(pgd) & PAGE_MASK)) + +#define pgd_page(pgd) \ + (phys_to_page(pgd_val(pgd))) + +static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) +{ + return (pmd_t *) dir; +} + +#define pte_pfn(x) ((unsigned long)(((x).pte >> PAGE_SHIFT))) +#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) +#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_SH_PGTABLE_2LEVEL_H */ diff --git a/trunk/include/asm-sh/pgtable.h b/trunk/include/asm-sh/pgtable.h index c84901dbd8e5..2c8682ad1012 100644 --- a/trunk/include/asm-sh/pgtable.h +++ b/trunk/include/asm-sh/pgtable.h @@ -15,10 +15,15 @@ #include #include +#define PTRS_PER_PGD 1024 + #ifndef __ASSEMBLY__ #include #include +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; +extern void paging_init(void); + /* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. @@ -28,28 +33,15 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #endif /* !__ASSEMBLY__ */ -/* - * traditional two-level paging structure - */ -/* PTE bits */ -#ifdef CONFIG_X2TLB -# define PTE_MAGNITUDE 3 /* 64-bit PTEs on extended mode SH-X2 TLB */ -#else -# define PTE_MAGNITUDE 2 /* 32-bit PTEs */ -#endif -#define PTE_SHIFT PAGE_SHIFT -#define PTE_BITS (PTE_SHIFT - PTE_MAGNITUDE) - -/* PGD bits */ -#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS) -#define PGDIR_BITS (32 - PGDIR_SHIFT) -#define PGDIR_SIZE (1 << PGDIR_SHIFT) +/* traditional two-level paging structure */ +#define PGDIR_SHIFT 22 +#define PTRS_PER_PMD 1 +#define PTRS_PER_PTE 1024 +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) -/* Entries per level */ -#define PTRS_PER_PTE (PAGE_SIZE / 4) -#define PTRS_PER_PGD (PAGE_SIZE / 4) - #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) #define FIRST_USER_ADDRESS 0 @@ -57,7 +49,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; /* * First 1MB map is used by fixed purpose. - * Currently only 4-entry (16kB) is used (see arch/sh/mm/cache.c) + * Currently only 4-enty (16kB) is used (see arch/sh/mm/cache.c) */ #define VMALLOC_START (P3SEG+0x00100000) #define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) @@ -65,8 +57,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; /* * Linux PTEL encoding. * - * Hardware and software bit definitions for the PTEL value (see below for - * notes on SH-X2 MMUs and 64-bit PTEs): + * Hardware and software bit definitions for the PTEL value: * * - Bits 0 and 7 are reserved on SH-3 (_PAGE_WT and _PAGE_SZ1 on SH-4). * @@ -85,57 +76,20 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; * * - Bits 31, 30, and 29 remain unused by everyone and can be used for future * software flags, although care must be taken to update _PAGE_CLEAR_FLAGS. - * - * XXX: Leave the _PAGE_FILE and _PAGE_WT overhaul for a rainy day. 
- * - * SH-X2 MMUs and extended PTEs - * - * SH-X2 supports an extended mode TLB with split data arrays due to the - * number of bits needed for PR and SZ (now EPR and ESZ) encodings. The PR and - * SZ bit placeholders still exist in data array 1, but are implemented as - * reserved bits, with the real logic existing in data array 2. - * - * The downside to this is that we can no longer fit everything in to a 32-bit - * PTE encoding, so a 64-bit pte_t is necessary for these parts. On the plus - * side, this gives us quite a few spare bits to play with for future usage. */ -/* Legacy and compat mode bits */ #define _PAGE_WT 0x001 /* WT-bit on SH-4, 0 on SH-3 */ #define _PAGE_HW_SHARED 0x002 /* SH-bit : shared among processes */ #define _PAGE_DIRTY 0x004 /* D-bit : page changed */ #define _PAGE_CACHABLE 0x008 /* C-bit : cachable */ -#ifndef CONFIG_X2TLB -# define _PAGE_SZ0 0x010 /* SZ0-bit : Size of page */ -# define _PAGE_RW 0x020 /* PR0-bit : write access allowed */ -# define _PAGE_USER 0x040 /* PR1-bit : user space access allowed*/ -# define _PAGE_SZ1 0x080 /* SZ1-bit : Size of page (on SH-4) */ -#endif +#define _PAGE_SZ0 0x010 /* SZ0-bit : Size of page */ +#define _PAGE_RW 0x020 /* PR0-bit : write access allowed */ +#define _PAGE_USER 0x040 /* PR1-bit : user space access allowed */ +#define _PAGE_SZ1 0x080 /* SZ1-bit : Size of page (on SH-4) */ #define _PAGE_PRESENT 0x100 /* V-bit : page is valid */ #define _PAGE_PROTNONE 0x200 /* software: if not present */ #define _PAGE_ACCESSED 0x400 /* software: page referenced */ #define _PAGE_FILE _PAGE_WT /* software: pagecache or swap? */ -/* Extended mode bits */ -#define _PAGE_EXT_ESZ0 0x0010 /* ESZ0-bit: Size of page */ -#define _PAGE_EXT_ESZ1 0x0020 /* ESZ1-bit: Size of page */ -#define _PAGE_EXT_ESZ2 0x0040 /* ESZ2-bit: Size of page */ -#define _PAGE_EXT_ESZ3 0x0080 /* ESZ3-bit: Size of page */ - -#define _PAGE_EXT_USER_EXEC 0x0100 /* EPR0-bit: User space executable */ -#define _PAGE_EXT_USER_WRITE 0x0200 /* EPR1-bit: User space writable */ -#define _PAGE_EXT_USER_READ 0x0400 /* EPR2-bit: User space readable */ - -#define _PAGE_EXT_KERN_EXEC 0x0800 /* EPR3-bit: Kernel space executable */ -#define _PAGE_EXT_KERN_WRITE 0x1000 /* EPR4-bit: Kernel space writable */ -#define _PAGE_EXT_KERN_READ 0x2000 /* EPR5-bit: Kernel space readable */ - -/* Wrapper for extended mode pgprot twiddling */ -#ifdef CONFIG_X2TLB -# define _PAGE_EXT(x) ((unsigned long long)(x) << 32) -#else -# define _PAGE_EXT(x) (0) -#endif - /* software: moves to PTEA.TC (Timing Control) */ #define _PAGE_PCC_AREA5 0x00000000 /* use BSC registers for area5 */ #define _PAGE_PCC_AREA6 0x80000000 /* use BSC registers for area6 */ @@ -160,160 +114,37 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define _PAGE_FLAGS_HARDWARE_MASK (0x1fffffff & ~(_PAGE_CLEAR_FLAGS)) -/* Hardware flags, page size encoding */ -#if defined(CONFIG_X2TLB) -# if defined(CONFIG_PAGE_SIZE_4KB) -# define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ0) -# elif defined(CONFIG_PAGE_SIZE_8KB) -# define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ1) -# elif defined(CONFIG_PAGE_SIZE_64KB) -# define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ2) -# endif -#else -# if defined(CONFIG_PAGE_SIZE_4KB) -# define _PAGE_FLAGS_HARD _PAGE_SZ0 -# elif defined(CONFIG_PAGE_SIZE_64KB) -# define _PAGE_FLAGS_HARD _PAGE_SZ1 -# endif -#endif +/* Hardware flags: SZ0=1 (4k-byte) */ +#define _PAGE_FLAGS_HARD _PAGE_SZ0 -#if defined(CONFIG_X2TLB) -# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) -# define _PAGE_SZHUGE 
(_PAGE_EXT_ESZ2) -# elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) -# define _PAGE_SZHUGE (_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ2) -# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) -# define _PAGE_SZHUGE (_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ1 | _PAGE_EXT_ESZ2) -# elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) -# define _PAGE_SZHUGE (_PAGE_EXT_ESZ3) -# elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB) -# define _PAGE_SZHUGE (_PAGE_EXT_ESZ2 | _PAGE_EXT_ESZ3) -# endif -#else -# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) -# define _PAGE_SZHUGE (_PAGE_SZ1) -# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) -# define _PAGE_SZHUGE (_PAGE_SZ0 | _PAGE_SZ1) -# endif -#endif - -/* - * Stub out _PAGE_SZHUGE if we don't have a good definition for it, - * to make pte_mkhuge() happy. - */ -#ifndef _PAGE_SZHUGE -# define _PAGE_SZHUGE (_PAGE_FLAGS_HARD) +#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) +#define _PAGE_SZHUGE (_PAGE_SZ1) +#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) +#define _PAGE_SZHUGE (_PAGE_SZ0 | _PAGE_SZ1) #endif -#define _PAGE_CHG_MASK \ - (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY) +#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) +#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) +#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY) #ifndef __ASSEMBLY__ -#if defined(CONFIG_X2TLB) /* SH-X2 TLB */ -#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \ - _PAGE_ACCESSED | _PAGE_FLAGS_HARD) - -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ - _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ - _PAGE_EXT(_PAGE_EXT_USER_READ | \ - _PAGE_EXT_USER_WRITE)) - -#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ - _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ - _PAGE_EXT(_PAGE_EXT_USER_EXEC | \ - _PAGE_EXT_USER_READ)) - -#define PAGE_COPY PAGE_EXECREAD - -#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ - _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ - _PAGE_EXT(_PAGE_EXT_USER_READ)) - -#define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ - _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ - _PAGE_EXT(_PAGE_EXT_USER_WRITE)) - -#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ - _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ - _PAGE_EXT(_PAGE_EXT_USER_WRITE | \ - _PAGE_EXT_USER_READ | \ - _PAGE_EXT_USER_EXEC)) - -#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \ - _PAGE_DIRTY | _PAGE_ACCESSED | \ - _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \ - _PAGE_EXT(_PAGE_EXT_KERN_READ | \ - _PAGE_EXT_KERN_WRITE | \ - _PAGE_EXT_KERN_EXEC)) - +#ifdef CONFIG_MMU +#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_FLAGS_HARD) +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_FLAGS_HARD) +#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_FLAGS_HARD) +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_FLAGS_HARD) +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD) #define PAGE_KERNEL_NOCACHE \ - __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \ - _PAGE_ACCESSED | _PAGE_HW_SHARED | \ - _PAGE_FLAGS_HARD | \ - _PAGE_EXT(_PAGE_EXT_KERN_READ | \ - _PAGE_EXT_KERN_WRITE | \ - _PAGE_EXT_KERN_EXEC)) - -#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \ - _PAGE_DIRTY | _PAGE_ACCESSED | \ - _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \ - 
_PAGE_EXT(_PAGE_EXT_KERN_READ | \ - _PAGE_EXT_KERN_EXEC)) - -#define PAGE_KERNEL_PCC(slot, type) \ - __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \ - _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \ - _PAGE_EXT(_PAGE_EXT_KERN_READ | \ - _PAGE_EXT_KERN_WRITE | \ - _PAGE_EXT_KERN_EXEC) \ - (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \ - (type)) - -#elif defined(CONFIG_MMU) /* SH-X TLB */ -#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \ - _PAGE_ACCESSED | _PAGE_FLAGS_HARD) - -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ - _PAGE_CACHABLE | _PAGE_ACCESSED | \ - _PAGE_FLAGS_HARD) - -#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \ - _PAGE_ACCESSED | _PAGE_FLAGS_HARD) - -#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \ - _PAGE_ACCESSED | _PAGE_FLAGS_HARD) - -#define PAGE_EXECREAD PAGE_READONLY -#define PAGE_RWX PAGE_SHARED -#define PAGE_WRITEONLY PAGE_SHARED - -#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | \ - _PAGE_DIRTY | _PAGE_ACCESSED | \ - _PAGE_HW_SHARED | _PAGE_FLAGS_HARD) - -#define PAGE_KERNEL_NOCACHE \ - __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \ - _PAGE_ACCESSED | _PAGE_HW_SHARED | \ - _PAGE_FLAGS_HARD) - -#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \ - _PAGE_DIRTY | _PAGE_ACCESSED | \ - _PAGE_HW_SHARED | _PAGE_FLAGS_HARD) - + __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD) +#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD) #define PAGE_KERNEL_PCC(slot, type) \ - __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \ - _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \ - (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \ - (type)) + __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_FLAGS_HARD | (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | (type)) #else /* no mmu */ #define PAGE_NONE __pgprot(0) #define PAGE_SHARED __pgprot(0) #define PAGE_COPY __pgprot(0) -#define PAGE_EXECREAD __pgprot(0) -#define PAGE_RWX __pgprot(0) #define PAGE_READONLY __pgprot(0) -#define PAGE_WRITEONLY __pgprot(0) #define PAGE_KERNEL __pgprot(0) #define PAGE_KERNEL_NOCACHE __pgprot(0) #define PAGE_KERNEL_RO __pgprot(0) @@ -323,32 +154,27 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #endif /* __ASSEMBLY__ */ /* - * SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page - * protection for execute, and considers it the same as a read. Also, write - * permission implies read permission. This is the closest we can get.. - * - * SH-X2 (SH7785) and later parts take this to the opposite end of the extreme, - * not only supporting separate execute, read, and write bits, but having - * completely separate permission bits for user and kernel space. + * As i386 and MIPS, SuperH can't do page protection for execute, and + * considers that the same as a read. Also, write permissions imply + * read permissions. This is the closest we can get.. 
*/ - /*xwr*/ #define __P000 PAGE_NONE #define __P001 PAGE_READONLY #define __P010 PAGE_COPY #define __P011 PAGE_COPY -#define __P100 PAGE_EXECREAD -#define __P101 PAGE_EXECREAD +#define __P100 PAGE_READONLY +#define __P101 PAGE_READONLY #define __P110 PAGE_COPY #define __P111 PAGE_COPY #define __S000 PAGE_NONE #define __S001 PAGE_READONLY -#define __S010 PAGE_WRITEONLY +#define __S010 PAGE_SHARED #define __S011 PAGE_SHARED -#define __S100 PAGE_EXECREAD -#define __S101 PAGE_EXECREAD -#define __S110 PAGE_RWX -#define __S111 PAGE_RWX +#define __S100 PAGE_READONLY +#define __S101 PAGE_READONLY +#define __S110 PAGE_SHARED +#define __S111 PAGE_SHARED #ifndef __ASSEMBLY__ @@ -357,17 +183,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; * within a page table are directly modified. Thus, the following * hook is made available. */ -#ifdef CONFIG_X2TLB -static inline void set_pte(pte_t *ptep, pte_t pte) -{ - ptep->pte_high = pte.pte_high; - smp_wmb(); - ptep->pte_low = pte.pte_low; -} -#else #define set_pte(pteptr, pteval) (*(pteptr) = pteval) -#endif - #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) /* @@ -376,18 +192,18 @@ static inline void set_pte(pte_t *ptep, pte_t pte) */ #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval) -#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT))) +#define pte_pfn(x) ((unsigned long)(((x).pte >> PAGE_SHIFT))) #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) #define pte_none(x) (!pte_val(x)) #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) -#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) +#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) #define pmd_none(x) (!pmd_val(x)) -#define pmd_present(x) (pmd_val(x)) +#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) -#define pmd_bad(x) (pmd_val(x) & ~PAGE_MASK) +#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) #define pte_page(x) phys_to_page(pte_val(x)&PTE_PHYS_MASK) @@ -396,52 +212,28 @@ static inline void set_pte(pte_t *ptep, pte_t pte) * The following only work if pte_present() is true. * Undefined behaviour if not.. 
*/ -#define pte_not_present(pte) (!(pte_val(pte) & _PAGE_PRESENT)) -#define pte_dirty(pte) (pte_val(pte) & _PAGE_DIRTY) -#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED) -#define pte_file(pte) (pte_val(pte) & _PAGE_FILE) - -#ifdef CONFIG_X2TLB -#define pte_read(pte) ((pte).pte_high & _PAGE_EXT_USER_READ) -#define pte_exec(pte) ((pte).pte_high & _PAGE_EXT_USER_EXEC) -#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE) -#else -#define pte_read(pte) (pte_val(pte) & _PAGE_USER) -#define pte_exec(pte) (pte_val(pte) & _PAGE_USER) -#define pte_write(pte) (pte_val(pte) & _PAGE_RW) +static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; } +static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; } +static inline int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; } +static inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; } +static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } +static inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_RW; } +static inline int pte_not_present(pte_t pte){ return !(pte_val(pte) & _PAGE_PRESENT); } + +static inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; } +static inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; } +static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; } +static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; } +static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; } +static inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; } +static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; } +static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; } +static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } +static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; } +#ifdef CONFIG_HUGETLB_PAGE +static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; } #endif -#define PTE_BIT_FUNC(h,fn,op) \ -static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; } - -#ifdef CONFIG_X2TLB -/* - * We cheat a bit in the SH-X2 TLB case. As the permission bits are - * individually toggled (and user permissions are entirely decoupled from - * kernel permissions), we attempt to couple them a bit more sanely here. 
- */ -PTE_BIT_FUNC(high, rdprotect, &= ~_PAGE_EXT_USER_READ); -PTE_BIT_FUNC(high, mkread, |= _PAGE_EXT_USER_READ | _PAGE_EXT_KERN_READ); -PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE); -PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE); -PTE_BIT_FUNC(high, exprotect, &= ~_PAGE_EXT_USER_EXEC); -PTE_BIT_FUNC(high, mkexec, |= _PAGE_EXT_USER_EXEC | _PAGE_EXT_KERN_EXEC); -PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE); -#else -PTE_BIT_FUNC(low, rdprotect, &= ~_PAGE_USER); -PTE_BIT_FUNC(low, mkread, |= _PAGE_USER); -PTE_BIT_FUNC(low, wrprotect, &= ~_PAGE_RW); -PTE_BIT_FUNC(low, mkwrite, |= _PAGE_RW); -PTE_BIT_FUNC(low, exprotect, &= ~_PAGE_USER); -PTE_BIT_FUNC(low, mkexec, |= _PAGE_USER); -PTE_BIT_FUNC(low, mkhuge, |= _PAGE_SZHUGE); -#endif - -PTE_BIT_FUNC(low, mkclean, &= ~_PAGE_DIRTY); -PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY); -PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED); -PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED); - /* * Macro and implementation to make a page protection as uncachable. */ @@ -466,14 +258,13 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot) #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) -{ - set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | - pgprot_val(newprot))); - return pte; -} +{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; } + +#define pmd_page_vaddr(pmd) \ +((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) -#define pmd_page_vaddr(pmd) pmd_val(pmd) -#define pmd_page(pmd) (virt_to_page(pmd_val(pmd))) +#define pmd_page(pmd) \ + (phys_to_page(pmd_val(pmd))) /* to find an entry in a page-table-directory. */ #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) @@ -492,15 +283,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) #define pte_unmap(pte) do { } while (0) #define pte_unmap_nested(pte) do { } while (0) -#ifdef CONFIG_X2TLB -#define pte_ERROR(e) \ - printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, \ - &(e), (e).pte_high, (e).pte_low) -#else #define pte_ERROR(e) \ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) -#endif - #define pgd_ERROR(e) \ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) @@ -553,9 +337,6 @@ extern unsigned int kobjsize(const void *objp); extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); #endif -extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; -extern void paging_init(void); - #include #endif /* !__ASSEMBLY__ */ diff --git a/trunk/include/asm-sh/processor.h b/trunk/include/asm-sh/processor.h index 6f1dd7ca1b1d..45bb74e35d32 100644 --- a/trunk/include/asm-sh/processor.h +++ b/trunk/include/asm-sh/processor.h @@ -36,10 +36,7 @@ */ enum cpu_type { /* SH-2 types */ - CPU_SH7604, CPU_SH7619, - - /* SH-2A types */ - CPU_SH7206, + CPU_SH7604, /* SH-3 types */ CPU_SH7705, CPU_SH7706, CPU_SH7707, @@ -50,10 +47,7 @@ enum cpu_type { /* SH-4 types */ CPU_SH7750, CPU_SH7750S, CPU_SH7750R, CPU_SH7751, CPU_SH7751R, CPU_SH7760, CPU_ST40RA, CPU_ST40GX1, CPU_SH4_202, CPU_SH4_501, - - /* SH-4A types */ CPU_SH73180, CPU_SH7343, CPU_SH7770, CPU_SH7780, CPU_SH7781, - CPU_SH7785, /* Unknown subtype */ CPU_SH_NONE @@ -136,11 +130,12 @@ union sh_fpu_union { }; struct thread_struct { - /* Saved registers when thread is descheduled */ unsigned long sp; unsigned long pc; - /* Hardware debugging registers */ + unsigned long trap_no, error_code; + unsigned long address; + /* Hardware 
debugging registers may come here */ unsigned long ubc_pc; /* floating point info */ @@ -155,7 +150,12 @@ typedef struct { extern int ubc_usercnt; #define INIT_THREAD { \ - .sp = sizeof(init_stack) + (long) &init_stack, \ + sizeof(init_stack) + (long) &init_stack, /* sp */ \ + 0, /* pc */ \ + 0, 0, \ + 0, \ + 0, \ + {{{0,}},} /* fpu state */ \ } /* @@ -259,8 +259,8 @@ void show_trace(struct task_struct *tsk, unsigned long *sp, struct pt_regs *regs); extern unsigned long get_wchan(struct task_struct *p); -#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) -#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[15]) +#define KSTK_EIP(tsk) ((tsk)->thread.pc) +#define KSTK_ESP(tsk) ((tsk)->thread.sp) #define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory") #define cpu_relax() barrier() diff --git a/trunk/include/asm-sh/push-switch.h b/trunk/include/asm-sh/push-switch.h deleted file mode 100644 index dfc6bad567f0..000000000000 --- a/trunk/include/asm-sh/push-switch.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef __ASM_SH_PUSH_SWITCH_H -#define __ASM_SH_PUSH_SWITCH_H - -#include -#include -#include - -struct push_switch { - /* switch state */ - unsigned int state:1; - /* debounce timer */ - struct timer_list debounce; - /* workqueue */ - struct work_struct work; -}; - -struct push_switch_platform_info { - /* IRQ handler */ - irqreturn_t (*irq_handler)(int irq, void *data); - /* Special IRQ flags */ - unsigned int irq_flags; - /* Bit location of switch */ - unsigned int bit; - /* Symbolic switch name */ - const char *name; -}; - -#endif /* __ASM_SH_PUSH_SWITCH_H */ diff --git a/trunk/include/asm-sh/rwsem.h b/trunk/include/asm-sh/rwsem.h index 4931ba817d73..9d2aea5e8488 100644 --- a/trunk/include/asm-sh/rwsem.h +++ b/trunk/include/asm-sh/rwsem.h @@ -25,21 +25,11 @@ struct rw_semaphore { #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) spinlock_t wait_lock; struct list_head wait_list; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif }; -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } -#else -# define __RWSEM_DEP_MAP_INIT(lockname) -#endif - #define __RWSEM_INITIALIZER(name) \ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ - LIST_HEAD_INIT((name).wait_list) \ - __RWSEM_DEP_MAP_INIT(name) } + LIST_HEAD_INIT((name).wait_list) } #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) @@ -49,16 +39,6 @@ extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); -extern void __init_rwsem(struct rw_semaphore *sem, const char *name, - struct lock_class_key *key); - -#define init_rwsem(sem) \ -do { \ - static struct lock_class_key __key; \ - \ - __init_rwsem((sem), #sem, &__key); \ -} while (0) - static inline void init_rwsem(struct rw_semaphore *sem) { sem->count = RWSEM_UNLOCKED_VALUE; @@ -161,11 +141,6 @@ static inline void __downgrade_write(struct rw_semaphore *sem) rwsem_downgrade_wake(sem); } -static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) -{ - __down_write(sem); -} - /* * implement exchange and add functionality */ diff --git a/trunk/include/asm-sh/se7206.h b/trunk/include/asm-sh/se7206.h deleted file mode 100644 index 698eb80389ab..000000000000 --- a/trunk/include/asm-sh/se7206.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef __ASM_SH_SE7206_H -#define __ASM_SH_SE7206_H - -#define PA_SMSC 
0x30000000 -#define PA_MRSHPC 0x34000000 -#define PA_LED 0x31400000 - -void init_se7206_IRQ(void); - -#define __IO_PREFIX se7206 -#include - -#endif /* __ASM_SH_SE7206_H */ diff --git a/trunk/include/asm-sh/setup.h b/trunk/include/asm-sh/setup.h index 1583c6b7bdaa..34ca8a7f06ba 100644 --- a/trunk/include/asm-sh/setup.h +++ b/trunk/include/asm-sh/setup.h @@ -1,12 +1,10 @@ +#ifdef __KERNEL__ #ifndef _SH_SETUP_H #define _SH_SETUP_H #define COMMAND_LINE_SIZE 256 -#ifdef __KERNEL__ - int setup_early_printk(char *); -#endif /* __KERNEL__ */ - #endif /* _SH_SETUP_H */ +#endif /* __KERNEL__ */ diff --git a/trunk/include/asm-sh/system.h b/trunk/include/asm-sh/system.h index b1e42e7f998b..3340126f4e0f 100644 --- a/trunk/include/asm-sh/system.h +++ b/trunk/include/asm-sh/system.h @@ -6,7 +6,6 @@ * Copyright (C) 2002 Paul Mundt */ -#include #include /* @@ -132,6 +131,103 @@ static inline unsigned long tas(volatile int *m) #define set_mb(var, value) do { xchg(&var, value); } while (0) +/* Interrupt Control */ +#ifdef CONFIG_CPU_HAS_SR_RB +static inline void local_irq_enable(void) +{ + unsigned long __dummy0, __dummy1; + + __asm__ __volatile__("stc sr, %0\n\t" + "and %1, %0\n\t" + "stc r6_bank, %1\n\t" + "or %1, %0\n\t" + "ldc %0, sr" + : "=&r" (__dummy0), "=r" (__dummy1) + : "1" (~0x000000f0) + : "memory"); +} +#else +static inline void local_irq_enable(void) +{ + unsigned long __dummy0, __dummy1; + + __asm__ __volatile__ ( + "stc sr, %0\n\t" + "and %1, %0\n\t" + "ldc %0, sr\n\t" + : "=&r" (__dummy0), "=r" (__dummy1) + : "1" (~0x000000f0) + : "memory"); +} +#endif + +static inline void local_irq_disable(void) +{ + unsigned long __dummy; + __asm__ __volatile__("stc sr, %0\n\t" + "or #0xf0, %0\n\t" + "ldc %0, sr" + : "=&z" (__dummy) + : /* no inputs */ + : "memory"); +} + +static inline void set_bl_bit(void) +{ + unsigned long __dummy0, __dummy1; + + __asm__ __volatile__ ("stc sr, %0\n\t" + "or %2, %0\n\t" + "and %3, %0\n\t" + "ldc %0, sr" + : "=&r" (__dummy0), "=r" (__dummy1) + : "r" (0x10000000), "r" (0xffffff0f) + : "memory"); +} + +static inline void clear_bl_bit(void) +{ + unsigned long __dummy0, __dummy1; + + __asm__ __volatile__ ("stc sr, %0\n\t" + "and %2, %0\n\t" + "ldc %0, sr" + : "=&r" (__dummy0), "=r" (__dummy1) + : "1" (~0x10000000) + : "memory"); +} + +#define local_save_flags(x) \ + __asm__("stc sr, %0; and #0xf0, %0" : "=&z" (x) :/**/: "memory" ) + +#define irqs_disabled() \ +({ \ + unsigned long flags; \ + local_save_flags(flags); \ + (flags != 0); \ +}) + +static inline unsigned long local_irq_save(void) +{ + unsigned long flags, __dummy; + + __asm__ __volatile__("stc sr, %1\n\t" + "mov %1, %0\n\t" + "or #0xf0, %0\n\t" + "ldc %0, sr\n\t" + "mov %1, %0\n\t" + "and #0xf0, %0" + : "=&z" (flags), "=&r" (__dummy) + :/**/ + : "memory" ); + return flags; +} + +#define local_irq_restore(x) do { \ + if ((x & 0x000000f0) != 0x000000f0) \ + local_irq_enable(); \ +} while (0) + /* * Jump to P2 area. * When handling TLB or caches, we need to do it from P2 area. 
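The flags value produced by local_irq_save() above holds only the SR.IMASK nibble (bits 4-7), which is why local_irq_restore() re-enables interrupts only when that nibble is not 0xf0. A hedged usage sketch follows; the counter and function names are invented for illustration and are not part of the patch.

/*
 * Typical critical section built on the primitives above. Only the
 * SR.IMASK bits travel in 'flags'; the rest of SR is untouched by the
 * save/restore pair.
 */
static unsigned long example_counter;		/* hypothetical shared state */

static void example_increment(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* mask interrupts, remember old IMASK */
	example_counter++;		/* not atomic, must not race an IRQ handler */
	local_irq_restore(flags);	/* re-enable only if they were enabled before */
}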
@@ -168,6 +264,9 @@ do { \ : "=&r" (__dummy)); \ } while (0) +/* For spinlocks etc */ +#define local_irq_save(x) x = local_irq_save() + static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) { unsigned long flags, retval; diff --git a/trunk/include/asm-sh/thread_info.h b/trunk/include/asm-sh/thread_info.h index 0c01dc550819..3ebc3f9039eb 100644 --- a/trunk/include/asm-sh/thread_info.h +++ b/trunk/include/asm-sh/thread_info.h @@ -90,7 +90,13 @@ static inline struct thread_info *current_thread_info(void) #endif #define free_thread_info(ti) kfree(ti) -#endif /* __ASSEMBLY__ */ +#else /* !__ASSEMBLY__ */ + +/* how to get the thread information struct from ASM */ +#define GET_THREAD_INFO(reg) \ + stc r7_bank, reg + +#endif /* * thread information flags diff --git a/trunk/include/asm-sh/timer.h b/trunk/include/asm-sh/timer.h index 17b5e76a4c31..5df842bcf7b6 100644 --- a/trunk/include/asm-sh/timer.h +++ b/trunk/include/asm-sh/timer.h @@ -18,32 +18,11 @@ struct sys_timer { struct sys_device dev; struct sys_timer_ops *ops; - -#ifdef CONFIG_NO_IDLE_HZ - struct dyn_tick_timer *dyn_tick; -#endif }; -#ifdef CONFIG_NO_IDLE_HZ -#define DYN_TICK_ENABLED (1 << 1) - -struct dyn_tick_timer { - spinlock_t lock; - unsigned int state; /* Current state */ - int (*enable)(void); /* Enables dynamic tick */ - int (*disable)(void); /* Disables dynamic tick */ - void (*reprogram)(unsigned long); /* Reprograms the timer */ - int (*handler)(int, void *); -}; - -void timer_dyn_reprogram(void); -#else -#define timer_dyn_reprogram() do { } while (0) -#endif - #define TICK_SIZE (tick_nsec / 1000) -extern struct sys_timer tmu_timer, cmt_timer, mtu2_timer; +extern struct sys_timer tmu_timer; extern struct sys_timer *sys_timer; #ifndef CONFIG_GENERIC_TIME diff --git a/trunk/include/asm-sh/titan.h b/trunk/include/asm-sh/titan.h index 03f3583c8918..270a4f4bc8a9 100644 --- a/trunk/include/asm-sh/titan.h +++ b/trunk/include/asm-sh/titan.h @@ -1,8 +1,9 @@ /* * Platform defintions for Titan */ -#ifndef _ASM_SH_TITAN_H -#define _ASM_SH_TITAN_H + +#ifndef _ASM_SH_TITAN_TITAN_H +#define _ASM_SH_TITAN_TITAN_H #define __IO_PREFIX titan #include @@ -14,4 +15,29 @@ #define TITAN_IRQ_MPCIB 11 /* mPCI B */ #define TITAN_IRQ_USB 11 /* USB */ -#endif /* __ASM_SH_TITAN_H */ +/* + * The external interrupt lines, these take up ints 0 - 15 inclusive + * depending on the priority for the interrupt. In fact the priority + * is the interrupt :-) + */ +#define IRL0_IRQ 0 +#define IRL0_IPR_ADDR INTC_IPRD +#define IRL0_IPR_POS 3 +#define IRL0_PRIORITY 8 + +#define IRL1_IRQ 1 +#define IRL1_IPR_ADDR INTC_IPRD +#define IRL1_IPR_POS 2 +#define IRL1_PRIORITY 8 + +#define IRL2_IRQ 2 +#define IRL2_IPR_ADDR INTC_IPRD +#define IRL2_IPR_POS 1 +#define IRL2_PRIORITY 8 + +#define IRL3_IRQ 3 +#define IRL3_IPR_ADDR INTC_IPRD +#define IRL3_IPR_POS 0 +#define IRL3_PRIORITY 8 + +#endif diff --git a/trunk/include/asm-sh/unistd.h b/trunk/include/asm-sh/unistd.h index f982073dc6c6..1c2abde122cd 100644 --- a/trunk/include/asm-sh/unistd.h +++ b/trunk/include/asm-sh/unistd.h @@ -332,6 +332,125 @@ #ifdef __KERNEL__ +#include + +/* user-visible error numbers are in the range -1 - -MAX_ERRNO: + * see */ + +#define __syscall_return(type, res) \ +do { \ + if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \ + /* Avoid using "res" which is declared to be in register r0; \ + errno might expand to a function call and clobber it. 
*/ \ + int __err = -(res); \ + errno = __err; \ + res = -1; \ + } \ + return (type) (res); \ +} while (0) + +/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */ +#define _syscall0(type,name) \ +type name(void) \ +{ \ +register long __sc0 __asm__ ("r3") = __NR_##name; \ +__asm__ __volatile__ ("trapa #0x10" \ + : "=z" (__sc0) \ + : "0" (__sc0) \ + : "memory" ); \ +__syscall_return(type,__sc0); \ +} + +#define _syscall1(type,name,type1,arg1) \ +type name(type1 arg1) \ +{ \ +register long __sc0 __asm__ ("r3") = __NR_##name; \ +register long __sc4 __asm__ ("r4") = (long) arg1; \ +__asm__ __volatile__ ("trapa #0x11" \ + : "=z" (__sc0) \ + : "0" (__sc0), "r" (__sc4) \ + : "memory"); \ +__syscall_return(type,__sc0); \ +} + +#define _syscall2(type,name,type1,arg1,type2,arg2) \ +type name(type1 arg1,type2 arg2) \ +{ \ +register long __sc0 __asm__ ("r3") = __NR_##name; \ +register long __sc4 __asm__ ("r4") = (long) arg1; \ +register long __sc5 __asm__ ("r5") = (long) arg2; \ +__asm__ __volatile__ ("trapa #0x12" \ + : "=z" (__sc0) \ + : "0" (__sc0), "r" (__sc4), "r" (__sc5) \ + : "memory"); \ +__syscall_return(type,__sc0); \ +} + +#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ +type name(type1 arg1,type2 arg2,type3 arg3) \ +{ \ +register long __sc0 __asm__ ("r3") = __NR_##name; \ +register long __sc4 __asm__ ("r4") = (long) arg1; \ +register long __sc5 __asm__ ("r5") = (long) arg2; \ +register long __sc6 __asm__ ("r6") = (long) arg3; \ +__asm__ __volatile__ ("trapa #0x13" \ + : "=z" (__sc0) \ + : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6) \ + : "memory"); \ +__syscall_return(type,__sc0); \ +} + +#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ +type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ +{ \ +register long __sc0 __asm__ ("r3") = __NR_##name; \ +register long __sc4 __asm__ ("r4") = (long) arg1; \ +register long __sc5 __asm__ ("r5") = (long) arg2; \ +register long __sc6 __asm__ ("r6") = (long) arg3; \ +register long __sc7 __asm__ ("r7") = (long) arg4; \ +__asm__ __volatile__ ("trapa #0x14" \ + : "=z" (__sc0) \ + : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), \ + "r" (__sc7) \ + : "memory" ); \ +__syscall_return(type,__sc0); \ +} + +#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \ +type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ +{ \ +register long __sc3 __asm__ ("r3") = __NR_##name; \ +register long __sc4 __asm__ ("r4") = (long) arg1; \ +register long __sc5 __asm__ ("r5") = (long) arg2; \ +register long __sc6 __asm__ ("r6") = (long) arg3; \ +register long __sc7 __asm__ ("r7") = (long) arg4; \ +register long __sc0 __asm__ ("r0") = (long) arg5; \ +__asm__ __volatile__ ("trapa #0x15" \ + : "=z" (__sc0) \ + : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7), \ + "r" (__sc3) \ + : "memory" ); \ +__syscall_return(type,__sc0); \ +} + +#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \ +type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \ +{ \ +register long __sc3 __asm__ ("r3") = __NR_##name; \ +register long __sc4 __asm__ ("r4") = (long) arg1; \ +register long __sc5 __asm__ ("r5") = (long) arg2; \ +register long __sc6 __asm__ ("r6") = (long) arg3; \ +register long __sc7 __asm__ ("r7") = (long) arg4; \ +register long __sc0 __asm__ ("r0") = (long) arg5; \ +register long __sc1 __asm__ ("r1") = (long) arg6; \ +__asm__ __volatile__ ("trapa #0x16" \ + : "=z" (__sc0) \ + : 
"0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7), \ + "r" (__sc3), "r" (__sc1) \ + : "memory" ); \ +__syscall_return(type,__sc0); \ +} + #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT diff --git a/trunk/include/asm-sh64/dma-mapping.h b/trunk/include/asm-sh64/dma-mapping.h index 5efe906c59f7..68e27a8fca31 100644 --- a/trunk/include/asm-sh64/dma-mapping.h +++ b/trunk/include/asm-sh64/dma-mapping.h @@ -35,7 +35,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size, consistent_free(NULL, size, vaddr, dma_handle); } -static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, +static inline void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir) { dma_cache_wback_inv((unsigned long)vaddr, size); diff --git a/trunk/include/asm-sh64/setup.h b/trunk/include/asm-sh64/setup.h index 5b07b14c2927..ebd42eb1b709 100644 --- a/trunk/include/asm-sh64/setup.h +++ b/trunk/include/asm-sh64/setup.h @@ -1,10 +1,6 @@ #ifndef __ASM_SH64_SETUP_H #define __ASM_SH64_SETUP_H -#define COMMAND_LINE_SIZE 256 - -#ifdef __KERNEL__ - #define PARAM ((unsigned char *)empty_zero_page) #define MOUNT_ROOT_RDONLY (*(unsigned long *) (PARAM+0x000)) #define RAMDISK_FLAGS (*(unsigned long *) (PARAM+0x004)) @@ -16,7 +12,5 @@ #define COMMAND_LINE ((char *) (PARAM+256)) #define COMMAND_LINE_SIZE 256 -#endif /* __KERNEL__ */ - #endif /* __ASM_SH64_SETUP_H */ diff --git a/trunk/include/asm-sh64/unistd.h b/trunk/include/asm-sh64/unistd.h index 1f38a7aacaaf..ee7828b27ad1 100644 --- a/trunk/include/asm-sh64/unistd.h +++ b/trunk/include/asm-sh64/unistd.h @@ -347,6 +347,148 @@ #ifdef __KERNEL__ #define NR_syscalls 321 +#include + +/* user-visible error numbers are in the range -1 - -MAX_ERRNO: + * see */ + +#define __syscall_return(type, res) \ +do { \ + /* Note: when returning from kernel the return value is in r9 \ + ** This prevents conflicts between return value and arg1 \ + ** when dispatching signal handler, in other words makes \ + ** life easier in the system call epilogue (see entry.S) \ + */ \ + register unsigned long __sr2 __asm__ ("r2") = res; \ + if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \ + errno = -(res); \ + __sr2 = -1; \ + } \ + return (type) (__sr2); \ +} while (0) + +/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */ + +#define _syscall0(type,name) \ +type name(void) \ +{ \ +register unsigned long __sc0 __asm__ ("r9") = ((0x10 << 16) | __NR_##name); \ +__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "()" \ + : "=r" (__sc0) \ + : "r" (__sc0) ); \ +__syscall_return(type,__sc0); \ +} + + /* + * The apparent spurious "dummy" assembler comment is *needed*, + * as without it, the compiler treats the arg variables + * as no longer live just before the asm. The compiler can + * then optimize the storage into any registers it wishes. + * The additional dummy statement forces the compiler to put + * the arguments into the correct registers before the TRAPA. 
+ */ +#define _syscall1(type,name,type1,arg1) \ +type name(type1 arg1) \ +{ \ +register unsigned long __sc0 __asm__ ("r9") = ((0x11 << 16) | __NR_##name); \ +register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \ +__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2)" \ + : "=r" (__sc0) \ + : "r" (__sc0), "r" (__sc2)); \ +__asm__ __volatile__ ("!dummy %0 %1" \ + : \ + : "r" (__sc0), "r" (__sc2)); \ +__syscall_return(type,__sc0); \ +} + +#define _syscall2(type,name,type1,arg1,type2,arg2) \ +type name(type1 arg1,type2 arg2) \ +{ \ +register unsigned long __sc0 __asm__ ("r9") = ((0x12 << 16) | __NR_##name); \ +register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \ +register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \ +__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3)" \ + : "=r" (__sc0) \ + : "r" (__sc0), "r" (__sc2), "r" (__sc3) ); \ +__asm__ __volatile__ ("!dummy %0 %1 %2" \ + : \ + : "r" (__sc0), "r" (__sc2), "r" (__sc3) ); \ +__syscall_return(type,__sc0); \ +} + +#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ +type name(type1 arg1,type2 arg2,type3 arg3) \ +{ \ +register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_##name); \ +register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \ +register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \ +register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3; \ +__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3,%4)" \ + : "=r" (__sc0) \ + : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) ); \ +__asm__ __volatile__ ("!dummy %0 %1 %2 %3" \ + : \ + : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) ); \ +__syscall_return(type,__sc0); \ +} + +#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ +type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ +{ \ +register unsigned long __sc0 __asm__ ("r9") = ((0x14 << 16) | __NR_##name); \ +register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \ +register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \ +register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3; \ +register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4; \ +__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3,%4,%5)" \ + : "=r" (__sc0) \ + : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5) );\ +__asm__ __volatile__ ("!dummy %0 %1 %2 %3 %4" \ + : \ + : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5) );\ +__syscall_return(type,__sc0); \ +} + +#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \ +type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ +{ \ +register unsigned long __sc0 __asm__ ("r9") = ((0x15 << 16) | __NR_##name); \ +register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \ +register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \ +register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3; \ +register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4; \ +register unsigned long __sc6 __asm__ ("r6") = (unsigned long) arg5; \ +__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3,%4,%5,%6)" \ + : "=r" (__sc0) \ + : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5), \ + "r" (__sc6)); \ +__asm__ __volatile__ ("!dummy %0 %1 %2 %3 %4 %5" \ + : \ + : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5), \ + "r" (__sc6)); \ +__syscall_return(type,__sc0); \ +} + +#define 
_syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5, type6, arg6) \ +type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \ +{ \ +register unsigned long __sc0 __asm__ ("r9") = ((0x16 << 16) | __NR_##name); \ +register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \ +register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \ +register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3; \ +register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4; \ +register unsigned long __sc6 __asm__ ("r6") = (unsigned long) arg5; \ +register unsigned long __sc7 __asm__ ("r7") = (unsigned long) arg6; \ +__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3,%4,%5,%6,%7)" \ + : "=r" (__sc0) \ + : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5), \ + "r" (__sc6), "r" (__sc7)); \ +__asm__ __volatile__ ("!dummy %0 %1 %2 %3 %4 %5 %6" \ + : \ + : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5), \ + "r" (__sc6), "r" (__sc7)); \ +__syscall_return(type,__sc0); \ +} #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR diff --git a/trunk/include/asm-sparc/unistd.h b/trunk/include/asm-sparc/unistd.h index d5b2f8053b3b..f7827fa4cd5e 100644 --- a/trunk/include/asm-sparc/unistd.h +++ b/trunk/include/asm-sparc/unistd.h @@ -329,6 +329,136 @@ * find a free slot in the 0-302 range. */ +#define _syscall0(type,name) \ +type name(void) \ +{ \ +long __res; \ +register long __g1 __asm__ ("g1") = __NR_##name; \ +__asm__ __volatile__ ("t 0x10\n\t" \ + "bcc 1f\n\t" \ + "mov %%o0, %0\n\t" \ + "sub %%g0, %%o0, %0\n\t" \ + "1:\n\t" \ + : "=r" (__res)\ + : "r" (__g1) \ + : "o0", "cc"); \ +if (__res < -255 || __res >= 0) \ + return (type) __res; \ +errno = -__res; \ +return -1; \ +} + +#define _syscall1(type,name,type1,arg1) \ +type name(type1 arg1) \ +{ \ +long __res; \ +register long __g1 __asm__ ("g1") = __NR_##name; \ +register long __o0 __asm__ ("o0") = (long)(arg1); \ +__asm__ __volatile__ ("t 0x10\n\t" \ + "bcc 1f\n\t" \ + "mov %%o0, %0\n\t" \ + "sub %%g0, %%o0, %0\n\t" \ + "1:\n\t" \ + : "=r" (__res), "=&r" (__o0) \ + : "1" (__o0), "r" (__g1) \ + : "cc"); \ +if (__res < -255 || __res >= 0) \ + return (type) __res; \ +errno = -__res; \ +return -1; \ +} + +#define _syscall2(type,name,type1,arg1,type2,arg2) \ +type name(type1 arg1,type2 arg2) \ +{ \ +long __res; \ +register long __g1 __asm__ ("g1") = __NR_##name; \ +register long __o0 __asm__ ("o0") = (long)(arg1); \ +register long __o1 __asm__ ("o1") = (long)(arg2); \ +__asm__ __volatile__ ("t 0x10\n\t" \ + "bcc 1f\n\t" \ + "mov %%o0, %0\n\t" \ + "sub %%g0, %%o0, %0\n\t" \ + "1:\n\t" \ + : "=r" (__res), "=&r" (__o0) \ + : "1" (__o0), "r" (__o1), "r" (__g1) \ + : "cc"); \ +if (__res < -255 || __res >= 0) \ + return (type) __res; \ +errno = -__res; \ +return -1; \ +} + +#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ +type name(type1 arg1,type2 arg2,type3 arg3) \ +{ \ +long __res; \ +register long __g1 __asm__ ("g1") = __NR_##name; \ +register long __o0 __asm__ ("o0") = (long)(arg1); \ +register long __o1 __asm__ ("o1") = (long)(arg2); \ +register long __o2 __asm__ ("o2") = (long)(arg3); \ +__asm__ __volatile__ ("t 0x10\n\t" \ + "bcc 1f\n\t" \ + "mov %%o0, %0\n\t" \ + "sub %%g0, %%o0, %0\n\t" \ + "1:\n\t" \ + : "=r" (__res), "=&r" (__o0) \ + : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1) \ + : "cc"); \ +if (__res < -255 || __res>=0) \ + return (type) __res; \ +errno = -__res; \ +return -1; \ +} + +#define 
_syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ +type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ +{ \ +long __res; \ +register long __g1 __asm__ ("g1") = __NR_##name; \ +register long __o0 __asm__ ("o0") = (long)(arg1); \ +register long __o1 __asm__ ("o1") = (long)(arg2); \ +register long __o2 __asm__ ("o2") = (long)(arg3); \ +register long __o3 __asm__ ("o3") = (long)(arg4); \ +__asm__ __volatile__ ("t 0x10\n\t" \ + "bcc 1f\n\t" \ + "mov %%o0, %0\n\t" \ + "sub %%g0, %%o0, %0\n\t" \ + "1:\n\t" \ + : "=r" (__res), "=&r" (__o0) \ + : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__g1) \ + : "cc"); \ +if (__res < -255 || __res>=0) \ + return (type) __res; \ +errno = -__res; \ +return -1; \ +} + +#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ + type5,arg5) \ +type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ +{ \ +long __res; \ +register long __g1 __asm__ ("g1") = __NR_##name; \ +register long __o0 __asm__ ("o0") = (long)(arg1); \ +register long __o1 __asm__ ("o1") = (long)(arg2); \ +register long __o2 __asm__ ("o2") = (long)(arg3); \ +register long __o3 __asm__ ("o3") = (long)(arg4); \ +register long __o4 __asm__ ("o4") = (long)(arg5); \ +__asm__ __volatile__ ("t 0x10\n\t" \ + "bcc 1f\n\t" \ + "mov %%o0, %0\n\t" \ + "sub %%g0, %%o0, %0\n\t" \ + "1:\n\t" \ + : "=r" (__res), "=&r" (__o0) \ + : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__o4), "r" (__g1) \ + : "cc"); \ +if (__res < -255 || __res>=0) \ + return (type) __res; \ +errno = -__res; \ +return -1; \ +} + #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_STAT64 diff --git a/trunk/include/asm-sparc64/dma-mapping.h b/trunk/include/asm-sparc64/dma-mapping.h index 2f858a2df94a..27c46fbeebd6 100644 --- a/trunk/include/asm-sparc64/dma-mapping.h +++ b/trunk/include/asm-sparc64/dma-mapping.h @@ -181,7 +181,7 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t siz #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -#define dma_is_consistent(d, h) (1) +#define dma_is_consistent(d) (1) static inline int dma_get_cache_alignment(void) @@ -210,7 +210,7 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, } static inline void -dma_cache_sync(struct device *dev, void *vaddr, size_t size, +dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction) { /* could define this in terms of the dma_cache ... operations, diff --git a/trunk/include/asm-sparc64/futex.h b/trunk/include/asm-sparc64/futex.h index 876312fe82cc..7392fc4a954e 100644 --- a/trunk/include/asm-sparc64/futex.h +++ b/trunk/include/asm-sparc64/futex.h @@ -45,7 +45,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - pagefault_disable(); + inc_preempt_count(); switch (op) { case FUTEX_OP_SET: @@ -67,7 +67,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) ret = -ENOSYS; } - pagefault_enable(); + dec_preempt_count(); if (!ret) { switch (cmp) { diff --git a/trunk/include/asm-sparc64/pgalloc.h b/trunk/include/asm-sparc64/pgalloc.h index 5891ff7ba760..010f9cd0a672 100644 --- a/trunk/include/asm-sparc64/pgalloc.h +++ b/trunk/include/asm-sparc64/pgalloc.h @@ -13,7 +13,7 @@ #include /* Page table allocation/freeing. 
*/ -extern struct kmem_cache *pgtable_cache; +extern kmem_cache_t *pgtable_cache; static inline pgd_t *pgd_alloc(struct mm_struct *mm) { diff --git a/trunk/include/asm-sparc64/unistd.h b/trunk/include/asm-sparc64/unistd.h index 47047536f261..63669dad0d72 100644 --- a/trunk/include/asm-sparc64/unistd.h +++ b/trunk/include/asm-sparc64/unistd.h @@ -332,6 +332,124 @@ * find a free slot in the 0-302 range. */ +#define _syscall0(type,name) \ +type name(void) \ +{ \ +long __res; \ +register long __g1 __asm__ ("g1") = __NR_##name; \ +__asm__ __volatile__ ("t 0x6d\n\t" \ + "sub %%g0, %%o0, %0\n\t" \ + "movcc %%xcc, %%o0, %0\n\t" \ + : "=r" (__res)\ + : "r" (__g1) \ + : "o0", "cc"); \ +if (__res >= 0) \ + return (type) __res; \ +errno = -__res; \ +return -1; \ +} + +#define _syscall1(type,name,type1,arg1) \ +type name(type1 arg1) \ +{ \ +long __res; \ +register long __g1 __asm__ ("g1") = __NR_##name; \ +register long __o0 __asm__ ("o0") = (long)(arg1); \ +__asm__ __volatile__ ("t 0x6d\n\t" \ + "sub %%g0, %%o0, %0\n\t" \ + "movcc %%xcc, %%o0, %0\n\t" \ + : "=r" (__res), "=&r" (__o0) \ + : "1" (__o0), "r" (__g1) \ + : "cc"); \ +if (__res >= 0) \ + return (type) __res; \ +errno = -__res; \ +return -1; \ +} + +#define _syscall2(type,name,type1,arg1,type2,arg2) \ +type name(type1 arg1,type2 arg2) \ +{ \ +long __res; \ +register long __g1 __asm__ ("g1") = __NR_##name; \ +register long __o0 __asm__ ("o0") = (long)(arg1); \ +register long __o1 __asm__ ("o1") = (long)(arg2); \ +__asm__ __volatile__ ("t 0x6d\n\t" \ + "sub %%g0, %%o0, %0\n\t" \ + "movcc %%xcc, %%o0, %0\n\t" \ + : "=r" (__res), "=&r" (__o0) \ + : "1" (__o0), "r" (__o1), "r" (__g1) \ + : "cc"); \ +if (__res >= 0) \ + return (type) __res; \ +errno = -__res; \ +return -1; \ +} + +#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ +type name(type1 arg1,type2 arg2,type3 arg3) \ +{ \ +long __res; \ +register long __g1 __asm__ ("g1") = __NR_##name; \ +register long __o0 __asm__ ("o0") = (long)(arg1); \ +register long __o1 __asm__ ("o1") = (long)(arg2); \ +register long __o2 __asm__ ("o2") = (long)(arg3); \ +__asm__ __volatile__ ("t 0x6d\n\t" \ + "sub %%g0, %%o0, %0\n\t" \ + "movcc %%xcc, %%o0, %0\n\t" \ + : "=r" (__res), "=&r" (__o0) \ + : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1) \ + : "cc"); \ +if (__res>=0) \ + return (type) __res; \ +errno = -__res; \ +return -1; \ +} + +#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ +type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ +{ \ +long __res; \ +register long __g1 __asm__ ("g1") = __NR_##name; \ +register long __o0 __asm__ ("o0") = (long)(arg1); \ +register long __o1 __asm__ ("o1") = (long)(arg2); \ +register long __o2 __asm__ ("o2") = (long)(arg3); \ +register long __o3 __asm__ ("o3") = (long)(arg4); \ +__asm__ __volatile__ ("t 0x6d\n\t" \ + "sub %%g0, %%o0, %0\n\t" \ + "movcc %%xcc, %%o0, %0\n\t" \ + : "=r" (__res), "=&r" (__o0) \ + : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__g1) \ + : "cc"); \ +if (__res>=0) \ + return (type) __res; \ +errno = -__res; \ +return -1; \ +} + +#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ + type5,arg5) \ +type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ +{ \ +long __res; \ +register long __g1 __asm__ ("g1") = __NR_##name; \ +register long __o0 __asm__ ("o0") = (long)(arg1); \ +register long __o1 __asm__ ("o1") = (long)(arg2); \ +register long __o2 __asm__ ("o2") = (long)(arg3); \ +register long __o3 __asm__ ("o3") = (long)(arg4); \ +register long __o4 
__asm__ ("o4") = (long)(arg5); \ +__asm__ __volatile__ ("t 0x6d\n\t" \ + "sub %%g0, %%o0, %0\n\t" \ + "movcc %%xcc, %%o0, %0\n\t" \ + : "=r" (__res), "=&r" (__o0) \ + : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__o4), "r" (__g1) \ + : "cc"); \ +if (__res>=0) \ + return (type) __res; \ +errno = -__res; \ +return -1; \ +} + /* sysconf options, for SunOS compatibility */ #define _SC_ARG_MAX 1 #define _SC_CHILD_MAX 2 diff --git a/trunk/include/asm-um/dma-mapping.h b/trunk/include/asm-um/dma-mapping.h index f0ee4fb55911..babd29895114 100644 --- a/trunk/include/asm-um/dma-mapping.h +++ b/trunk/include/asm-um/dma-mapping.h @@ -94,7 +94,7 @@ dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems, #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -#define dma_is_consistent(d, h) (1) +#define dma_is_consistent(d) (1) static inline int dma_get_cache_alignment(void) @@ -112,7 +112,7 @@ dma_sync_single_range(struct device *dev, dma_addr_t dma_handle, } static inline void -dma_cache_sync(struct device *dev, void *vaddr, size_t size, +dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction) { BUG(); diff --git a/trunk/include/asm-v850/irq.h b/trunk/include/asm-v850/irq.h index 88687c181f01..1bf096db8f4c 100644 --- a/trunk/include/asm-v850/irq.h +++ b/trunk/include/asm-v850/irq.h @@ -46,6 +46,8 @@ extern void init_irq_handlers (int base_irq, int num, int interval, struct hw_interrupt_type *irq_type); +typedef void (*irq_handler_t)(int irq, void *data, struct pt_regs *regs); + /* Handle interrupt IRQ. REGS are the registers at the time of ther interrupt. */ extern unsigned int handle_irq (int irq, struct pt_regs *regs); diff --git a/trunk/include/asm-v850/unistd.h b/trunk/include/asm-v850/unistd.h index 2241ed45ecfe..737401e7d3ad 100644 --- a/trunk/include/asm-v850/unistd.h +++ b/trunk/include/asm-v850/unistd.h @@ -204,8 +204,168 @@ #define __NR_gettid 201 #define __NR_tkill 202 + +/* Syscall protocol: + Syscall number in r12, args in r6-r9, r13-r14 + Return value in r10 + Trap 0 for `short' syscalls, where all the args can fit in function + call argument registers, and trap 1 when there are additional args in + r13-r14. */ + +#define SYSCALL_NUM "r12" +#define SYSCALL_ARG0 "r6" +#define SYSCALL_ARG1 "r7" +#define SYSCALL_ARG2 "r8" +#define SYSCALL_ARG3 "r9" +#define SYSCALL_ARG4 "r13" +#define SYSCALL_ARG5 "r14" +#define SYSCALL_RET "r10" + +#define SYSCALL_SHORT_TRAP "0" +#define SYSCALL_LONG_TRAP "1" + +/* Registers clobbered by any syscall. This _doesn't_ include the syscall + number (r12) or the `extended arg' registers (r13, r14), even though + they are actually clobbered too (this is because gcc's `asm' statement + doesn't allow a clobber to be used as an input or output). */ +#define SYSCALL_CLOBBERS "r1", "r5", "r11", "r15", "r16", \ + "r17", "r18", "r19" + +/* Registers clobbered by a `short' syscall. This includes all clobbers + except the syscall number (r12). 
*/ +#define SYSCALL_SHORT_CLOBBERS SYSCALL_CLOBBERS, "r13", "r14" + #ifdef __KERNEL__ +#include +#include + +#define __syscall_return(type, res) \ + do { \ + /* user-visible error numbers are in the range -1 - -MAX_ERRNO: \ + see */ \ + if (__builtin_expect ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO), 0)) { \ + errno = -(res); \ + res = -1; \ + } \ + return (type) (res); \ + } while (0) + + +#define _syscall0(type, name) \ +type name (void) \ +{ \ + register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name; \ + register unsigned long __ret __asm__ (SYSCALL_RET); \ + __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP \ + : "=r" (__ret), "=r" (__syscall) \ + : "1" (__syscall) \ + : SYSCALL_SHORT_CLOBBERS); \ + __syscall_return (type, __ret); \ +} + +#define _syscall1(type, name, atype, a) \ +type name (atype a) \ +{ \ + register atype __a __asm__ (SYSCALL_ARG0) = a; \ + register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name; \ + register unsigned long __ret __asm__ (SYSCALL_RET); \ + __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP \ + : "=r" (__ret), "=r" (__syscall) \ + : "1" (__syscall), "r" (__a) \ + : SYSCALL_SHORT_CLOBBERS); \ + __syscall_return (type, __ret); \ +} + +#define _syscall2(type, name, atype, a, btype, b) \ +type name (atype a, btype b) \ +{ \ + register atype __a __asm__ (SYSCALL_ARG0) = a; \ + register btype __b __asm__ (SYSCALL_ARG1) = b; \ + register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name; \ + register unsigned long __ret __asm__ (SYSCALL_RET); \ + __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP \ + : "=r" (__ret), "=r" (__syscall) \ + : "1" (__syscall), "r" (__a), "r" (__b) \ + : SYSCALL_SHORT_CLOBBERS); \ + __syscall_return (type, __ret); \ +} + +#define _syscall3(type, name, atype, a, btype, b, ctype, c) \ +type name (atype a, btype b, ctype c) \ +{ \ + register atype __a __asm__ (SYSCALL_ARG0) = a; \ + register btype __b __asm__ (SYSCALL_ARG1) = b; \ + register ctype __c __asm__ (SYSCALL_ARG2) = c; \ + register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name; \ + register unsigned long __ret __asm__ (SYSCALL_RET); \ + __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP \ + : "=r" (__ret), "=r" (__syscall) \ + : "1" (__syscall), "r" (__a), "r" (__b), "r" (__c) \ + : SYSCALL_SHORT_CLOBBERS); \ + __syscall_return (type, __ret); \ +} + +#define _syscall4(type, name, atype, a, btype, b, ctype, c, dtype, d) \ +type name (atype a, btype b, ctype c, dtype d) \ +{ \ + register atype __a __asm__ (SYSCALL_ARG0) = a; \ + register btype __b __asm__ (SYSCALL_ARG1) = b; \ + register ctype __c __asm__ (SYSCALL_ARG2) = c; \ + register dtype __d __asm__ (SYSCALL_ARG3) = d; \ + register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name; \ + register unsigned long __ret __asm__ (SYSCALL_RET); \ + __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP \ + : "=r" (__ret), "=r" (__syscall) \ + : "1" (__syscall), \ + "r" (__a), "r" (__b), "r" (__c), "r" (__d) \ + : SYSCALL_SHORT_CLOBBERS); \ + __syscall_return (type, __ret); \ +} + +#define _syscall5(type, name, atype, a, btype, b, ctype, c, dtype, d, etype,e)\ +type name (atype a, btype b, ctype c, dtype d, etype e) \ +{ \ + register atype __a __asm__ (SYSCALL_ARG0) = a; \ + register btype __b __asm__ (SYSCALL_ARG1) = b; \ + register ctype __c __asm__ (SYSCALL_ARG2) = c; \ + register dtype __d __asm__ (SYSCALL_ARG3) = d; \ + register etype __e __asm__ (SYSCALL_ARG4) = e; \ + register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name; \ + register unsigned long 
__ret __asm__ (SYSCALL_RET); \ + __asm__ __volatile__ ("trap " SYSCALL_LONG_TRAP \ + : "=r" (__ret), "=r" (__syscall), "=r" (__e) \ + : "1" (__syscall), \ + "r" (__a), "r" (__b), "r" (__c), "r" (__d), "2" (__e) \ + : SYSCALL_CLOBBERS); \ + __syscall_return (type, __ret); \ +} + +#define __SYSCALL6_TRAP(syscall, ret, a, b, c, d, e, f) \ + __asm__ __volatile__ ("trap " SYSCALL_LONG_TRAP \ + : "=r" (ret), "=r" (syscall), \ + "=r" (e), "=r" (f) \ + : "1" (syscall), \ + "r" (a), "r" (b), "r" (c), "r" (d), \ + "2" (e), "3" (f) \ + : SYSCALL_CLOBBERS); + +#define _syscall6(type, name, atype, a, btype, b, ctype, c, dtype, d, etype, e, ftype, f) \ +type name (atype a, btype b, ctype c, dtype d, etype e, ftype f) \ +{ \ + register atype __a __asm__ (SYSCALL_ARG0) = a; \ + register btype __b __asm__ (SYSCALL_ARG1) = b; \ + register ctype __c __asm__ (SYSCALL_ARG2) = c; \ + register dtype __d __asm__ (SYSCALL_ARG3) = d; \ + register etype __e __asm__ (SYSCALL_ARG4) = e; \ + register etype __f __asm__ (SYSCALL_ARG5) = f; \ + register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name; \ + register unsigned long __ret __asm__ (SYSCALL_RET); \ + __SYSCALL6_TRAP(__syscall, __ret, __a, __b, __c, __d, __e, __f); \ + __syscall_return (type, __ret); \ +} + + #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_STAT64 diff --git a/trunk/include/asm-x86_64/Kbuild b/trunk/include/asm-x86_64/Kbuild index 763521358fb8..1ee9b07f3fe6 100644 --- a/trunk/include/asm-x86_64/Kbuild +++ b/trunk/include/asm-x86_64/Kbuild @@ -12,6 +12,7 @@ header-y += ldt.h header-y += msr.h header-y += prctl.h header-y += ptrace-abi.h +header-y += setup.h header-y += sigcontext32.h header-y += ucontext.h header-y += vsyscall32.h diff --git a/trunk/include/asm-x86_64/atomic.h b/trunk/include/asm-x86_64/atomic.h index 93849f7abc24..007e88d6d43f 100644 --- a/trunk/include/asm-x86_64/atomic.h +++ b/trunk/include/asm-x86_64/atomic.h @@ -21,7 +21,7 @@ * on us. We need to use _exactly_ the address the user gave us, * not some alias that contains the same information. */ -typedef struct { int counter; } atomic_t; +typedef struct { volatile int counter; } atomic_t; #define ATOMIC_INIT(i) { (i) } diff --git a/trunk/include/asm-x86_64/dma-mapping.h b/trunk/include/asm-x86_64/dma-mapping.h index be9ec6890723..10174b110a5c 100644 --- a/trunk/include/asm-x86_64/dma-mapping.h +++ b/trunk/include/asm-x86_64/dma-mapping.h @@ -180,13 +180,12 @@ static inline int dma_get_cache_alignment(void) return boot_cpu_data.x86_clflush_size; } -#define dma_is_consistent(d, h) 1 +#define dma_is_consistent(h) 1 extern int dma_set_mask(struct device *dev, u64 mask); static inline void -dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction dir) +dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir) { flush_write_buffers(); } diff --git a/trunk/include/asm-x86_64/futex.h b/trunk/include/asm-x86_64/futex.h index 5cdfb08013c3..9804bf07b092 100644 --- a/trunk/include/asm-x86_64/futex.h +++ b/trunk/include/asm-x86_64/futex.h @@ -55,7 +55,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - pagefault_disable(); + inc_preempt_count(); switch (op) { case FUTEX_OP_SET: @@ -78,7 +78,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ret = -ENOSYS; } - pagefault_enable(); + dec_preempt_count(); if (!ret) { switch (cmp) { diff --git a/trunk/include/asm-x86_64/smp.h b/trunk/include/asm-x86_64/smp.h index f1bdd500d7ac..d6b7c057edba 100644 --- a/trunk/include/asm-x86_64/smp.h +++ b/trunk/include/asm-x86_64/smp.h @@ -118,6 +118,13 @@ static __inline int logical_smp_processor_id(void) #define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] #else #define cpu_physical_id(cpu) boot_cpu_id +static inline int smp_call_function_single(int cpuid, void (*func) (void *info), + void *info, int retry, int wait) +{ + /* Disable interrupts here? */ + func(info); + return 0; +} #endif /* !CONFIG_SMP */ #endif diff --git a/trunk/include/asm-x86_64/spinlock_types.h b/trunk/include/asm-x86_64/spinlock_types.h index 4da9345c1500..59efe849f351 100644 --- a/trunk/include/asm-x86_64/spinlock_types.h +++ b/trunk/include/asm-x86_64/spinlock_types.h @@ -6,13 +6,13 @@ #endif typedef struct { - unsigned int slock; + volatile unsigned int slock; } raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 1 } typedef struct { - unsigned int lock; + volatile unsigned int lock; } raw_rwlock_t; #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } diff --git a/trunk/include/asm-x86_64/unistd.h b/trunk/include/asm-x86_64/unistd.h index c5f596e71faa..777288eb7e75 100644 --- a/trunk/include/asm-x86_64/unistd.h +++ b/trunk/include/asm-x86_64/unistd.h @@ -622,7 +622,25 @@ __SYSCALL(__NR_move_pages, sys_move_pages) #define __NR_syscall_max __NR_move_pages +#ifdef __KERNEL__ +#include +#endif + #ifndef __NO_STUBS + +/* user-visible error numbers are in the range -1 - -MAX_ERRNO */ + +#define __syscall_clobber "r11","rcx","memory" + +#define __syscall_return(type, res) \ +do { \ + if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \ + errno = -(res); \ + res = -1; \ + } \ + return (type) (res); \ +} while (0) + #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT #define __ARCH_WANT_SYS_ALARM @@ -646,6 +664,87 @@ __SYSCALL(__NR_move_pages, sys_move_pages) #define __ARCH_WANT_SYS_TIME #define __ARCH_WANT_COMPAT_SYS_TIME +#define __syscall "syscall" + +#define _syscall0(type,name) \ +type name(void) \ +{ \ +long __res; \ +__asm__ volatile (__syscall \ + : "=a" (__res) \ + : "0" (__NR_##name) : __syscall_clobber ); \ +__syscall_return(type,__res); \ +} + +#define _syscall1(type,name,type1,arg1) \ +type name(type1 arg1) \ +{ \ +long __res; \ +__asm__ volatile (__syscall \ + : "=a" (__res) \ + : "0" (__NR_##name),"D" ((long)(arg1)) : __syscall_clobber ); \ +__syscall_return(type,__res); \ +} + +#define _syscall2(type,name,type1,arg1,type2,arg2) \ +type name(type1 arg1,type2 arg2) \ +{ \ +long __res; \ +__asm__ volatile (__syscall \ + : "=a" (__res) \ + : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)) : __syscall_clobber ); \ +__syscall_return(type,__res); \ +} + +#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ +type name(type1 arg1,type2 arg2,type3 arg3) \ +{ \ +long __res; \ +__asm__ volatile (__syscall \ + : "=a" (__res) \ + : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \ + "d" ((long)(arg3)) : __syscall_clobber); \ +__syscall_return(type,__res); \ +} + +#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ +type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ +{ \ +long __res; \ 
+__asm__ volatile ("movq %5,%%r10 ;" __syscall \ + : "=a" (__res) \ + : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \ + "d" ((long)(arg3)),"g" ((long)(arg4)) : __syscall_clobber,"r10" ); \ +__syscall_return(type,__res); \ +} + +#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ + type5,arg5) \ +type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ +{ \ +long __res; \ +__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall \ + : "=a" (__res) \ + : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \ + "d" ((long)(arg3)),"g" ((long)(arg4)),"g" ((long)(arg5)) : \ + __syscall_clobber,"r8","r10" ); \ +__syscall_return(type,__res); \ +} + +#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ + type5,arg5,type6,arg6) \ +type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \ +{ \ +long __res; \ +__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; movq %7,%%r9 ; " __syscall \ + : "=a" (__res) \ + : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \ + "d" ((long)(arg3)), "g" ((long)(arg4)), "g" ((long)(arg5)), \ + "g" ((long)(arg6)) : \ + __syscall_clobber,"r8","r10","r9" ); \ +__syscall_return(type,__res); \ +} + #ifdef __KERNEL__ #ifndef __ASSEMBLY__ diff --git a/trunk/include/asm-xtensa/dma-mapping.h b/trunk/include/asm-xtensa/dma-mapping.h index 82b03b3a2ee6..c39c91dfcc69 100644 --- a/trunk/include/asm-xtensa/dma-mapping.h +++ b/trunk/include/asm-xtensa/dma-mapping.h @@ -170,10 +170,10 @@ dma_get_cache_alignment(void) return L1_CACHE_BYTES; } -#define dma_is_consistent(d, h) (1) +#define dma_is_consistent(d) (1) static inline void -dma_cache_sync(struct device *dev, void *vaddr, size_t size, +dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction) { consistent_sync(vaddr, size, direction); diff --git a/trunk/include/asm-xtensa/unistd.h b/trunk/include/asm-xtensa/unistd.h index 2e1a1b997e7d..411f810a55c6 100644 --- a/trunk/include/asm-xtensa/unistd.h +++ b/trunk/include/asm-xtensa/unistd.h @@ -218,6 +218,190 @@ #define SYSXTENSA_COUNT 5 /* count of syscall0 functions*/ +#ifdef __KERNEL__ +#include + +#define __syscall_return(type, res) return ((type)(res)) + +/* Tensilica's xt-xcc compiler is much more agressive at code + * optimization than gcc. Multiple __asm__ statements are + * insufficient for xt-xcc because subsequent optimization passes + * (beyond the front-end that knows of __asm__ statements and other + * such GNU Extensions to C) can modify the register selection for + * containment of C variables. + * + * xt-xcc cannot modify the contents of a single __asm__ statement, so + * we create single-asm versions of the syscall macros that are + * suitable and optimal for both xt-xcc and gcc. + * + * Linux takes system-call arguments in registers. The following + * design is optimized for user-land apps (e.g., glibc) which + * typically have a function wrapper around the "syscall" assembly + * instruction. It satisfies the Xtensa ABI while minizing argument + * shifting. + * + * The Xtensa ABI and software conventions require the system-call + * number in a2. If an argument exists in a2, we move it to the next + * available register. Note that for improved efficiency, we do NOT + * shift all parameters down one register to maintain the original + * order. + * + * At best case (zero arguments), we just write the syscall number to + * a2. 
At worst case (1 to 6 arguments), we move the argument in a2 + * to the next available register, then write the syscall number to + * a2. + * + * For clarity, the following truth table enumerates all possibilities. + * + * arguments syscall number arg0, arg1, arg2, arg3, arg4, arg5 + * --------- -------------- ---------------------------------- + * 0 a2 + * 1 a2 a3 + * 2 a2 a4, a3 + * 3 a2 a5, a3, a4 + * 4 a2 a6, a3, a4, a5 + * 5 a2 a7, a3, a4, a5, a6 + * 6 a2 a8, a3, a4, a5, a6, a7 + */ + +#define _syscall0(type,name) \ +type name(void) \ +{ \ +long __res; \ +__asm__ __volatile__ ( \ + " movi a2, %1 \n" \ + " syscall \n" \ + " mov %0, a2 \n" \ + : "=a" (__res) \ + : "i" (__NR_##name) \ + : "a2" \ + ); \ +__syscall_return(type,__res); \ +} + +#define _syscall1(type,name,type0,arg0) \ +type name(type0 arg0) \ +{ \ +long __res; \ +__asm__ __volatile__ ( \ + " mov a3, %2 \n" \ + " movi a2, %1 \n" \ + " syscall \n" \ + " mov %0, a2 \n" \ + : "=a" (__res) \ + : "i" (__NR_##name), "a" (arg0) \ + : "a2", "a3" \ + ); \ +__syscall_return(type,__res); \ +} + +#define _syscall2(type,name,type0,arg0,type1,arg1) \ +type name(type0 arg0,type1 arg1) \ +{ \ +long __res; \ +__asm__ __volatile__ ( \ + " mov a4, %2 \n" \ + " mov a3, %3 \n" \ + " movi a2, %1 \n" \ + " syscall \n" \ + " mov %0, a2 \n" \ + : "=a" (__res) \ + : "i" (__NR_##name), "a" (arg0), "a" (arg1) \ + : "a2", "a3", "a4" \ + ); \ +__syscall_return(type,__res); \ +} + +#define _syscall3(type,name,type0,arg0,type1,arg1,type2,arg2) \ +type name(type0 arg0,type1 arg1,type2 arg2) \ +{ \ +long __res; \ +__asm__ __volatile__ ( \ + " mov a5, %2 \n" \ + " mov a4, %4 \n" \ + " mov a3, %3 \n" \ + " movi a2, %1 \n" \ + " syscall \n" \ + " mov %0, a2 \n" \ + : "=a" (__res) \ + : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2) \ + : "a2", "a3", "a4", "a5" \ + ); \ +__syscall_return(type,__res); \ +} + +#define _syscall4(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3) \ +type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3) \ +{ \ +long __res; \ +__asm__ __volatile__ ( \ + " mov a6, %2 \n" \ + " mov a5, %5 \n" \ + " mov a4, %4 \n" \ + " mov a3, %3 \n" \ + " movi a2, %1 \n" \ + " syscall \n" \ + " mov %0, a2 \n" \ + : "=a" (__res) \ + : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), "a" (arg3) \ + : "a2", "a3", "a4", "a5", "a6" \ + ); \ +__syscall_return(type,__res); \ +} + +/* Note that we save and restore the a7 frame pointer. + * Including a7 in the clobber list doesn't do what you'd expect. + */ +#define _syscall5(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ +type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3,type4 arg4) \ +{ \ +long __res; \ +__asm__ __volatile__ ( \ + " mov a9, a7 \n" \ + " mov a7, %2 \n" \ + " mov a6, %6 \n" \ + " mov a5, %5 \n" \ + " mov a4, %4 \n" \ + " mov a3, %3 \n" \ + " movi a2, %1 \n" \ + " syscall \n" \ + " mov a7, a9 \n" \ + " mov %0, a2 \n" \ + : "=a" (__res) \ + : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), \ + "a" (arg3), "a" (arg4) \ + : "a2", "a3", "a4", "a5", "a6", "a9" \ + ); \ +__syscall_return(type,__res); \ +} + +/* Note that we save and restore the a7 frame pointer. + * Including a7 in the clobber list doesn't do what you'd expect. 
+ */ +#define _syscall6(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \ +type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ +{ \ +long __res; \ +__asm__ __volatile__ ( \ + " mov a9, a7 \n" \ + " mov a8, %2 \n" \ + " mov a7, %7 \n" \ + " mov a6, %6 \n" \ + " mov a5, %5 \n" \ + " mov a4, %4 \n" \ + " mov a3, %3 \n" \ + " movi a2, %1 \n" \ + " syscall \n" \ + " mov a7, a9 \n" \ + " mov %0, a2 \n" \ + : "=a" (__res) \ + : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), \ + "a" (arg3), "a" (arg4), "a" (arg5) \ + : "a2", "a3", "a4", "a5", "a6", "a8", "a9" \ + ); \ +__syscall_return(type,__res); \ +} + /* * "Conditional" syscalls * diff --git a/trunk/include/linux/Kbuild b/trunk/include/linux/Kbuild index e618b25b5add..ff433126361f 100644 --- a/trunk/include/linux/Kbuild +++ b/trunk/include/linux/Kbuild @@ -221,7 +221,6 @@ unifdef-y += if_bridge.h unifdef-y += if_ec.h unifdef-y += if_eql.h unifdef-y += if_ether.h -unifdef-y += if_fddi.h unifdef-y += if_frad.h unifdef-y += if_ltalk.h unifdef-y += if_pppox.h @@ -283,7 +282,6 @@ unifdef-y += nvram.h unifdef-y += parport.h unifdef-y += patchkey.h unifdef-y += pci.h -unifdef-y += personality.h unifdef-y += pktcdvd.h unifdef-y += pmu.h unifdef-y += poll.h @@ -339,7 +337,6 @@ unifdef-y += videodev.h unifdef-y += wait.h unifdef-y += wanrouter.h unifdef-y += watchdog.h -unifdef-y += wireless.h unifdef-y += xfrm.h objhdr-y += version.h diff --git a/trunk/include/linux/aio.h b/trunk/include/linux/aio.h index 3372ec6bf53a..0d71c0041f13 100644 --- a/trunk/include/linux/aio.h +++ b/trunk/include/linux/aio.h @@ -111,6 +111,7 @@ struct kiocb { size_t ki_nbytes; /* copy of iocb->aio_nbytes */ char __user *ki_buf; /* remaining iocb->aio_buf */ size_t ki_left; /* remaining bytes */ + long ki_retried; /* just for testing */ struct iovec ki_inline_vec; /* inline vector */ struct iovec *ki_iovec; unsigned long ki_nr_segs; @@ -193,7 +194,7 @@ struct kioctx { struct aio_ring_info ring_info; - struct delayed_work wq; + struct work_struct wq; }; /* prototypes */ @@ -237,6 +238,7 @@ do { \ } while (0) #define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait) +#define is_retried_kiocb(iocb) ((iocb)->ki_retried > 1) #include diff --git a/trunk/include/linux/bootmem.h b/trunk/include/linux/bootmem.h index 2275f2748708..31e9abb6d977 100644 --- a/trunk/include/linux/bootmem.h +++ b/trunk/include/linux/bootmem.h @@ -119,7 +119,8 @@ extern void *alloc_large_system_hash(const char *tablename, unsigned int *_hash_mask, unsigned long limit); -#define HASH_EARLY 0x00000001 /* Allocating during early boot? */ +#define HASH_HIGHMEM 0x00000001 /* Consider highmem? */ +#define HASH_EARLY 0x00000002 /* Allocating during early boot? */ /* Only NUMA needs hash distribution. * IA64 is known to have sufficient vmalloc space. 
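Several of the unistd.h hunks above (asm-sh64, asm-v850, asm-x86_64) restore _syscallN wrapper macros that share the same __syscall_return() error convention: a raw return value in the range -MAX_ERRNO..-1 is folded into errno plus a -1 return, and anything else is handed back unchanged. The stand-alone sketch below is not part of the patch; it only mirrors that convention in plain user-space C, and both MAX_ERRNO and demo_syscall_return() are assumptions made for illustration.

/*
 * Illustrative sketch (not from the patch) of the __syscall_return()
 * convention: raw kernel return values in [-MAX_ERRNO, -1] encode errors,
 * everything else is a result.  MAX_ERRNO is assumed to be 4095, matching
 * kernel headers of this era.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static long demo_syscall_return(long raw)
{
	if ((unsigned long)raw >= (unsigned long)(-MAX_ERRNO)) {
		errno = -raw;	/* positive errno value for the caller */
		return -1;	/* what user space actually sees */
	}
	return raw;		/* success: pass the value through */
}

int main(void)
{
	long ok  = demo_syscall_return(42);	/* e.g. a file descriptor */
	long bad = demo_syscall_return(-EBADF);	/* raw kernel error code */

	printf("ok=%ld bad=%ld errno=%d\n", ok, bad, errno);
	return 0;
}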
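The asm-xtensa/unistd.h comment restored further up also enumerates how arguments are spread across registers once a2 is reserved for the syscall number: the argument that would normally sit in a2 is bumped to the first free register after the others, and the remaining arguments keep their usual slots. The following purely illustrative program encodes that table; reg_for_arg() is a hypothetical helper, not something defined by the header.

/*
 * Reproduces the register-assignment table from the Xtensa comment:
 * with nargs arguments, arg0 lands in a(2 + nargs), arg i (i >= 1)
 * stays in a(2 + i), and the syscall number is always in a2.
 */
#include <stdio.h>

static int reg_for_arg(int nargs, int i)
{
	return (i == 0) ? 2 + nargs : 2 + i;
}

int main(void)
{
	int nargs, i;

	for (nargs = 0; nargs <= 6; nargs++) {
		printf("%d args: number in a2;", nargs);
		for (i = 0; i < nargs; i++)
			printf(" arg%d in a%d", i, reg_for_arg(nargs, i));
		printf("\n");
	}
	return 0;
}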
diff --git a/trunk/include/linux/bottom_half.h b/trunk/include/linux/bottom_half.h deleted file mode 100644 index 777dbf695d44..000000000000 --- a/trunk/include/linux/bottom_half.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _LINUX_BH_H -#define _LINUX_BH_H - -extern void local_bh_disable(void); -extern void __local_bh_enable(void); -extern void _local_bh_enable(void); -extern void local_bh_enable(void); -extern void local_bh_enable_ip(unsigned long ip); - -#endif /* _LINUX_BH_H */ diff --git a/trunk/include/linux/carta_random32.h b/trunk/include/linux/carta_random32.h new file mode 100644 index 000000000000..f6f3bd9f20b5 --- /dev/null +++ b/trunk/include/linux/carta_random32.h @@ -0,0 +1,29 @@ +/* + * Fast, simple, yet decent quality random number generator based on + * a paper by David G. Carta ("Two Fast Implementations of the + * `Minimal Standard' Random Number Generator," Communications of the + * ACM, January, 1990). + * + * Copyright (c) 2002-2006 Hewlett-Packard Development Company, L.P. + * Contributed by Stephane Eranian + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + * 02111-1307 USA + */ +#ifndef _LINUX_CARTA_RANDOM32_H_ +#define _LINUX_CARTA_RANDOM32_H_ + +u64 carta_random32(u64 seed); + +#endif /* _LINUX_CARTA_RANDOM32_H_ */ diff --git a/trunk/include/linux/cciss_ioctl.h b/trunk/include/linux/cciss_ioctl.h index cb57c30081a8..6e27f42e3a57 100644 --- a/trunk/include/linux/cciss_ioctl.h +++ b/trunk/include/linux/cciss_ioctl.h @@ -80,7 +80,7 @@ typedef __u32 DriverVer_type; #define HWORD __u16 #define DWORD __u32 -#define CISS_MAX_LUN 1024 +#define CISS_MAX_LUN 16 #define LEVEL2LUN 1 // index into Target(x) structure, due to byte swapping #define LEVEL3LUN 0 diff --git a/trunk/include/linux/cdev.h b/trunk/include/linux/cdev.h index f309b00e986e..ee5f53f2ca15 100644 --- a/trunk/include/linux/cdev.h +++ b/trunk/include/linux/cdev.h @@ -2,10 +2,6 @@ #define _LINUX_CDEV_H #ifdef __KERNEL__ -#include -#include -#include - struct cdev { struct kobject kobj; struct module *owner; diff --git a/trunk/include/linux/connector.h b/trunk/include/linux/connector.h index 3ea1cd58de97..4c02119c6ab9 100644 --- a/trunk/include/linux/connector.h +++ b/trunk/include/linux/connector.h @@ -133,7 +133,7 @@ struct cn_callback_data { struct cn_callback_entry { struct list_head callback_entry; struct cn_callback *cb; - struct delayed_work work; + struct work_struct work; struct cn_queue_dev *pdev; struct cn_callback_id id; @@ -170,7 +170,7 @@ void cn_queue_free_dev(struct cn_queue_dev *dev); int cn_cb_equal(struct cb_id *, struct cb_id *); -void cn_queue_wrapper(struct work_struct *work); +void cn_queue_wrapper(void *data); extern int cn_already_initialized; diff --git a/trunk/include/linux/cpu.h b/trunk/include/linux/cpu.h index bf00ce6ecadf..f02d71bf6894 100644 --- a/trunk/include/linux/cpu.h +++ b/trunk/include/linux/cpu.h @@ -24,7 +24,6 @@ #include #include #include -#include struct cpu { int node_id; /* The 
node which contains the CPU */ @@ -75,17 +74,6 @@ extern struct sysdev_class cpu_sysdev_class; #ifdef CONFIG_HOTPLUG_CPU /* Stop CPUs going up and down. */ - -static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex) -{ - mutex_lock(cpu_hp_mutex); -} - -static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex) -{ - mutex_unlock(cpu_hp_mutex); -} - extern void lock_cpu_hotplug(void); extern void unlock_cpu_hotplug(void); #define hotcpu_notifier(fn, pri) { \ @@ -97,24 +85,17 @@ extern void unlock_cpu_hotplug(void); #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) int cpu_down(unsigned int cpu); #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) - -#else /* CONFIG_HOTPLUG_CPU */ - -static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex) -{ } -static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex) -{ } - +#else #define lock_cpu_hotplug() do { } while (0) #define unlock_cpu_hotplug() do { } while (0) #define lock_cpu_hotplug_interruptible() 0 -#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) -#define register_hotcpu_notifier(nb) do { (void)(nb); } while (0) -#define unregister_hotcpu_notifier(nb) do { (void)(nb); } while (0) +#define hotcpu_notifier(fn, pri) do { } while (0) +#define register_hotcpu_notifier(nb) do { } while (0) +#define unregister_hotcpu_notifier(nb) do { } while (0) /* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */ static inline int cpu_is_offline(int cpu) { return 0; } -#endif /* CONFIG_HOTPLUG_CPU */ +#endif #ifdef CONFIG_SUSPEND_SMP extern int disable_nonboot_cpus(void); diff --git a/trunk/include/linux/cpuset.h b/trunk/include/linux/cpuset.h index 8821e1f75b44..4d8adf663681 100644 --- a/trunk/include/linux/cpuset.h +++ b/trunk/include/linux/cpuset.h @@ -23,7 +23,6 @@ extern void cpuset_fork(struct task_struct *p); extern void cpuset_exit(struct task_struct *p); extern cpumask_t cpuset_cpus_allowed(struct task_struct *p); extern nodemask_t cpuset_mems_allowed(struct task_struct *p); -#define cpuset_current_mems_allowed (current->mems_allowed) void cpuset_init_current_mems_allowed(void); void cpuset_update_task_memory_state(void); #define cpuset_nodes_subset_current_mems_allowed(nodes) \ @@ -46,7 +45,7 @@ extern int cpuset_excl_nodes_overlap(const struct task_struct *p); extern int cpuset_memory_pressure_enabled; extern void __cpuset_memory_pressure_bump(void); -extern const struct file_operations proc_cpuset_operations; +extern struct file_operations proc_cpuset_operations; extern char *cpuset_task_status_allowed(struct task_struct *task, char *buffer); extern void cpuset_lock(void); @@ -84,7 +83,6 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) return node_possible_map; } -#define cpuset_current_mems_allowed (node_online_map) static inline void cpuset_init_current_mems_allowed(void) {} static inline void cpuset_update_task_memory_state(void) {} #define cpuset_nodes_subset_current_mems_allowed(nodes) (1) diff --git a/trunk/include/linux/debug_locks.h b/trunk/include/linux/debug_locks.h index a1c10b0c4cf0..952bee79a8f3 100644 --- a/trunk/include/linux/debug_locks.h +++ b/trunk/include/linux/debug_locks.h @@ -24,7 +24,7 @@ extern int debug_locks_off(void); int __ret = 0; \ \ if (unlikely(c)) { \ - if (debug_locks_silent || debug_locks_off()) \ + if (debug_locks_off()) \ WARN_ON(1); \ __ret = 1; \ } \ diff --git a/trunk/include/linux/delayacct.h b/trunk/include/linux/delayacct.h index 55d1ca5e60f5..561e2a77805c 100644 --- 
a/trunk/include/linux/delayacct.h +++ b/trunk/include/linux/delayacct.h @@ -30,7 +30,7 @@ #ifdef CONFIG_TASK_DELAY_ACCT extern int delayacct_on; /* Delay accounting turned on/off */ -extern struct kmem_cache *delayacct_cache; +extern kmem_cache_t *delayacct_cache; extern void delayacct_init(void); extern void __delayacct_tsk_init(struct task_struct *); extern void __delayacct_tsk_exit(struct task_struct *); diff --git a/trunk/include/linux/device.h b/trunk/include/linux/device.h index 49ab53ce92dc..583a341e016c 100644 --- a/trunk/include/linux/device.h +++ b/trunk/include/linux/device.h @@ -371,9 +371,6 @@ struct device { core doesn't touch it */ struct dev_pm_info power; -#ifdef CONFIG_NUMA - int numa_node; /* NUMA node this device is close to */ -#endif u64 *dma_mask; /* dma mask (if dma'able device) */ u64 coherent_dma_mask;/* Like dma_mask, but for alloc_coherent mappings as @@ -397,25 +394,6 @@ struct device { void (*release)(struct device * dev); }; -#ifdef CONFIG_NUMA -static inline int dev_to_node(struct device *dev) -{ - return dev->numa_node; -} -static inline void set_dev_node(struct device *dev, int node) -{ - dev->numa_node = node; -} -#else -static inline int dev_to_node(struct device *dev) -{ - return -1; -} -static inline void set_dev_node(struct device *dev, int node) -{ -} -#endif - static inline void * dev_get_drvdata (struct device *dev) { diff --git a/trunk/include/linux/elf.h b/trunk/include/linux/elf.h index 60713e6ea297..743d5c8e6d36 100644 --- a/trunk/include/linux/elf.h +++ b/trunk/include/linux/elf.h @@ -6,8 +6,6 @@ #include #include -struct file; - #ifndef elf_read_implies_exec /* Executables for which elf_read_implies_exec() returns TRUE will have the READ_IMPLIES_EXEC personality flag set automatically. @@ -360,7 +358,6 @@ extern Elf32_Dyn _DYNAMIC []; #define elfhdr elf32_hdr #define elf_phdr elf32_phdr #define elf_note elf32_note -#define elf_addr_t Elf32_Off #else @@ -368,7 +365,6 @@ extern Elf64_Dyn _DYNAMIC []; #define elfhdr elf64_hdr #define elf_phdr elf64_phdr #define elf_note elf64_note -#define elf_addr_t Elf64_Off #endif diff --git a/trunk/include/linux/ext3_jbd.h b/trunk/include/linux/ext3_jbd.h index 8c43b13a02fe..ce0e6109aff0 100644 --- a/trunk/include/linux/ext3_jbd.h +++ b/trunk/include/linux/ext3_jbd.h @@ -109,32 +109,74 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode); * been done yet. 
*/ -static inline void ext3_journal_release_buffer(handle_t *handle, - struct buffer_head *bh) +void ext3_journal_abort_handle(const char *caller, const char *err_fn, + struct buffer_head *bh, handle_t *handle, int err); + +static inline int +__ext3_journal_get_undo_access(const char *where, handle_t *handle, + struct buffer_head *bh) { - journal_release_buffer(handle, bh); + int err = journal_get_undo_access(handle, bh); + if (err) + ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; } -void ext3_journal_abort_handle(const char *caller, const char *err_fn, - struct buffer_head *bh, handle_t *handle, int err); +static inline int +__ext3_journal_get_write_access(const char *where, handle_t *handle, + struct buffer_head *bh) +{ + int err = journal_get_write_access(handle, bh); + if (err) + ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} -int __ext3_journal_get_undo_access(const char *where, handle_t *handle, - struct buffer_head *bh); +static inline void +ext3_journal_release_buffer(handle_t *handle, struct buffer_head *bh) +{ + journal_release_buffer(handle, bh); +} -int __ext3_journal_get_write_access(const char *where, handle_t *handle, - struct buffer_head *bh); +static inline int +__ext3_journal_forget(const char *where, handle_t *handle, struct buffer_head *bh) +{ + int err = journal_forget(handle, bh); + if (err) + ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} -int __ext3_journal_forget(const char *where, handle_t *handle, - struct buffer_head *bh); +static inline int +__ext3_journal_revoke(const char *where, handle_t *handle, + unsigned long blocknr, struct buffer_head *bh) +{ + int err = journal_revoke(handle, blocknr, bh); + if (err) + ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} -int __ext3_journal_revoke(const char *where, handle_t *handle, - unsigned long blocknr, struct buffer_head *bh); +static inline int +__ext3_journal_get_create_access(const char *where, + handle_t *handle, struct buffer_head *bh) +{ + int err = journal_get_create_access(handle, bh); + if (err) + ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} -int __ext3_journal_get_create_access(const char *where, - handle_t *handle, struct buffer_head *bh); +static inline int +__ext3_journal_dirty_metadata(const char *where, + handle_t *handle, struct buffer_head *bh) +{ + int err = journal_dirty_metadata(handle, bh); + if (err) + ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} -int __ext3_journal_dirty_metadata(const char *where, - handle_t *handle, struct buffer_head *bh); #define ext3_journal_get_undo_access(handle, bh) \ __ext3_journal_get_undo_access(__FUNCTION__, (handle), (bh)) diff --git a/trunk/include/linux/ext4_jbd2.h b/trunk/include/linux/ext4_jbd2.h index d716e6392cf6..72dd631912e4 100644 --- a/trunk/include/linux/ext4_jbd2.h +++ b/trunk/include/linux/ext4_jbd2.h @@ -114,32 +114,74 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode); * been done yet. 
*/ -static inline void ext4_journal_release_buffer(handle_t *handle, - struct buffer_head *bh) +void ext4_journal_abort_handle(const char *caller, const char *err_fn, + struct buffer_head *bh, handle_t *handle, int err); + +static inline int +__ext4_journal_get_undo_access(const char *where, handle_t *handle, + struct buffer_head *bh) { - jbd2_journal_release_buffer(handle, bh); + int err = jbd2_journal_get_undo_access(handle, bh); + if (err) + ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; } -void ext4_journal_abort_handle(const char *caller, const char *err_fn, - struct buffer_head *bh, handle_t *handle, int err); +static inline int +__ext4_journal_get_write_access(const char *where, handle_t *handle, + struct buffer_head *bh) +{ + int err = jbd2_journal_get_write_access(handle, bh); + if (err) + ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} -int __ext4_journal_get_undo_access(const char *where, handle_t *handle, - struct buffer_head *bh); +static inline void +ext4_journal_release_buffer(handle_t *handle, struct buffer_head *bh) +{ + jbd2_journal_release_buffer(handle, bh); +} -int __ext4_journal_get_write_access(const char *where, handle_t *handle, - struct buffer_head *bh); +static inline int +__ext4_journal_forget(const char *where, handle_t *handle, struct buffer_head *bh) +{ + int err = jbd2_journal_forget(handle, bh); + if (err) + ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} -int __ext4_journal_forget(const char *where, handle_t *handle, - struct buffer_head *bh); +static inline int +__ext4_journal_revoke(const char *where, handle_t *handle, + ext4_fsblk_t blocknr, struct buffer_head *bh) +{ + int err = jbd2_journal_revoke(handle, blocknr, bh); + if (err) + ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} -int __ext4_journal_revoke(const char *where, handle_t *handle, - ext4_fsblk_t blocknr, struct buffer_head *bh); +static inline int +__ext4_journal_get_create_access(const char *where, + handle_t *handle, struct buffer_head *bh) +{ + int err = jbd2_journal_get_create_access(handle, bh); + if (err) + ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} -int __ext4_journal_get_create_access(const char *where, - handle_t *handle, struct buffer_head *bh); +static inline int +__ext4_journal_dirty_metadata(const char *where, + handle_t *handle, struct buffer_head *bh) +{ + int err = jbd2_journal_dirty_metadata(handle, bh); + if (err) + ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} -int __ext4_journal_dirty_metadata(const char *where, - handle_t *handle, struct buffer_head *bh); #define ext4_journal_get_undo_access(handle, bh) \ __ext4_journal_get_undo_access(__FUNCTION__, (handle), (bh)) diff --git a/trunk/include/linux/file.h b/trunk/include/linux/file.h index 6e77b9177f9e..74183e6f7f45 100644 --- a/trunk/include/linux/file.h +++ b/trunk/include/linux/file.h @@ -64,8 +64,6 @@ struct files_struct { #define files_fdtable(files) (rcu_dereference((files)->fdt)) -extern struct kmem_cache *filp_cachep; - extern void FASTCALL(__fput(struct file *)); extern void FASTCALL(fput(struct file *)); @@ -116,6 +114,4 @@ struct files_struct *get_files_struct(struct task_struct *); void FASTCALL(put_files_struct(struct files_struct *fs)); void reset_files_struct(struct task_struct *, struct files_struct *); -extern struct kmem_cache *files_cachep; - #endif /* __LINUX_FILE_H */ diff --git 
a/trunk/include/linux/freezer.h b/trunk/include/linux/freezer.h deleted file mode 100644 index 6e05e3e7ce39..000000000000 --- a/trunk/include/linux/freezer.h +++ /dev/null @@ -1,87 +0,0 @@ -/* Freezer declarations */ - -#ifdef CONFIG_PM -/* - * Check if a process has been frozen - */ -static inline int frozen(struct task_struct *p) -{ - return p->flags & PF_FROZEN; -} - -/* - * Check if there is a request to freeze a process - */ -static inline int freezing(struct task_struct *p) -{ - return p->flags & PF_FREEZE; -} - -/* - * Request that a process be frozen - * FIXME: SMP problem. We may not modify other process' flags! - */ -static inline void freeze(struct task_struct *p) -{ - p->flags |= PF_FREEZE; -} - -/* - * Sometimes we may need to cancel the previous 'freeze' request - */ -static inline void do_not_freeze(struct task_struct *p) -{ - p->flags &= ~PF_FREEZE; -} - -/* - * Wake up a frozen process - */ -static inline int thaw_process(struct task_struct *p) -{ - if (frozen(p)) { - p->flags &= ~PF_FROZEN; - wake_up_process(p); - return 1; - } - return 0; -} - -/* - * freezing is complete, mark process as frozen - */ -static inline void frozen_process(struct task_struct *p) -{ - p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN; -} - -extern void refrigerator(void); -extern int freeze_processes(void); -extern void thaw_processes(void); - -static inline int try_to_freeze(void) -{ - if (freezing(current)) { - refrigerator(); - return 1; - } else - return 0; -} - -extern void thaw_some_processes(int all); - -#else -static inline int frozen(struct task_struct *p) { return 0; } -static inline int freezing(struct task_struct *p) { return 0; } -static inline void freeze(struct task_struct *p) { BUG(); } -static inline int thaw_process(struct task_struct *p) { return 1; } -static inline void frozen_process(struct task_struct *p) { BUG(); } - -static inline void refrigerator(void) {} -static inline int freeze_processes(void) { BUG(); return 0; } -static inline void thaw_processes(void) {} - -static inline int try_to_freeze(void) { return 0; } - - -#endif diff --git a/trunk/include/linux/fs.h b/trunk/include/linux/fs.h index 70b99fbb560b..cac7b1ef9543 100644 --- a/trunk/include/linux/fs.h +++ b/trunk/include/linux/fs.h @@ -543,22 +543,19 @@ struct inode { struct list_head i_dentry; unsigned long i_ino; atomic_t i_count; + umode_t i_mode; unsigned int i_nlink; uid_t i_uid; gid_t i_gid; dev_t i_rdev; - unsigned long i_version; loff_t i_size; -#ifdef __NEED_I_SIZE_ORDERED - seqcount_t i_size_seqcount; -#endif struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; unsigned int i_blkbits; + unsigned long i_version; blkcnt_t i_blocks; unsigned short i_bytes; - umode_t i_mode; spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ struct mutex i_mutex; struct rw_semaphore i_alloc_sem; @@ -601,6 +598,9 @@ struct inode { void *i_security; #endif void *i_private; /* fs or device private pointer */ +#ifdef __NEED_I_SIZE_ORDERED + seqcount_t i_size_seqcount; +#endif }; /* @@ -636,7 +636,7 @@ extern void inode_double_unlock(struct inode *inode1, struct inode *inode2); * cmpxchg8b without the need of the lock prefix). For SMP compiles * and 64bit archs it makes no difference if preempt is enabled or not. 
*/ -static inline loff_t i_size_read(const struct inode *inode) +static inline loff_t i_size_read(struct inode *inode) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) loff_t i_size; @@ -679,12 +679,12 @@ static inline void i_size_write(struct inode *inode, loff_t i_size) #endif } -static inline unsigned iminor(const struct inode *inode) +static inline unsigned iminor(struct inode *inode) { return MINOR(inode->i_rdev); } -static inline unsigned imajor(const struct inode *inode) +static inline unsigned imajor(struct inode *inode) { return MAJOR(inode->i_rdev); } @@ -1481,9 +1481,7 @@ extern char * getname(const char __user *); extern void __init vfs_caches_init_early(void); extern void __init vfs_caches_init(unsigned long); -extern struct kmem_cache *names_cachep; - -#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL) +#define __getname() kmem_cache_alloc(names_cachep, SLAB_KERNEL) #define __putname(name) kmem_cache_free(names_cachep, (void *)(name)) #ifndef CONFIG_AUDITSYSCALL #define putname(name) __putname(name) diff --git a/trunk/include/linux/fs_struct.h b/trunk/include/linux/fs_struct.h index 11a36ceddf73..c623d12a486e 100644 --- a/trunk/include/linux/fs_struct.h +++ b/trunk/include/linux/fs_struct.h @@ -18,8 +18,6 @@ struct fs_struct { .umask = 0022, \ } -extern struct kmem_cache *fs_cachep; - extern void exit_fs(struct task_struct *); extern void set_fs_altroot(void); extern void set_fs_root(struct fs_struct *, struct vfsmount *, struct dentry *); diff --git a/trunk/include/linux/fuse.h b/trunk/include/linux/fuse.h index 534744efe30d..9fc48a674b82 100644 --- a/trunk/include/linux/fuse.h +++ b/trunk/include/linux/fuse.h @@ -15,7 +15,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 8 +#define FUSE_KERNEL_MINOR_VERSION 7 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -92,11 +92,6 @@ struct fuse_file_lock { #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) -/** - * Release flags - */ -#define FUSE_RELEASE_FLUSH (1 << 0) - enum fuse_opcode { FUSE_LOOKUP = 1, FUSE_FORGET = 2, /* no reply */ @@ -132,8 +127,6 @@ enum fuse_opcode { FUSE_ACCESS = 34, FUSE_CREATE = 35, FUSE_INTERRUPT = 36, - FUSE_BMAP = 37, - FUSE_DESTROY = 38, }; /* The read buffer is required to be at least 8k, but may be much larger */ @@ -212,13 +205,12 @@ struct fuse_open_out { struct fuse_release_in { __u64 fh; __u32 flags; - __u32 release_flags; - __u64 lock_owner; + __u32 padding; }; struct fuse_flush_in { __u64 fh; - __u32 unused; + __u32 flush_flags; __u32 padding; __u64 lock_owner; }; @@ -304,16 +296,6 @@ struct fuse_interrupt_in { __u64 unique; }; -struct fuse_bmap_in { - __u64 block; - __u32 blocksize; - __u32 padding; -}; - -struct fuse_bmap_out { - __u64 block; -}; - struct fuse_in_header { __u32 len; __u32 opcode; diff --git a/trunk/include/linux/gfp.h b/trunk/include/linux/gfp.h index 00c314aedab7..bf2b6bc3f6fd 100644 --- a/trunk/include/linux/gfp.h +++ b/trunk/include/linux/gfp.h @@ -116,9 +116,6 @@ static inline enum zone_type gfp_zone(gfp_t flags) #ifndef HAVE_ARCH_FREE_PAGE static inline void arch_free_page(struct page *page, int order) { } #endif -#ifndef HAVE_ARCH_ALLOC_PAGE -static inline void arch_alloc_page(struct page *page, int order) { } -#endif extern struct page * FASTCALL(__alloc_pages(gfp_t, unsigned int, struct zonelist *)); diff --git a/trunk/include/linux/highmem.h b/trunk/include/linux/highmem.h index 3d8768b619e9..fd7d12daa94f 100644 --- a/trunk/include/linux/highmem.h +++ 
b/trunk/include/linux/highmem.h @@ -3,7 +3,6 @@ #include #include -#include #include @@ -42,10 +41,9 @@ static inline void *kmap(struct page *page) #define kunmap(page) do { (void) (page); } while (0) -#define kmap_atomic(page, idx) \ - ({ pagefault_disable(); page_address(page); }) -#define kunmap_atomic(addr, idx) do { pagefault_enable(); } while (0) -#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx)) +#define kmap_atomic(page, idx) page_address(page) +#define kunmap_atomic(addr, idx) do { } while (0) +#define kmap_atomic_pfn(pfn, idx) page_address(pfn_to_page(pfn)) #define kmap_atomic_to_page(ptr) virt_to_page(ptr) #endif diff --git a/trunk/include/linux/hugetlb.h b/trunk/include/linux/hugetlb.h index a60995afe334..ace64e57e17f 100644 --- a/trunk/include/linux/hugetlb.h +++ b/trunk/include/linux/hugetlb.h @@ -35,7 +35,6 @@ extern int sysctl_hugetlb_shm_group; pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr); pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr); -int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, int write); struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, diff --git a/trunk/include/linux/i2o.h b/trunk/include/linux/i2o.h index 52f53e2e70c3..c115e9e840b4 100644 --- a/trunk/include/linux/i2o.h +++ b/trunk/include/linux/i2o.h @@ -461,7 +461,7 @@ struct i2o_driver { int (*reply) (struct i2o_controller *, u32, struct i2o_message *); /* Event handler */ - work_func_t event; + void (*event) (struct i2o_event *); struct workqueue_struct *event_queue; /* Event queue */ @@ -490,7 +490,7 @@ struct i2o_dma { */ struct i2o_pool { char *name; - struct kmem_cache *slab; + kmem_cache_t *slab; mempool_t *mempool; }; @@ -986,8 +986,7 @@ extern void i2o_driver_unregister(struct i2o_driver *); /** * i2o_driver_notify_controller_add - Send notification of added controller - * @drv: I2O driver - * @c: I2O controller + * to a single I2O driver * * Send notification of added controller to a single registered driver. */ @@ -999,9 +998,8 @@ static inline void i2o_driver_notify_controller_add(struct i2o_driver *drv, }; /** - * i2o_driver_notify_controller_remove - Send notification of removed controller - * @drv: I2O driver - * @c: I2O controller + * i2o_driver_notify_controller_remove - Send notification of removed + * controller to a single I2O driver * * Send notification of removed controller to a single registered driver. */ @@ -1013,9 +1011,8 @@ static inline void i2o_driver_notify_controller_remove(struct i2o_driver *drv, }; /** - * i2o_driver_notify_device_add - Send notification of added device - * @drv: I2O driver - * @i2o_dev: the added i2o_device + * i2o_driver_notify_device_add - Send notification of added device to a + * single I2O driver * * Send notification of added device to a single registered driver. */ @@ -1028,8 +1025,7 @@ static inline void i2o_driver_notify_device_add(struct i2o_driver *drv, /** * i2o_driver_notify_device_remove - Send notification of removed device - * @drv: I2O driver - * @i2o_dev: the added i2o_device + * to a single I2O driver * * Send notification of removed device to a single registered driver. 
*/ @@ -1152,7 +1148,7 @@ static inline void i2o_msg_post(struct i2o_controller *c, /** * i2o_msg_post_wait - Post and wait a message and wait until return * @c: controller - * @msg: message to post + * @m: message to post * @timeout: time in seconds to wait * * This API allows an OSM to post a message and then be told whether or diff --git a/trunk/include/linux/init_task.h b/trunk/include/linux/init_task.h index 733790d4f7db..33c5daacc743 100644 --- a/trunk/include/linux/init_task.h +++ b/trunk/include/linux/init_task.h @@ -73,7 +73,7 @@ extern struct nsproxy init_nsproxy; #define INIT_NSPROXY(nsproxy) { \ .count = ATOMIC_INIT(1), \ - .nslock = __SPIN_LOCK_UNLOCKED(nsproxy.nslock), \ + .nslock = SPIN_LOCK_UNLOCKED, \ .uts_ns = &init_uts_ns, \ .namespace = NULL, \ INIT_IPC_NS(ipc_ns) \ diff --git a/trunk/include/linux/interrupt.h b/trunk/include/linux/interrupt.h index de7593f4e895..5b83e7b59621 100644 --- a/trunk/include/linux/interrupt.h +++ b/trunk/include/linux/interrupt.h @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -218,6 +217,12 @@ static inline void __deprecated save_and_cli(unsigned long *x) #define save_and_cli(x) save_and_cli(&x) #endif /* CONFIG_SMP */ +extern void local_bh_disable(void); +extern void __local_bh_enable(void); +extern void _local_bh_enable(void); +extern void local_bh_enable(void); +extern void local_bh_enable_ip(unsigned long ip); + /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high frequency threaded job scheduling. For almost all the purposes tasklets are more than enough. F.e. all serial device BHs et diff --git a/trunk/include/linux/ipmi.h b/trunk/include/linux/ipmi.h index 7a9db390c56a..796ca009fd46 100644 --- a/trunk/include/linux/ipmi.h +++ b/trunk/include/linux/ipmi.h @@ -208,15 +208,6 @@ struct kernel_ipmi_msg code as the first byte of the incoming data, unlike a response. */ -/* - * Modes for ipmi_set_maint_mode() and the userland IOCTL. The AUTO - * setting is the default and means it will be set on certain - * commands. Hard setting it on and off will override automatic - * operation. - */ -#define IPMI_MAINTENANCE_MODE_AUTO 0 -#define IPMI_MAINTENANCE_MODE_OFF 1 -#define IPMI_MAINTENANCE_MODE_ON 2 #ifdef __KERNEL__ @@ -382,35 +373,6 @@ int ipmi_unregister_for_cmd(ipmi_user_t user, unsigned char cmd, unsigned int chans); -/* - * Go into a mode where the driver will not autonomously attempt to do - * things with the interface. It will still respond to attentions and - * interrupts, and it will expect that commands will complete. It - * will not automatcially check for flags, events, or things of that - * nature. - * - * This is primarily used for firmware upgrades. The idea is that - * when you go into firmware upgrade mode, you do this operation - * and the driver will not attempt to do anything but what you tell - * it or what the BMC asks for. - * - * Note that if you send a command that resets the BMC, the driver - * will still expect a response from that command. So the BMC should - * reset itself *after* the response is sent. Resetting before the - * response is just silly. - * - * If in auto maintenance mode, the driver will automatically go into - * maintenance mode for 30 seconds if it sees a cold reset, a warm - * reset, or a firmware NetFN. This means that code that uses only - * firmware NetFN commands to do upgrades will work automatically - * without change, assuming it sends a message every 30 seconds or - * less. 
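The interrupt.h hunk above declares the local_bh_*() helpers directly in this header; a minimal sketch of the pairing those declarations exist for (the counter and function name are made-up examples, not from the patch):

        static unsigned long demo_events;       /* toy counter also touched from softirq context */

        static void demo_count_event(void)
        {
                local_bh_disable();              /* keep softirqs off this CPU ...           */
                demo_events++;                   /* ... while we touch data they also touch  */
                local_bh_enable();
                /* Note: this only excludes softirqs locally; cross-CPU access still needs a lock. */
        }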
- * - * See the IPMI_MAINTENANCE_MODE_xxx defines for what the mode means. - */ -int ipmi_get_maintenance_mode(ipmi_user_t user); -int ipmi_set_maintenance_mode(ipmi_user_t user, int mode); - /* * Allow run-to-completion mode to be set for the interface of * a specific user. @@ -694,11 +656,4 @@ struct ipmi_timing_parms #define IPMICTL_GET_TIMING_PARMS_CMD _IOR(IPMI_IOC_MAGIC, 23, \ struct ipmi_timing_parms) -/* - * Set the maintenance mode. See ipmi_set_maintenance_mode() above - * for a description of what this does. - */ -#define IPMICTL_GET_MAINTENANCE_MODE_CMD _IOR(IPMI_IOC_MAGIC, 30, int) -#define IPMICTL_SET_MAINTENANCE_MODE_CMD _IOW(IPMI_IOC_MAGIC, 31, int) - #endif /* __LINUX_IPMI_H */ diff --git a/trunk/include/linux/ipmi_msgdefs.h b/trunk/include/linux/ipmi_msgdefs.h index b56a158d587a..4d04d8b58a0a 100644 --- a/trunk/include/linux/ipmi_msgdefs.h +++ b/trunk/include/linux/ipmi_msgdefs.h @@ -46,8 +46,6 @@ #define IPMI_NETFN_APP_REQUEST 0x06 #define IPMI_NETFN_APP_RESPONSE 0x07 #define IPMI_GET_DEVICE_ID_CMD 0x01 -#define IPMI_COLD_RESET_CMD 0x02 -#define IPMI_WARM_RESET_CMD 0x03 #define IPMI_CLEAR_MSG_FLAGS_CMD 0x30 #define IPMI_GET_DEVICE_GUID_CMD 0x08 #define IPMI_GET_MSG_FLAGS_CMD 0x31 @@ -62,27 +60,20 @@ #define IPMI_NETFN_STORAGE_RESPONSE 0x0b #define IPMI_ADD_SEL_ENTRY_CMD 0x44 -#define IPMI_NETFN_FIRMWARE_REQUEST 0x08 -#define IPMI_NETFN_FIRMWARE_RESPONSE 0x09 - /* The default slave address */ #define IPMI_BMC_SLAVE_ADDR 0x20 /* The BT interface on high-end HP systems supports up to 255 bytes in * one transfer. Its "virtual" BMC supports some commands that are longer * than 128 bytes. Use the full 256, plus NetFn/LUN, Cmd, cCode, plus - * some overhead; it's not worth the effort to dynamically size this based - * on the results of the "Get BT Capabilities" command. */ + * some overhead. It would be nice to base this on the "BT Capabilities" + * but that's too hard to propagate to the rest of the driver. */ #define IPMI_MAX_MSG_LENGTH 272 /* multiple of 16 */ #define IPMI_CC_NO_ERROR 0x00 #define IPMI_NODE_BUSY_ERR 0xc0 #define IPMI_INVALID_COMMAND_ERR 0xc1 -#define IPMI_TIMEOUT_ERR 0xc3 #define IPMI_ERR_MSG_TRUNCATED 0xc6 -#define IPMI_REQ_LEN_INVALID_ERR 0xc7 -#define IPMI_REQ_LEN_EXCEEDED_ERR 0xc8 -#define IPMI_NOT_IN_MY_STATE_ERR 0xd5 /* IPMI 2.0 */ #define IPMI_LOST_ARBITRATION_ERR 0x81 #define IPMI_BUS_ERR 0x82 #define IPMI_NAK_ON_WRITE_ERR 0x83 diff --git a/trunk/include/linux/ipmi_smi.h b/trunk/include/linux/ipmi_smi.h index c0633108d05d..6d9c7e4da472 100644 --- a/trunk/include/linux/ipmi_smi.h +++ b/trunk/include/linux/ipmi_smi.h @@ -115,13 +115,6 @@ struct ipmi_smi_handlers poll for operations during things like crash dumps. */ void (*poll)(void *send_info); - /* Enable/disable firmware maintenance mode. Note that this - is *not* the modes defined, this is simply an on/off - setting. The message handler does the mode handling. Note - that this is called from interupt context, so it cannot - block. */ - void (*set_maintenance_mode)(void *send_info, int enable); - /* Tell the handler that we are using it/not using it. 
The message handler get the modules that this handler belongs to; this function lets the SMI claim any modules that it @@ -180,7 +173,6 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, void *send_info, struct ipmi_device_id *device_id, struct device *dev, - const char *sysfs_name, unsigned char slave_addr); /* diff --git a/trunk/include/linux/jbd.h b/trunk/include/linux/jbd.h index 452737551260..fe89444b1c6f 100644 --- a/trunk/include/linux/jbd.h +++ b/trunk/include/linux/jbd.h @@ -839,6 +839,7 @@ struct journal_s */ /* Filing buffers */ +extern void __journal_temp_unlink_buffer(struct journal_head *jh); extern void journal_unfile_buffer(journal_t *, struct journal_head *); extern void __journal_unfile_buffer(struct journal_head *); extern void __journal_refile_buffer(struct journal_head *); @@ -948,7 +949,7 @@ void journal_put_journal_head(struct journal_head *jh); /* * handle management */ -extern struct kmem_cache *jbd_handle_cache; +extern kmem_cache_t *jbd_handle_cache; static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags) { diff --git a/trunk/include/linux/jbd2.h b/trunk/include/linux/jbd2.h index 0e0fedd2039a..ddb128795781 100644 --- a/trunk/include/linux/jbd2.h +++ b/trunk/include/linux/jbd2.h @@ -848,6 +848,7 @@ struct journal_s */ /* Filing buffers */ +extern void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh); extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *); extern void __jbd2_journal_unfile_buffer(struct journal_head *); extern void __jbd2_journal_refile_buffer(struct journal_head *); @@ -957,7 +958,7 @@ void jbd2_journal_put_journal_head(struct journal_head *jh); /* * handle management */ -extern struct kmem_cache *jbd2_handle_cache; +extern kmem_cache_t *jbd2_handle_cache; static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags) { diff --git a/trunk/include/linux/kbd_kern.h b/trunk/include/linux/kbd_kern.h index 06c58c423fe1..efe0ee4cc80b 100644 --- a/trunk/include/linux/kbd_kern.h +++ b/trunk/include/linux/kbd_kern.h @@ -158,7 +158,7 @@ static inline void con_schedule_flip(struct tty_struct *t) if (t->buf.tail != NULL) t->buf.tail->commit = t->buf.tail->used; spin_unlock_irqrestore(&t->buf.lock, flags); - schedule_delayed_work(&t->buf.work, 0); + schedule_work(&t->buf.work); } #endif diff --git a/trunk/include/linux/kexec.h b/trunk/include/linux/kexec.h index e3abcec6c51c..a4ede62b339d 100644 --- a/trunk/include/linux/kexec.h +++ b/trunk/include/linux/kexec.h @@ -105,7 +105,6 @@ extern struct page *kimage_alloc_control_pages(struct kimage *image, unsigned int order); extern void crash_kexec(struct pt_regs *); int kexec_should_crash(struct task_struct *); -void crash_save_cpu(struct pt_regs *regs, int cpu); extern struct kimage *kexec_image; extern struct kimage *kexec_crash_image; diff --git a/trunk/include/linux/kprobes.h b/trunk/include/linux/kprobes.h index 769be39b9681..ac4c0559f751 100644 --- a/trunk/include/linux/kprobes.h +++ b/trunk/include/linux/kprobes.h @@ -165,7 +165,7 @@ extern void arch_disarm_kprobe(struct kprobe *p); extern int arch_init_kprobes(void); extern void show_registers(struct pt_regs *regs); extern kprobe_opcode_t *get_insn_slot(void); -extern void free_insn_slot(kprobe_opcode_t *slot, int dirty); +extern void free_insn_slot(kprobe_opcode_t *slot); extern void kprobes_inc_nmissed_count(struct kprobe *p); /* Get the kprobe at this addr (if any) - called with preemption disabled */ diff --git a/trunk/include/linux/ktime.h b/trunk/include/linux/ktime.h index 611f17f79eef..84eeecd60a02 100644 --- 
a/trunk/include/linux/ktime.h +++ b/trunk/include/linux/ktime.h @@ -248,9 +248,9 @@ static inline struct timeval ktime_to_timeval(const ktime_t kt) * * Returns the scalar nanoseconds representation of kt */ -static inline s64 ktime_to_ns(const ktime_t kt) +static inline u64 ktime_to_ns(const ktime_t kt) { - return (s64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec; + return (u64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec; } #endif diff --git a/trunk/include/linux/libata.h b/trunk/include/linux/libata.h index ab2754830322..202283b5df96 100644 --- a/trunk/include/linux/libata.h +++ b/trunk/include/linux/libata.h @@ -575,9 +575,8 @@ struct ata_port { struct ata_host *host; struct device *dev; - void *port_task_data; - struct delayed_work port_task; - struct delayed_work hotplug_task; + struct work_struct port_task; + struct work_struct hotplug_task; struct work_struct scsi_rescan_task; unsigned int hsm_task_state; @@ -756,7 +755,7 @@ extern void ata_host_resume(struct ata_host *host); extern int ata_ratelimit(void); extern int ata_busy_sleep(struct ata_port *ap, unsigned long timeout_pat, unsigned long timeout); -extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn, +extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data, unsigned long delay); extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, unsigned long interval_msec, diff --git a/trunk/include/linux/lockd/lockd.h b/trunk/include/linux/lockd/lockd.h index 8c39654549d8..862d9730a60d 100644 --- a/trunk/include/linux/lockd/lockd.h +++ b/trunk/include/linux/lockd/lockd.h @@ -164,12 +164,14 @@ void nlmclnt_next_cookie(struct nlm_cookie *); */ struct nlm_host * nlmclnt_lookup_host(const struct sockaddr_in *, int, int, const char *, int); struct nlm_host * nlmsvc_lookup_host(struct svc_rqst *, const char *, int); +struct nlm_host * nlm_lookup_host(int server, const struct sockaddr_in *, int, int, const char *, int); struct rpc_clnt * nlm_bind_host(struct nlm_host *); void nlm_rebind_host(struct nlm_host *); struct nlm_host * nlm_get_host(struct nlm_host *); void nlm_release_host(struct nlm_host *); void nlm_shutdown_hosts(void); extern void nlm_host_rebooted(const struct sockaddr_in *, const char *, int, u32); +struct nsm_handle *nsm_find(const struct sockaddr_in *, const char *, int); void nsm_release(struct nsm_handle *); diff --git a/trunk/include/linux/lockdep.h b/trunk/include/linux/lockdep.h index 498bfbd3b4e1..819f08f1310d 100644 --- a/trunk/include/linux/lockdep.h +++ b/trunk/include/linux/lockdep.h @@ -193,6 +193,7 @@ extern void lockdep_free_key_range(void *start, unsigned long size); extern void lockdep_off(void); extern void lockdep_on(void); +extern int lockdep_internal(void); /* * These methods are used by specific locking variants (spinlocks, @@ -242,8 +243,6 @@ extern void lock_release(struct lockdep_map *lock, int nested, # define INIT_LOCKDEP .lockdep_recursion = 0, -#define lockdep_depth(tsk) ((tsk)->lockdep_depth) - #else /* !LOCKDEP */ static inline void lockdep_off(void) @@ -254,6 +253,11 @@ static inline void lockdep_on(void) { } +static inline int lockdep_internal(void) +{ + return 0; +} + # define lock_acquire(l, s, t, r, c, i) do { } while (0) # define lock_release(l, n, i) do { } while (0) # define lockdep_init() do { } while (0) @@ -273,9 +277,6 @@ static inline void lockdep_on(void) * The class key takes no space if lockdep is disabled: */ struct lock_class_key { }; - -#define lockdep_depth(tsk) (0) - #endif /* !LOCKDEP */ #if defined(CONFIG_TRACE_IRQFLAGS) && 
defined(CONFIG_GENERIC_HARDIRQS) diff --git a/trunk/include/linux/mm.h b/trunk/include/linux/mm.h index a17b147c61e7..d538de901965 100644 --- a/trunk/include/linux/mm.h +++ b/trunk/include/linux/mm.h @@ -114,8 +114,6 @@ struct vm_area_struct { #endif }; -extern struct kmem_cache *vm_area_cachep; - /* * This struct defines the per-mm list of VMAs for uClinux. If CONFIG_MMU is * disabled, then there's a single shared list of VMAs maintained by the @@ -295,24 +293,6 @@ void put_pages_list(struct list_head *pages); void split_page(struct page *page, unsigned int order); -/* - * Compound pages have a destructor function. Provide a - * prototype for that function and accessor functions. - * These are _only_ valid on the head of a PG_compound page. - */ -typedef void compound_page_dtor(struct page *); - -static inline void set_compound_page_dtor(struct page *page, - compound_page_dtor *dtor) -{ - page[1].lru.next = (void *)dtor; -} - -static inline compound_page_dtor *get_compound_page_dtor(struct page *page) -{ - return (compound_page_dtor *)page[1].lru.next; -} - /* * Multiple processes may "see" the same page. E.g. for untouched * mappings of /dev/null, all processes see the same page full of @@ -416,9 +396,7 @@ static inline compound_page_dtor *get_compound_page_dtor(struct page *page) * We are going to use the flags for the page to node mapping if its in * there. This includes the case where there is no node, so it is implicit. */ -#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0) -#define NODE_NOT_IN_PAGE_FLAGS -#endif +#define FLAGS_HAS_NODE (NODES_WIDTH > 0 || NODES_SHIFT == 0) #ifndef PFN_SECTION_SHIFT #define PFN_SECTION_SHIFT 0 @@ -433,18 +411,13 @@ static inline compound_page_dtor *get_compound_page_dtor(struct page *page) #define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) #define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) -/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allcator */ -#ifdef NODE_NOT_IN_PAGEFLAGS -#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT) -#else -#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT) -#endif - -#if ZONES_WIDTH > 0 -#define ZONEID_PGSHIFT ZONES_PGSHIFT +/* NODE:ZONE or SECTION:ZONE is used to lookup the zone from a page. */ +#if FLAGS_HAS_NODE +#define ZONETABLE_SHIFT (NODES_SHIFT + ZONES_SHIFT) #else -#define ZONEID_PGSHIFT NODES_PGOFF +#define ZONETABLE_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT) #endif +#define ZONETABLE_PGSHIFT ZONES_PGSHIFT #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED #error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED @@ -453,28 +426,26 @@ static inline compound_page_dtor *get_compound_page_dtor(struct page *page) #define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) #define NODES_MASK ((1UL << NODES_WIDTH) - 1) #define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) -#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1) +#define ZONETABLE_MASK ((1UL << ZONETABLE_SHIFT) - 1) static inline enum zone_type page_zonenum(struct page *page) { return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; } -/* - * The identification function is only used by the buddy allocator for - * determining if two pages could be buddies. We are not really - * identifying a zone since we could be using a the section number - * id if we have not node id available in page flags. - * We guarantee only that it will return the same value for two - * combinable pages in a zone. 
- */ +struct zone; +extern struct zone *zone_table[]; + static inline int page_zone_id(struct page *page) { - BUILD_BUG_ON(ZONEID_PGSHIFT == 0 && ZONEID_MASK); - return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK; + return (page->flags >> ZONETABLE_PGSHIFT) & ZONETABLE_MASK; +} +static inline struct zone *page_zone(struct page *page) +{ + return zone_table[page_zone_id(page)]; } -static inline int zone_to_nid(struct zone *zone) +static inline unsigned long zone_to_nid(struct zone *zone) { #ifdef CONFIG_NUMA return zone->node; @@ -483,20 +454,13 @@ static inline int zone_to_nid(struct zone *zone) #endif } -#ifdef NODE_NOT_IN_PAGE_FLAGS -extern int page_to_nid(struct page *page); -#else -static inline int page_to_nid(struct page *page) -{ - return (page->flags >> NODES_PGSHIFT) & NODES_MASK; -} -#endif - -static inline struct zone *page_zone(struct page *page) +static inline unsigned long page_to_nid(struct page *page) { - return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; + if (FLAGS_HAS_NODE) + return (page->flags >> NODES_PGSHIFT) & NODES_MASK; + else + return zone_to_nid(page_zone(page)); } - static inline unsigned long page_to_section(struct page *page) { return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; @@ -513,7 +477,6 @@ static inline void set_page_node(struct page *page, unsigned long node) page->flags &= ~(NODES_MASK << NODES_PGSHIFT); page->flags |= (node & NODES_MASK) << NODES_PGSHIFT; } - static inline void set_page_section(struct page *page, unsigned long section) { page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT); @@ -984,6 +947,8 @@ extern void mem_init(void); extern void show_mem(void); extern void si_meminfo(struct sysinfo * val); extern void si_meminfo_node(struct sysinfo *val, int nid); +extern void zonetable_add(struct zone *zone, int nid, enum zone_type zid, + unsigned long pfn, unsigned long size); #ifdef CONFIG_NUMA extern void setup_per_cpu_pageset(void); diff --git a/trunk/include/linux/mmc/host.h b/trunk/include/linux/mmc/host.h index c15ae1986b98..528e7d3fecb1 100644 --- a/trunk/include/linux/mmc/host.h +++ b/trunk/include/linux/mmc/host.h @@ -110,7 +110,7 @@ struct mmc_host { struct mmc_card *card_busy; /* the MMC card claiming host */ struct mmc_card *card_selected; /* the selected MMC card */ - struct delayed_work detect; + struct work_struct detect; unsigned long private[0] ____cacheline_aligned; }; diff --git a/trunk/include/linux/mmzone.h b/trunk/include/linux/mmzone.h index e339a7345f25..e06683e2bea3 100644 --- a/trunk/include/linux/mmzone.h +++ b/trunk/include/linux/mmzone.h @@ -278,7 +278,7 @@ struct zone { /* * rarely used fields: */ - const char *name; + char *name; } ____cacheline_internodealigned_in_smp; /* @@ -288,94 +288,19 @@ struct zone { */ #define DEF_PRIORITY 12 -/* Maximum number of zones on a zonelist */ -#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES) - -#ifdef CONFIG_NUMA -/* - * We cache key information from each zonelist for smaller cache - * footprint when scanning for free pages in get_page_from_freelist(). - * - * 1) The BITMAP fullzones tracks which zones in a zonelist have come - * up short of free memory since the last time (last_fullzone_zap) - * we zero'd fullzones. - * 2) The array z_to_n[] maps each zone in the zonelist to its node - * id, so that we can efficiently evaluate whether that node is - * set in the current tasks mems_allowed. - * - * Both fullzones and z_to_n[] are one-to-one with the zonelist, - * indexed by a zones offset in the zonelist zones[] array. 
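With the mm.h hunk above, page_zone() goes back to indexing the global zone_table[] and page_to_nid() is derived either from the page flags or from the zone; a small sketch of how generic code consumes these helpers (the reporting function is an illustrative assumption):

        /* Assumes <linux/mm.h>. */
        static void demo_report_page_placement(struct page *page)
        {
                struct zone *zone = page_zone(page);     /* zone_table[] lookup after this change */
                unsigned long nid = page_to_nid(page);   /* node id, from flags or via the zone   */

                printk(KERN_DEBUG "page %p: node %lu, zone %s\n", page, nid, zone->name);
        }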
- * - * The get_page_from_freelist() routine does two scans. During the - * first scan, we skip zones whose corresponding bit in 'fullzones' - * is set or whose corresponding node in current->mems_allowed (which - * comes from cpusets) is not set. During the second scan, we bypass - * this zonelist_cache, to ensure we look methodically at each zone. - * - * Once per second, we zero out (zap) fullzones, forcing us to - * reconsider nodes that might have regained more free memory. - * The field last_full_zap is the time we last zapped fullzones. - * - * This mechanism reduces the amount of time we waste repeatedly - * reexaming zones for free memory when they just came up low on - * memory momentarilly ago. - * - * The zonelist_cache struct members logically belong in struct - * zonelist. However, the mempolicy zonelists constructed for - * MPOL_BIND are intentionally variable length (and usually much - * shorter). A general purpose mechanism for handling structs with - * multiple variable length members is more mechanism than we want - * here. We resort to some special case hackery instead. - * - * The MPOL_BIND zonelists don't need this zonelist_cache (in good - * part because they are shorter), so we put the fixed length stuff - * at the front of the zonelist struct, ending in a variable length - * zones[], as is needed by MPOL_BIND. - * - * Then we put the optional zonelist cache on the end of the zonelist - * struct. This optional stuff is found by a 'zlcache_ptr' pointer in - * the fixed length portion at the front of the struct. This pointer - * both enables us to find the zonelist cache, and in the case of - * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL) - * to know that the zonelist cache is not there. - * - * The end result is that struct zonelists come in two flavors: - * 1) The full, fixed length version, shown below, and - * 2) The custom zonelists for MPOL_BIND. - * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache. - * - * Even though there may be multiple CPU cores on a node modifying - * fullzones or last_full_zap in the same zonelist_cache at the same - * time, we don't lock it. This is just hint data - if it is wrong now - * and then, the allocator will still function, perhaps a bit slower. - */ - - -struct zonelist_cache { - unsigned short z_to_n[MAX_ZONES_PER_ZONELIST]; /* zone->nid */ - DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST); /* zone full? */ - unsigned long last_full_zap; /* when last zap'd (jiffies) */ -}; -#else -struct zonelist_cache; -#endif - /* * One allocation request operates on a zonelist. A zonelist * is a list of zones, the first one is the 'goal' of the * allocation, the other zones are fallback zones, in decreasing * priority. * - * If zlcache_ptr is not NULL, then it is just the address of zlcache, - * as explained above. If zlcache_ptr is NULL, there is no zlcache. + * Right now a zonelist takes up less than a cacheline. We never + * modify it apart from boot-up, and only a few indices are used, + * so despite the zonelist table being relatively big, the cache + * footprint of this construct is very small. */ - struct zonelist { - struct zonelist_cache *zlcache_ptr; // NULL or &zlcache - struct zone *zones[MAX_ZONES_PER_ZONELIST + 1]; // NULL delimited -#ifdef CONFIG_NUMA - struct zonelist_cache zlcache; // optional ... 
-#endif + struct zone *zones[MAX_NUMNODES * MAX_NR_ZONES + 1]; // NULL delimited }; #ifdef CONFIG_ARCH_POPULATES_NODE_MAP diff --git a/trunk/include/linux/moduleparam.h b/trunk/include/linux/moduleparam.h index 4a189dadb160..7c0c2c198f1f 100644 --- a/trunk/include/linux/moduleparam.h +++ b/trunk/include/linux/moduleparam.h @@ -63,9 +63,6 @@ struct kparam_array not there, read bits mean it's readable, write bits mean it's writable. */ #define __module_param_call(prefix, name, set, get, arg, perm) \ - /* Default value instead of permissions? */ \ - static int __param_perm_check_##name __attribute__((unused)) = \ - BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)); \ static char __param_str_##name[] = prefix #name; \ static struct kernel_param const __param_##name \ __attribute_used__ \ diff --git a/trunk/include/linux/msg.h b/trunk/include/linux/msg.h index f1b60740d641..acc7c174ff00 100644 --- a/trunk/include/linux/msg.h +++ b/trunk/include/linux/msg.h @@ -92,12 +92,6 @@ struct msg_queue { struct list_head q_senders; }; -/* Helper routines for sys_msgsnd and sys_msgrcv */ -extern long do_msgsnd(int msqid, long mtype, void __user *mtext, - size_t msgsz, int msgflg); -extern long do_msgrcv(int msqid, long *pmtype, void __user *mtext, - size_t msgsz, long msgtyp, int msgflg); - #endif /* __KERNEL__ */ #endif /* _LINUX_MSG_H */ diff --git a/trunk/include/linux/mutex.h b/trunk/include/linux/mutex.h index b2b91c477563..27c48daa3183 100644 --- a/trunk/include/linux/mutex.h +++ b/trunk/include/linux/mutex.h @@ -94,7 +94,7 @@ do { \ #define __MUTEX_INITIALIZER(lockname) \ { .count = ATOMIC_INIT(1) \ - , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ + , .wait_lock = SPIN_LOCK_UNLOCKED \ , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \ __DEBUG_MUTEX_INITIALIZER(lockname) \ __DEP_MAP_MUTEX_INITIALIZER(lockname) } diff --git a/trunk/include/linux/nbd.h b/trunk/include/linux/nbd.h index 0f3e69302540..d6b6dc09ad97 100644 --- a/trunk/include/linux/nbd.h +++ b/trunk/include/linux/nbd.h @@ -64,7 +64,6 @@ struct nbd_device { struct gendisk *disk; int blksize; u64 bytesize; - pid_t pid; /* pid of nbd-client, if attached */ }; #endif diff --git a/trunk/include/linux/ncp_fs_sb.h b/trunk/include/linux/ncp_fs_sb.h index a503052138bd..b089d9506283 100644 --- a/trunk/include/linux/ncp_fs_sb.h +++ b/trunk/include/linux/ncp_fs_sb.h @@ -127,10 +127,10 @@ struct ncp_server { } unexpected_packet; }; -extern void ncp_tcp_rcv_proc(struct work_struct *work); -extern void ncp_tcp_tx_proc(struct work_struct *work); -extern void ncpdgram_rcv_proc(struct work_struct *work); -extern void ncpdgram_timeout_proc(struct work_struct *work); +extern void ncp_tcp_rcv_proc(void *server); +extern void ncp_tcp_tx_proc(void *server); +extern void ncpdgram_rcv_proc(void *server); +extern void ncpdgram_timeout_proc(void *server); extern void ncpdgram_timeout_call(unsigned long server); extern void ncp_tcp_data_ready(struct sock* sk, int len); extern void ncp_tcp_write_space(struct sock* sk); diff --git a/trunk/include/linux/netpoll.h b/trunk/include/linux/netpoll.h index 29930b71a9aa..2cc9867b1626 100644 --- a/trunk/include/linux/netpoll.h +++ b/trunk/include/linux/netpoll.h @@ -32,7 +32,7 @@ struct netpoll_info { struct netpoll *rx_np; /* netpoll that registered an rx_hook */ struct sk_buff_head arp_tx; /* list of arp requests to reply to */ struct sk_buff_head txq; - struct delayed_work tx_work; + struct work_struct tx_work; }; void netpoll_poll(struct netpoll *np); diff --git a/trunk/include/linux/nfs_fs_sb.h 
b/trunk/include/linux/nfs_fs_sb.h index 95796e6924f1..7ccfc7ef0a83 100644 --- a/trunk/include/linux/nfs_fs_sb.h +++ b/trunk/include/linux/nfs_fs_sb.h @@ -51,7 +51,7 @@ struct nfs_client { unsigned long cl_lease_time; unsigned long cl_last_renewal; - struct delayed_work cl_renewd; + struct work_struct cl_renewd; struct rpc_wait_queue cl_rpcwaitq; diff --git a/trunk/include/linux/pci_ids.h b/trunk/include/linux/pci_ids.h index ff2dcb436cd0..c09da1e30c54 100644 --- a/trunk/include/linux/pci_ids.h +++ b/trunk/include/linux/pci_ids.h @@ -390,7 +390,7 @@ #define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d #define PCI_DEVICE_ID_NS_CS5535_AUDIO 0x002e #define PCI_DEVICE_ID_NS_CS5535_USB 0x002f -#define PCI_DEVICE_ID_NS_GX_VIDEO 0x0030 +#define PCI_DEVICE_ID_NS_CS5535_VIDEO 0x0030 #define PCI_DEVICE_ID_NS_SATURN 0x0035 #define PCI_DEVICE_ID_NS_SCx200_BRIDGE 0x0500 #define PCI_DEVICE_ID_NS_SCx200_SMI 0x0501 @@ -403,7 +403,8 @@ #define PCI_DEVICE_ID_NS_SC1100_XBUS 0x0515 #define PCI_DEVICE_ID_NS_87410 0xd001 -#define PCI_DEVICE_ID_NS_GX_HOST_BRIDGE 0x0028 +#define PCI_DEVICE_ID_NS_CS5535_HOST_BRIDGE 0x0028 +#define PCI_DEVICE_ID_NS_CS5535_ISA_BRIDGE 0x002b #define PCI_VENDOR_ID_TSENG 0x100c #define PCI_DEVICE_ID_TSENG_W32P_2 0x3202 @@ -1863,7 +1864,6 @@ #define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511 #define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513 #define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521 -#define PCI_DEVICE_ID_OXSEMI_16PCI952PP 0x9523 #define PCI_VENDOR_ID_SAMSUNG 0x144d diff --git a/trunk/include/linux/profile.h b/trunk/include/linux/profile.h index 5670b340c4ef..acce53fd38b6 100644 --- a/trunk/include/linux/profile.h +++ b/trunk/include/linux/profile.h @@ -6,15 +6,10 @@ #include #include #include -#include - #include -extern int prof_on __read_mostly; - #define CPU_PROFILING 1 #define SCHED_PROFILING 2 -#define SLEEP_PROFILING 3 struct proc_dir_entry; struct pt_regs; @@ -23,24 +18,7 @@ struct notifier_block; /* init basic kernel profiler */ void __init profile_init(void); void profile_tick(int); - -/* - * Add multiple profiler hits to a given address: - */ -void profile_hits(int, void *ip, unsigned int nr_hits); - -/* - * Single profiler hit: - */ -static inline void profile_hit(int type, void *ip) -{ - /* - * Speedup for the common (no profiling enabled) case: - */ - if (unlikely(prof_on == type)) - profile_hits(type, ip, 1); -} - +void profile_hit(int, void *); #ifdef CONFIG_PROC_FS void create_prof_cpu_mask(struct proc_dir_entry *); #else diff --git a/trunk/include/linux/quotaops.h b/trunk/include/linux/quotaops.h index 90c23f690c0d..5110201a4159 100644 --- a/trunk/include/linux/quotaops.h +++ b/trunk/include/linux/quotaops.h @@ -37,9 +37,6 @@ extern int dquot_release(struct dquot *dquot); extern int dquot_commit_info(struct super_block *sb, int type); extern int dquot_mark_dquot_dirty(struct dquot *dquot); -int remove_inode_dquot_ref(struct inode *inode, int type, - struct list_head *tofree_head); - extern int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path); extern int vfs_quota_on_mount(struct super_block *sb, char *qf_name, int format_id, int type); diff --git a/trunk/include/linux/radix-tree.h b/trunk/include/linux/radix-tree.h index 0deb842541ac..cbfa11537421 100644 --- a/trunk/include/linux/radix-tree.h +++ b/trunk/include/linux/radix-tree.h @@ -1,7 +1,6 @@ /* * Copyright (C) 2001 Momchil Velikov * Portions Copyright (C) 2001 Christoph Hellwig - * Copyright (C) 2006 Nick Piggin * * This program is free software; you can redistribute it and/or * modify it under the terms of 
the GNU General Public License as @@ -22,35 +21,6 @@ #include #include -#include -#include - -/* - * A direct pointer (root->rnode pointing directly to a data item, - * rather than another radix_tree_node) is signalled by the low bit - * set in the root->rnode pointer. - * - * In this case root->height is also NULL, but the direct pointer tests are - * needed for RCU lookups when root->height is unreliable. - */ -#define RADIX_TREE_DIRECT_PTR 1 - -static inline void *radix_tree_ptr_to_direct(void *ptr) -{ - return (void *)((unsigned long)ptr | RADIX_TREE_DIRECT_PTR); -} - -static inline void *radix_tree_direct_to_ptr(void *ptr) -{ - return (void *)((unsigned long)ptr & ~RADIX_TREE_DIRECT_PTR); -} - -static inline int radix_tree_is_direct_ptr(void *ptr) -{ - return (int)((unsigned long)ptr & RADIX_TREE_DIRECT_PTR); -} - -/*** radix-tree API starts here ***/ #define RADIX_TREE_MAX_TAGS 2 @@ -77,77 +47,6 @@ do { \ (root)->rnode = NULL; \ } while (0) -/** - * Radix-tree synchronization - * - * The radix-tree API requires that users provide all synchronisation (with - * specific exceptions, noted below). - * - * Synchronization of access to the data items being stored in the tree, and - * management of their lifetimes must be completely managed by API users. - * - * For API usage, in general, - * - any function _modifying_ the the tree or tags (inserting or deleting - * items, setting or clearing tags must exclude other modifications, and - * exclude any functions reading the tree. - * - any function _reading_ the the tree or tags (looking up items or tags, - * gang lookups) must exclude modifications to the tree, but may occur - * concurrently with other readers. - * - * The notable exceptions to this rule are the following functions: - * radix_tree_lookup - * radix_tree_tag_get - * radix_tree_gang_lookup - * radix_tree_gang_lookup_tag - * radix_tree_tagged - * - * The first 4 functions are able to be called locklessly, using RCU. The - * caller must ensure calls to these functions are made within rcu_read_lock() - * regions. Other readers (lock-free or otherwise) and modifications may be - * running concurrently. - * - * It is still required that the caller manage the synchronization and lifetimes - * of the items. So if RCU lock-free lookups are used, typically this would mean - * that the items have their own locks, or are amenable to lock-free access; and - * that the items are freed by RCU (or only freed after having been deleted from - * the radix tree *and* a synchronize_rcu() grace period). - * - * (Note, rcu_assign_pointer and rcu_dereference are not needed to control - * access to data items when inserting into or looking up from the radix tree) - * - * radix_tree_tagged is able to be called without locking or RCU. - */ - -/** - * radix_tree_deref_slot - dereference a slot - * @pslot: pointer to slot, returned by radix_tree_lookup_slot - * Returns: item that was stored in that slot with any direct pointer flag - * removed. - * - * For use with radix_tree_lookup_slot(). Caller must hold tree at least read - * locked across slot lookup and dereference. More likely, will be used with - * radix_tree_replace_slot(), as well, so caller will hold tree write locked. - */ -static inline void *radix_tree_deref_slot(void **pslot) -{ - return radix_tree_direct_to_ptr(*pslot); -} -/** - * radix_tree_replace_slot - replace item in a slot - * @pslot: pointer to slot, returned by radix_tree_lookup_slot - * @item: new item to store in the slot. - * - * For use with radix_tree_lookup_slot(). 
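The synchronisation comment being dropped above spells out that callers provide all locking around tree modifications; a minimal insert/lookup sketch in that style (my_tree, my_lock and the wrapper names are illustrative, not part of this patch):

        static RADIX_TREE(my_tree, GFP_ATOMIC);          /* tree-internal allocations are atomic */
        static DEFINE_SPINLOCK(my_lock);

        static int demo_stash(unsigned long index, void *item)
        {
                int err = radix_tree_preload(GFP_KERNEL); /* preallocate nodes while sleeping is OK */

                if (err)
                        return err;
                spin_lock(&my_lock);                      /* writers exclude everyone */
                err = radix_tree_insert(&my_tree, index, item);
                spin_unlock(&my_lock);
                radix_tree_preload_end();
                return err;
        }

        static void *demo_peek(unsigned long index)
        {
                void *item;

                spin_lock(&my_lock);                      /* without the RCU helpers, readers lock too */
                item = radix_tree_lookup(&my_tree, index);
                spin_unlock(&my_lock);
                return item;
        }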
Caller must hold tree write locked - * across slot lookup and replacement. - */ -static inline void radix_tree_replace_slot(void **pslot, void *item) -{ - BUG_ON(radix_tree_is_direct_ptr(item)); - rcu_assign_pointer(*pslot, - (void *)((unsigned long)item | - ((unsigned long)*pslot & RADIX_TREE_DIRECT_PTR))); -} - int radix_tree_insert(struct radix_tree_root *, unsigned long, void *); void *radix_tree_lookup(struct radix_tree_root *, unsigned long); void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); diff --git a/trunk/include/linux/raid/raid5.h b/trunk/include/linux/raid/raid5.h index 03636d7918fe..f13299a15591 100644 --- a/trunk/include/linux/raid/raid5.h +++ b/trunk/include/linux/raid/raid5.h @@ -235,7 +235,7 @@ struct raid5_private_data { */ int active_name; char cache_name[2][20]; - struct kmem_cache *slab_cache; /* for allocating stripes */ + kmem_cache_t *slab_cache; /* for allocating stripes */ int seq_flush, seq_write; int quiesce; diff --git a/trunk/include/linux/reiserfs_fs.h b/trunk/include/linux/reiserfs_fs.h index d0e4dce33ad5..7bc6bfb86253 100644 --- a/trunk/include/linux/reiserfs_fs.h +++ b/trunk/include/linux/reiserfs_fs.h @@ -739,7 +739,7 @@ struct block_head { #define PUT_B_FREE_SPACE(p_s_bh,val) do { set_blkh_free_space(B_BLK_HEAD(p_s_bh),val); } while (0) /* Get right delimiting key. -- little endian */ -#define B_PRIGHT_DELIM_KEY(p_s_bh) (&(blk_right_delim_key(B_BLK_HEAD(p_s_bh)))) +#define B_PRIGHT_DELIM_KEY(p_s_bh) (&(blk_right_delim_key(B_BLK_HEAD(p_s_bh)) /* Does the buffer contain a disk leaf. */ #define B_IS_ITEMS_LEVEL(p_s_bh) (B_LEVEL(p_s_bh) == DISK_LEAF_NODE_LEVEL) diff --git a/trunk/include/linux/reiserfs_fs_sb.h b/trunk/include/linux/reiserfs_fs_sb.h index 3a28742d86f9..62a7169aed8b 100644 --- a/trunk/include/linux/reiserfs_fs_sb.h +++ b/trunk/include/linux/reiserfs_fs_sb.h @@ -249,8 +249,7 @@ struct reiserfs_journal { int j_errno; /* when flushing ordered buffers, throttle new ordered writers */ - struct delayed_work j_work; - struct super_block *j_work_sb; + struct work_struct j_work; atomic_t j_async_throttle; }; diff --git a/trunk/include/linux/relay.h b/trunk/include/linux/relay.h index c6a48bfc8b14..24accb483849 100644 --- a/trunk/include/linux/relay.h +++ b/trunk/include/linux/relay.h @@ -38,7 +38,7 @@ struct rchan_buf size_t subbufs_consumed; /* count of sub-buffers consumed */ struct rchan *chan; /* associated channel */ wait_queue_head_t read_wait; /* reader wait queue */ - struct delayed_work wake_readers; /* reader wake-up work struct */ + struct work_struct wake_readers; /* reader wake-up work struct */ struct dentry *dentry; /* channel file dentry */ struct kref kref; /* channel buffer refcount */ struct page **page_array; /* array of current buffer pages */ @@ -274,7 +274,7 @@ static inline void subbuf_start_reserve(struct rchan_buf *buf, /* * exported relay file operations, kernel/relay.c */ -extern const struct file_operations relay_file_operations; +extern struct file_operations relay_file_operations; #endif /* _LINUX_RELAY_H */ diff --git a/trunk/include/linux/rmap.h b/trunk/include/linux/rmap.h index 36f850373d2c..db2c1df4fef9 100644 --- a/trunk/include/linux/rmap.h +++ b/trunk/include/linux/rmap.h @@ -30,11 +30,11 @@ struct anon_vma { #ifdef CONFIG_MMU -extern struct kmem_cache *anon_vma_cachep; +extern kmem_cache_t *anon_vma_cachep; static inline struct anon_vma *anon_vma_alloc(void) { - return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); + return kmem_cache_alloc(anon_vma_cachep, SLAB_KERNEL); } static inline 
void anon_vma_free(struct anon_vma *anon_vma) diff --git a/trunk/include/linux/rtmutex.h b/trunk/include/linux/rtmutex.h index b0090e9f7884..5d41dee82f80 100644 --- a/trunk/include/linux/rtmutex.h +++ b/trunk/include/linux/rtmutex.h @@ -63,7 +63,7 @@ struct hrtimer_sleeper; #endif #define __RT_MUTEX_INITIALIZER(mutexname) \ - { .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ + { .wait_lock = SPIN_LOCK_UNLOCKED \ , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \ , .owner = NULL \ __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} diff --git a/trunk/include/linux/rwsem-spinlock.h b/trunk/include/linux/rwsem-spinlock.h index 813cee13da0d..ae1fcadd598e 100644 --- a/trunk/include/linux/rwsem-spinlock.h +++ b/trunk/include/linux/rwsem-spinlock.h @@ -44,8 +44,7 @@ struct rw_semaphore { #endif #define __RWSEM_INITIALIZER(name) \ -{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \ - __RWSEM_DEP_MAP_INIT(name) } +{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h index dede82c63445..eafe4a7b8237 100644 --- a/trunk/include/linux/sched.h +++ b/trunk/include/linux/sched.h @@ -194,16 +194,7 @@ extern void init_idle(struct task_struct *idle, int cpu); extern cpumask_t nohz_cpu_mask; -/* - * Only dump TASK_* tasks. (-1 for all tasks) - */ -extern void show_state_filter(unsigned long state_filter); - -static inline void show_state(void) -{ - show_state_filter(-1); -} - +extern void show_state(void); extern void show_regs(struct pt_regs *); /* @@ -347,23 +338,15 @@ struct mm_struct { unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */ + unsigned dumpable:2; cpumask_t cpu_vm_mask; /* Architecture-specific MM context */ mm_context_t context; - /* Swap token stuff */ - /* - * Last value of global fault stamp as seen by this process. - * In other words, this value gives an indication of how long - * it has been since this task got the token. - * Look at mm/thrash.c - */ - unsigned int faultstamp; - unsigned int token_priority; - unsigned int last_interval; - - unsigned char dumpable:2; + /* Token based thrashing protection. 
*/ + unsigned long swap_token_time; + char recent_pagein; /* coredumping support */ int core_waiters; @@ -573,7 +556,7 @@ struct sched_info { #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */ #ifdef CONFIG_SCHEDSTATS -extern const struct file_operations proc_schedstat_operations; +extern struct file_operations proc_schedstat_operations; #endif /* CONFIG_SCHEDSTATS */ #ifdef CONFIG_TASK_DELAY_ACCT @@ -1305,6 +1288,7 @@ extern int kill_pgrp(struct pid *pid, int sig, int priv); extern int kill_pid(struct pid *pid, int sig, int priv); extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp); extern int kill_pg_info(int, struct siginfo *, pid_t); +extern int kill_proc_info(int, struct siginfo *, pid_t); extern void do_notify_parent(struct task_struct *, int); extern void force_sig(int, struct task_struct *); extern void force_sig_specific(int, struct task_struct *); @@ -1626,6 +1610,87 @@ extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls); extern void normalize_rt_tasks(void); +#ifdef CONFIG_PM +/* + * Check if a process has been frozen + */ +static inline int frozen(struct task_struct *p) +{ + return p->flags & PF_FROZEN; +} + +/* + * Check if there is a request to freeze a process + */ +static inline int freezing(struct task_struct *p) +{ + return p->flags & PF_FREEZE; +} + +/* + * Request that a process be frozen + * FIXME: SMP problem. We may not modify other process' flags! + */ +static inline void freeze(struct task_struct *p) +{ + p->flags |= PF_FREEZE; +} + +/* + * Sometimes we may need to cancel the previous 'freeze' request + */ +static inline void do_not_freeze(struct task_struct *p) +{ + p->flags &= ~PF_FREEZE; +} + +/* + * Wake up a frozen process + */ +static inline int thaw_process(struct task_struct *p) +{ + if (frozen(p)) { + p->flags &= ~PF_FROZEN; + wake_up_process(p); + return 1; + } + return 0; +} + +/* + * freezing is complete, mark process as frozen + */ +static inline void frozen_process(struct task_struct *p) +{ + p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN; +} + +extern void refrigerator(void); +extern int freeze_processes(void); +extern void thaw_processes(void); + +static inline int try_to_freeze(void) +{ + if (freezing(current)) { + refrigerator(); + return 1; + } else + return 0; +} +#else +static inline int frozen(struct task_struct *p) { return 0; } +static inline int freezing(struct task_struct *p) { return 0; } +static inline void freeze(struct task_struct *p) { BUG(); } +static inline int thaw_process(struct task_struct *p) { return 1; } +static inline void frozen_process(struct task_struct *p) { BUG(); } + +static inline void refrigerator(void) {} +static inline int freeze_processes(void) { BUG(); return 0; } +static inline void thaw_processes(void) {} + +static inline int try_to_freeze(void) { return 0; } + +#endif /* CONFIG_PM */ #endif /* __KERNEL__ */ #endif diff --git a/trunk/include/linux/seq_file.h b/trunk/include/linux/seq_file.h index 3e3cccbb1cac..b95f6eb7254c 100644 --- a/trunk/include/linux/seq_file.h +++ b/trunk/include/linux/seq_file.h @@ -20,7 +20,7 @@ struct seq_file { loff_t index; loff_t version; struct mutex lock; - const struct seq_operations *op; + struct seq_operations *op; void *private; }; @@ -31,7 +31,7 @@ struct seq_operations { int (*show) (struct seq_file *m, void *v); }; -int seq_open(struct file *, const struct seq_operations *); +int seq_open(struct file *, struct seq_operations *); ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); loff_t 
seq_lseek(struct file *, loff_t, int); int seq_release(struct inode *, struct file *); diff --git a/trunk/include/linux/serial_8250.h b/trunk/include/linux/serial_8250.h index 71310d80c09a..8e9681413726 100644 --- a/trunk/include/linux/serial_8250.h +++ b/trunk/include/linux/serial_8250.h @@ -41,7 +41,6 @@ enum { PLAT8250_DEV_FOURPORT, PLAT8250_DEV_ACCENT, PLAT8250_DEV_BOCA, - PLAT8250_DEV_EXAR_ST16C554, PLAT8250_DEV_HUB6, PLAT8250_DEV_MCA, PLAT8250_DEV_AU1X00, diff --git a/trunk/include/linux/serial_core.h b/trunk/include/linux/serial_core.h index 827672136646..463ab953b092 100644 --- a/trunk/include/linux/serial_core.h +++ b/trunk/include/linux/serial_core.h @@ -132,8 +132,6 @@ #define PORT_S3C2412 73 -/* Xilinx uartlite */ -#define PORT_UARTLITE 74 #ifdef __KERNEL__ diff --git a/trunk/include/linux/signal.h b/trunk/include/linux/signal.h index 14749056dd63..117135e33d67 100644 --- a/trunk/include/linux/signal.h +++ b/trunk/include/linux/signal.h @@ -241,8 +241,6 @@ extern int sigprocmask(int, sigset_t *, sigset_t *); struct pt_regs; extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie); -extern struct kmem_cache *sighand_cachep; - #endif /* __KERNEL__ */ #endif /* _LINUX_SIGNAL_H */ diff --git a/trunk/include/linux/skbuff.h b/trunk/include/linux/skbuff.h index 4ff3940210d8..a05a5f7c0b73 100644 --- a/trunk/include/linux/skbuff.h +++ b/trunk/include/linux/skbuff.h @@ -332,20 +332,20 @@ struct sk_buff { extern void kfree_skb(struct sk_buff *skb); extern void __kfree_skb(struct sk_buff *skb); extern struct sk_buff *__alloc_skb(unsigned int size, - gfp_t priority, int fclone, int node); + gfp_t priority, int fclone); static inline struct sk_buff *alloc_skb(unsigned int size, gfp_t priority) { - return __alloc_skb(size, priority, 0, -1); + return __alloc_skb(size, priority, 0); } static inline struct sk_buff *alloc_skb_fclone(unsigned int size, gfp_t priority) { - return __alloc_skb(size, priority, 1, -1); + return __alloc_skb(size, priority, 1); } -extern struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp, +extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, unsigned int size, gfp_t priority); extern void kfree_skbmem(struct sk_buff *skb); diff --git a/trunk/include/linux/slab.h b/trunk/include/linux/slab.h index 2271886744f8..c4947b8a2c03 100644 --- a/trunk/include/linux/slab.h +++ b/trunk/include/linux/slab.h @@ -7,17 +7,27 @@ #ifndef _LINUX_SLAB_H #define _LINUX_SLAB_H -#ifdef __KERNEL__ +#if defined(__KERNEL__) -#include -#include -#include -#include /* kmalloc_sizes.h needs PAGE_SIZE */ -#include /* kmalloc_sizes.h needs L1_CACHE_BYTES */ -#include +typedef struct kmem_cache kmem_cache_t; -/* kmem_cache_t exists for legacy reasons and is not used by code in mm */ -typedef struct kmem_cache kmem_cache_t __deprecated; +#include +#include +#include +#include /* kmalloc_sizes.h needs PAGE_SIZE */ +#include /* kmalloc_sizes.h needs L1_CACHE_BYTES */ + +/* flags for kmem_cache_alloc() */ +#define SLAB_NOFS GFP_NOFS +#define SLAB_NOIO GFP_NOIO +#define SLAB_ATOMIC GFP_ATOMIC +#define SLAB_USER GFP_USER +#define SLAB_KERNEL GFP_KERNEL +#define SLAB_DMA GFP_DMA + +#define SLAB_LEVEL_MASK GFP_LEVEL_MASK + +#define SLAB_NO_GROW __GFP_NO_GROW /* don't grow a cache */ /* flags to pass to kmem_cache_create(). 
* The first 3 are only valid when the allocator as been build @@ -47,23 +57,22 @@ typedef struct kmem_cache kmem_cache_t __deprecated; /* prototypes */ extern void __init kmem_cache_init(void); -extern struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, - unsigned long, - void (*)(void *, struct kmem_cache *, unsigned long), - void (*)(void *, struct kmem_cache *, unsigned long)); -extern void kmem_cache_destroy(struct kmem_cache *); -extern int kmem_cache_shrink(struct kmem_cache *); -extern void *kmem_cache_alloc(struct kmem_cache *, gfp_t); +extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long, + void (*)(void *, kmem_cache_t *, unsigned long), + void (*)(void *, kmem_cache_t *, unsigned long)); +extern void kmem_cache_destroy(kmem_cache_t *); +extern int kmem_cache_shrink(kmem_cache_t *); +extern void *kmem_cache_alloc(kmem_cache_t *, gfp_t); extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t); -extern void kmem_cache_free(struct kmem_cache *, void *); -extern unsigned int kmem_cache_size(struct kmem_cache *); -extern const char *kmem_cache_name(struct kmem_cache *); +extern void kmem_cache_free(kmem_cache_t *, void *); +extern unsigned int kmem_cache_size(kmem_cache_t *); +extern const char *kmem_cache_name(kmem_cache_t *); /* Size description struct for general caches. */ struct cache_sizes { - size_t cs_size; - struct kmem_cache *cs_cachep; - struct kmem_cache *cs_dmacachep; + size_t cs_size; + kmem_cache_t *cs_cachep; + kmem_cache_t *cs_dmacachep; }; extern struct cache_sizes malloc_sizes[]; @@ -202,7 +211,7 @@ extern unsigned int ksize(const void *); extern int slab_is_available(void); #ifdef CONFIG_NUMA -extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); +extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node); extern void *__kmalloc_node(size_t size, gfp_t flags, int node); static inline void *kmalloc_node(size_t size, gfp_t flags, int node) @@ -227,27 +236,8 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node) } return __kmalloc_node(size, flags, node); } - -/* - * kmalloc_node_track_caller is a special version of kmalloc_node that - * records the calling function of the routine calling it for slab leak - * tracking instead of just the calling function (confusing, eh?). - * It's useful when the call to kmalloc_node comes from a widely-used - * standard allocator where we care about the real place the memory - * allocation request comes from. 
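The slab.h hunk above restores the kmem_cache_t typedef and the SLAB_* allocation flags used throughout the rest of this patch; a minimal create/alloc/free sketch in that spelling (struct demo_obj and the cache name are illustrative assumptions):

        /* Assumes <linux/slab.h> and <linux/list.h>. */
        struct demo_obj {
                struct list_head list;
                int value;
        };

        static kmem_cache_t *demo_cachep;

        static int __init demo_cache_init(void)
        {
                /* six-argument kmem_cache_create(): name, size, align, flags, ctor, dtor */
                demo_cachep = kmem_cache_create("demo_obj", sizeof(struct demo_obj),
                                                0, 0, NULL, NULL);
                return demo_cachep ? 0 : -ENOMEM;
        }

        static struct demo_obj *demo_obj_alloc(void)
        {
                return kmem_cache_alloc(demo_cachep, SLAB_KERNEL);  /* SLAB_KERNEL aliases GFP_KERNEL */
        }

        static void demo_obj_free(struct demo_obj *obj)
        {
                kmem_cache_free(demo_cachep, obj);
        }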
- */ -#ifndef CONFIG_DEBUG_SLAB -#define kmalloc_node_track_caller(size, flags, node) \ - __kmalloc_node(size, flags, node) #else -extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *); -#define kmalloc_node_track_caller(size, flags, node) \ - __kmalloc_node_track_caller(size, flags, node, \ - __builtin_return_address(0)) -#endif -#else /* CONFIG_NUMA */ -static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep, - gfp_t flags, int node) +static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node) { return kmem_cache_alloc(cachep, flags); } @@ -255,13 +245,10 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node) { return kmalloc(size, flags); } - -#define kmalloc_node_track_caller(size, flags, node) \ - kmalloc_track_caller(size, flags) #endif extern int FASTCALL(kmem_cache_reap(int)); -extern int FASTCALL(kmem_ptr_validate(struct kmem_cache *cachep, void *ptr)); +extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)); #else /* CONFIG_SLOB */ @@ -296,10 +283,17 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags) #define kzalloc(s, f) __kzalloc(s, f) #define kmalloc_track_caller kmalloc -#define kmalloc_node_track_caller kmalloc_node - #endif /* CONFIG_SLOB */ +/* System wide caches */ +extern kmem_cache_t *vm_area_cachep; +extern kmem_cache_t *names_cachep; +extern kmem_cache_t *files_cachep; +extern kmem_cache_t *filp_cachep; +extern kmem_cache_t *fs_cachep; +extern kmem_cache_t *sighand_cachep; +extern kmem_cache_t *bio_cachep; + #endif /* __KERNEL__ */ #endif /* _LINUX_SLAB_H */ diff --git a/trunk/include/linux/smp.h b/trunk/include/linux/smp.h index 7ba23ec8211b..51649987f691 100644 --- a/trunk/include/linux/smp.h +++ b/trunk/include/linux/smp.h @@ -99,13 +99,6 @@ static inline int up_smp_call_function(void) static inline void smp_send_reschedule(int cpu) { } #define num_booting_cpus() 1 #define smp_prepare_boot_cpu() do {} while (0) -static inline int smp_call_function_single(int cpuid, void (*func) (void *info), - void *info, int retry, int wait) -{ - /* Disable interrupts here? 
*/ - func(info); - return 0; -} #endif /* !SMP */ diff --git a/trunk/include/linux/spinlock.h b/trunk/include/linux/spinlock.h index 94b767d64275..8451052ca66f 100644 --- a/trunk/include/linux/spinlock.h +++ b/trunk/include/linux/spinlock.h @@ -52,7 +52,6 @@ #include #include #include -#include #include diff --git a/trunk/include/linux/sunrpc/rpc_pipe_fs.h b/trunk/include/linux/sunrpc/rpc_pipe_fs.h index 4a68125b6de6..a2eb9b4a9de3 100644 --- a/trunk/include/linux/sunrpc/rpc_pipe_fs.h +++ b/trunk/include/linux/sunrpc/rpc_pipe_fs.h @@ -30,7 +30,7 @@ struct rpc_inode { #define RPC_PIPE_WAIT_FOR_OPEN 1 int flags; struct rpc_pipe_ops *ops; - struct delayed_work queue_timeout; + struct work_struct queue_timeout; }; static inline struct rpc_inode * diff --git a/trunk/include/linux/sunrpc/sched.h b/trunk/include/linux/sunrpc/sched.h index 0746c3b16f3a..f399c138f79d 100644 --- a/trunk/include/linux/sunrpc/sched.h +++ b/trunk/include/linux/sunrpc/sched.h @@ -222,7 +222,7 @@ struct rpc_wait_queue { #ifndef RPC_DEBUG # define RPC_WAITQ_INIT(var,qname) { \ - .lock = __SPIN_LOCK_UNLOCKED(var.lock), \ + .lock = SPIN_LOCK_UNLOCKED, \ .tasks = { \ [0] = LIST_HEAD_INIT(var.tasks[0]), \ [1] = LIST_HEAD_INIT(var.tasks[1]), \ @@ -231,7 +231,7 @@ struct rpc_wait_queue { } #else # define RPC_WAITQ_INIT(var,qname) { \ - .lock = __SPIN_LOCK_UNLOCKED(var.lock), \ + .lock = SPIN_LOCK_UNLOCKED, \ .tasks = { \ [0] = LIST_HEAD_INIT(var.tasks[0]), \ [1] = LIST_HEAD_INIT(var.tasks[1]), \ diff --git a/trunk/include/linux/sunrpc/xprt.h b/trunk/include/linux/sunrpc/xprt.h index 3e04c1512fc4..60394fbc4c70 100644 --- a/trunk/include/linux/sunrpc/xprt.h +++ b/trunk/include/linux/sunrpc/xprt.h @@ -177,7 +177,7 @@ struct rpc_xprt { unsigned long connect_timeout, bind_timeout, reestablish_timeout; - struct delayed_work connect_worker; + struct work_struct connect_worker; unsigned short port; /* diff --git a/trunk/include/linux/suspend.h b/trunk/include/linux/suspend.h index bf99bd49f8ef..b1237f16ecde 100644 --- a/trunk/include/linux/suspend.h +++ b/trunk/include/linux/suspend.h @@ -9,13 +9,10 @@ #include #include -/* struct pbe is used for creating lists of pages that should be restored - * atomically during the resume from disk, because the page frames they have - * occupied before the suspend are in use. 
- */ +/* page backup entry */ struct pbe { - void *address; /* address of the copy */ - void *orig_address; /* original address of a page */ + unsigned long address; /* address of the copy */ + unsigned long orig_address; /* original address of page */ struct pbe *next; }; diff --git a/trunk/include/linux/swap.h b/trunk/include/linux/swap.h index add51cebc8d9..e7c36ba2a2db 100644 --- a/trunk/include/linux/swap.h +++ b/trunk/include/linux/swap.h @@ -218,6 +218,8 @@ extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *); /* linux/mm/page_io.c */ extern int swap_readpage(struct file *, struct page *); extern int swap_writepage(struct page *page, struct writeback_control *wbc); +extern int rw_swap_page_sync(int rw, swp_entry_t entry, struct page *page, + struct bio **bio_chain); extern int end_swap_bio_read(struct bio *bio, unsigned int bytes_done, int err); /* linux/mm/swap_state.c */ @@ -245,10 +247,9 @@ extern int swap_duplicate(swp_entry_t); extern int valid_swaphandles(swp_entry_t, unsigned long *); extern void swap_free(swp_entry_t); extern void free_swap_and_cache(swp_entry_t); -extern int swap_type_of(dev_t, sector_t); +extern int swap_type_of(dev_t); extern unsigned int count_swap_pages(int, int); extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t); -extern sector_t swapdev_block(int, pgoff_t); extern struct swap_info_struct *get_swap_info_struct(unsigned); extern int can_share_swap_page(struct page *); extern int remove_exclusive_swap_page(struct page *); @@ -258,6 +259,7 @@ extern spinlock_t swap_lock; /* linux/mm/thrash.c */ extern struct mm_struct * swap_token_mm; +extern unsigned long swap_token_default_timeout; extern void grab_swap_token(void); extern void __put_swap_token(struct mm_struct *); diff --git a/trunk/include/linux/taskstats_kern.h b/trunk/include/linux/taskstats_kern.h index 7e9680f4afdd..6562a2050a25 100644 --- a/trunk/include/linux/taskstats_kern.h +++ b/trunk/include/linux/taskstats_kern.h @@ -12,27 +12,64 @@ #include #ifdef CONFIG_TASKSTATS -extern struct kmem_cache *taskstats_cache; +extern kmem_cache_t *taskstats_cache; extern struct mutex taskstats_exit_mutex; +static inline void taskstats_exit_free(struct taskstats *tidstats) +{ + if (tidstats) + kmem_cache_free(taskstats_cache, tidstats); +} + static inline void taskstats_tgid_init(struct signal_struct *sig) { sig->stats = NULL; } +static inline void taskstats_tgid_alloc(struct task_struct *tsk) +{ + struct signal_struct *sig = tsk->signal; + struct taskstats *stats; + + if (sig->stats != NULL) + return; + + /* No problem if kmem_cache_zalloc() fails */ + stats = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL); + + spin_lock_irq(&tsk->sighand->siglock); + if (!sig->stats) { + sig->stats = stats; + stats = NULL; + } + spin_unlock_irq(&tsk->sighand->siglock); + + if (stats) + kmem_cache_free(taskstats_cache, stats); +} + static inline void taskstats_tgid_free(struct signal_struct *sig) { if (sig->stats) kmem_cache_free(taskstats_cache, sig->stats); } -extern void taskstats_exit(struct task_struct *, int group_dead); +extern void taskstats_exit_alloc(struct taskstats **, unsigned int *); +extern void taskstats_exit_send(struct task_struct *, struct taskstats *, int, unsigned int); extern void taskstats_init_early(void); #else -static inline void taskstats_exit(struct task_struct *tsk, int group_dead) +static inline void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu) +{} +static inline void taskstats_exit_free(struct taskstats *ptidstats) +{} +static 
inline void taskstats_exit_send(struct task_struct *tsk, + struct taskstats *tidstats, + int group_dead, unsigned int cpu) {} static inline void taskstats_tgid_init(struct signal_struct *sig) {} +static inline void taskstats_tgid_alloc(struct task_struct *tsk) +{} static inline void taskstats_tgid_free(struct signal_struct *sig) {} static inline void taskstats_init_early(void) diff --git a/trunk/include/linux/tty.h b/trunk/include/linux/tty.h index f717f0898238..65321f911c1e 100644 --- a/trunk/include/linux/tty.h +++ b/trunk/include/linux/tty.h @@ -53,7 +53,7 @@ struct tty_buffer { }; struct tty_bufhead { - struct delayed_work work; + struct work_struct work; struct semaphore pty_sem; spinlock_t lock; struct tty_buffer *head; /* Queue head */ diff --git a/trunk/include/linux/uaccess.h b/trunk/include/linux/uaccess.h index 76c3fe325101..a48d7f11c7be 100644 --- a/trunk/include/linux/uaccess.h +++ b/trunk/include/linux/uaccess.h @@ -1,43 +1,8 @@ #ifndef __LINUX_UACCESS_H__ #define __LINUX_UACCESS_H__ -#include #include -/* - * These routines enable/disable the pagefault handler in that - * it will not take any locks and go straight to the fixup table. - * - * They have great resemblance to the preempt_disable/enable calls - * and in fact they are identical; this is because currently there is - * no other way to make the pagefault handlers do this. So we do - * disable preemption but we don't necessarily care about that. - */ -static inline void pagefault_disable(void) -{ - inc_preempt_count(); - /* - * make sure to have issued the store before a pagefault - * can hit. - */ - barrier(); -} - -static inline void pagefault_enable(void) -{ - /* - * make sure to issue those last loads/stores before enabling - * the pagefault handler again. - */ - barrier(); - dec_preempt_count(); - /* - * make sure we do.. - */ - barrier(); - preempt_check_resched(); -} - #ifndef ARCH_HAS_NOCACHE_UACCESS static inline unsigned long __copy_from_user_inatomic_nocache(void *to, @@ -65,22 +30,14 @@ static inline unsigned long __copy_from_user_nocache(void *to, * do_page_fault() doesn't attempt to take mmap_sem. This makes * probe_kernel_address() suitable for use within regions where the caller * already holds mmap_sem, or other locks which nest inside mmap_sem. - * This must be a macro because __get_user() needs to know the types of the - * args. - * - * We don't include enough header files to be able to do the set_fs(). We - * require that the probe_kernel_address() caller will do that. 
*/ #define probe_kernel_address(addr, retval) \ ({ \ long ret; \ - mm_segment_t old_fs = get_fs(); \ \ - set_fs(KERNEL_DS); \ - pagefault_disable(); \ + inc_preempt_count(); \ ret = __get_user(retval, addr); \ - pagefault_enable(); \ - set_fs(old_fs); \ + dec_preempt_count(); \ ret; \ }) diff --git a/trunk/include/linux/usb.h b/trunk/include/linux/usb.h index aab5b1b72021..0cd73edeef13 100644 --- a/trunk/include/linux/usb.h +++ b/trunk/include/linux/usb.h @@ -388,7 +388,7 @@ struct usb_device { int pm_usage_cnt; /* usage counter for autosuspend */ #ifdef CONFIG_PM - struct delayed_work autosuspend; /* for delayed autosuspends */ + struct work_struct autosuspend; /* for delayed autosuspends */ struct mutex pm_mutex; /* protects PM operations */ unsigned auto_pm:1; /* autosuspend/resume in progress */ diff --git a/trunk/include/linux/workqueue.h b/trunk/include/linux/workqueue.h index f0cb1df7b475..9bca3539a1e5 100644 --- a/trunk/include/linux/workqueue.h +++ b/trunk/include/linux/workqueue.h @@ -11,23 +11,12 @@ struct workqueue_struct; -struct work_struct; -typedef void (*work_func_t)(struct work_struct *work); - struct work_struct { - /* the first word is the work queue pointer and the flags rolled into - * one */ - unsigned long management; -#define WORK_STRUCT_PENDING 0 /* T if work item pending execution */ -#define WORK_STRUCT_NOAUTOREL 1 /* F if work item automatically released on exec */ -#define WORK_STRUCT_FLAG_MASK (3UL) -#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK) + unsigned long pending; struct list_head entry; - work_func_t func; -}; - -struct delayed_work { - struct work_struct work; + void (*func)(void *); + void *data; + void *wq_data; struct timer_list timer; }; @@ -35,159 +24,77 @@ struct execute_work { struct work_struct work; }; -#define __WORK_INITIALIZER(n, f) { \ - .management = 0, \ - .entry = { &(n).entry, &(n).entry }, \ - .func = (f), \ - } - -#define __WORK_INITIALIZER_NAR(n, f) { \ - .management = (1 << WORK_STRUCT_NOAUTOREL), \ +#define __WORK_INITIALIZER(n, f, d) { \ .entry = { &(n).entry, &(n).entry }, \ .func = (f), \ - } - -#define __DELAYED_WORK_INITIALIZER(n, f) { \ - .work = __WORK_INITIALIZER((n).work, (f)), \ - .timer = TIMER_INITIALIZER(NULL, 0, 0), \ - } - -#define __DELAYED_WORK_INITIALIZER_NAR(n, f) { \ - .work = __WORK_INITIALIZER_NAR((n).work, (f)), \ + .data = (d), \ .timer = TIMER_INITIALIZER(NULL, 0, 0), \ } -#define DECLARE_WORK(n, f) \ - struct work_struct n = __WORK_INITIALIZER(n, f) - -#define DECLARE_WORK_NAR(n, f) \ - struct work_struct n = __WORK_INITIALIZER_NAR(n, f) - -#define DECLARE_DELAYED_WORK(n, f) \ - struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f) - -#define DECLARE_DELAYED_WORK_NAR(n, f) \ - struct dwork_struct n = __DELAYED_WORK_INITIALIZER_NAR(n, f) +#define DECLARE_WORK(n, f, d) \ + struct work_struct n = __WORK_INITIALIZER(n, f, d) /* - * initialize a work item's function pointer + * initialize a work-struct's func and data pointers: */ -#define PREPARE_WORK(_work, _func) \ +#define PREPARE_WORK(_work, _func, _data) \ do { \ - (_work)->func = (_func); \ + (_work)->func = _func; \ + (_work)->data = _data; \ } while (0) -#define PREPARE_DELAYED_WORK(_work, _func) \ - PREPARE_WORK(&(_work)->work, (_func)) - /* - * initialize all of a work item in one go + * initialize all of a work-struct: */ -#define INIT_WORK(_work, _func) \ +#define INIT_WORK(_work, _func, _data) \ do { \ - (_work)->management = 0; \ INIT_LIST_HEAD(&(_work)->entry); \ - PREPARE_WORK((_work), (_func)); \ - } while (0) - -#define 
INIT_WORK_NAR(_work, _func) \ - do { \ - (_work)->management = (1 << WORK_STRUCT_NOAUTOREL); \ - INIT_LIST_HEAD(&(_work)->entry); \ - PREPARE_WORK((_work), (_func)); \ - } while (0) - -#define INIT_DELAYED_WORK(_work, _func) \ - do { \ - INIT_WORK(&(_work)->work, (_func)); \ - init_timer(&(_work)->timer); \ - } while (0) - -#define INIT_DELAYED_WORK_NAR(_work, _func) \ - do { \ - INIT_WORK_NAR(&(_work)->work, (_func)); \ + (_work)->pending = 0; \ + PREPARE_WORK((_work), (_func), (_data)); \ init_timer(&(_work)->timer); \ } while (0) -/** - * work_pending - Find out whether a work item is currently pending - * @work: The work item in question - */ -#define work_pending(work) \ - test_bit(WORK_STRUCT_PENDING, &(work)->management) - -/** - * delayed_work_pending - Find out whether a delayable work item is currently - * pending - * @work: The work item in question - */ -#define delayed_work_pending(work) \ - test_bit(WORK_STRUCT_PENDING, &(work)->work.management) - -/** - * work_release - Release a work item under execution - * @work: The work item to release - * - * This is used to release a work item that has been initialised with automatic - * release mode disabled (WORK_STRUCT_NOAUTOREL is set). This gives the work - * function the opportunity to grab auxiliary data from the container of the - * work_struct before clearing the pending bit as the work_struct may be - * subject to deallocation the moment the pending bit is cleared. - * - * In such a case, this should be called in the work function after it has - * fetched any data it may require from the containter of the work_struct. - * After this function has been called, the work_struct may be scheduled for - * further execution or it may be deallocated unless other precautions are - * taken. - * - * This should also be used to release a delayed work item. 
- */ -#define work_release(work) \ - clear_bit(WORK_STRUCT_PENDING, &(work)->management) - - extern struct workqueue_struct *__create_workqueue(const char *name, - int singlethread, - int freezeable); -#define create_workqueue(name) __create_workqueue((name), 0, 0) -#define create_freezeable_workqueue(name) __create_workqueue((name), 0, 1) -#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0) + int singlethread); +#define create_workqueue(name) __create_workqueue((name), 0) +#define create_singlethread_workqueue(name) __create_workqueue((name), 1) extern void destroy_workqueue(struct workqueue_struct *wq); extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work)); -extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay)); +extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay)); extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, - struct delayed_work *work, unsigned long delay); + struct work_struct *work, unsigned long delay); extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq)); extern int FASTCALL(schedule_work(struct work_struct *work)); -extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay)); +extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay)); -extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay); -extern int schedule_on_each_cpu(work_func_t func); +extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay); +extern int schedule_on_each_cpu(void (*func)(void *info), void *info); extern void flush_scheduled_work(void); extern int current_is_keventd(void); extern int keventd_up(void); extern void init_workqueues(void); -void cancel_rearming_delayed_work(struct delayed_work *work); +void cancel_rearming_delayed_work(struct work_struct *work); void cancel_rearming_delayed_workqueue(struct workqueue_struct *, - struct delayed_work *); -int execute_in_process_context(work_func_t fn, struct execute_work *); + struct work_struct *); +int execute_in_process_context(void (*fn)(void *), void *, + struct execute_work *); /* * Kill off a pending schedule_delayed_work(). Note that the work callback * function may still be running on return from cancel_delayed_work(). Run * flush_scheduled_work() to wait on it. 
*/ -static inline int cancel_delayed_work(struct delayed_work *work) +static inline int cancel_delayed_work(struct work_struct *work) { int ret; ret = del_timer_sync(&work->timer); if (ret) - clear_bit(WORK_STRUCT_PENDING, &work->work.management); + clear_bit(0, &work->pending); return ret; } diff --git a/trunk/include/net/dst.h b/trunk/include/net/dst.h index 62b7e7598e9a..e156e38e4ac3 100644 --- a/trunk/include/net/dst.h +++ b/trunk/include/net/dst.h @@ -98,7 +98,7 @@ struct dst_ops int entry_size; atomic_t entries; - struct kmem_cache *kmem_cachep; + kmem_cache_t *kmem_cachep; }; #ifdef __KERNEL__ diff --git a/trunk/include/net/ieee80211softmac.h b/trunk/include/net/ieee80211softmac.h index 89119277553d..617b672b1132 100644 --- a/trunk/include/net/ieee80211softmac.h +++ b/trunk/include/net/ieee80211softmac.h @@ -108,8 +108,8 @@ struct ieee80211softmac_assoc_info { /* Scan retries remaining */ int scan_retry; - struct delayed_work work; - struct delayed_work timeout; + struct work_struct work; + struct work_struct timeout; }; struct ieee80211softmac_bss_info { diff --git a/trunk/include/net/inet_hashtables.h b/trunk/include/net/inet_hashtables.h index 34cc76e3ddb4..a9eb2eaf094e 100644 --- a/trunk/include/net/inet_hashtables.h +++ b/trunk/include/net/inet_hashtables.h @@ -125,7 +125,7 @@ struct inet_hashinfo { rwlock_t lhash_lock ____cacheline_aligned; atomic_t lhash_users; wait_queue_head_t lhash_wait; - struct kmem_cache *bind_bucket_cachep; + kmem_cache_t *bind_bucket_cachep; }; static inline struct inet_ehash_bucket *inet_ehash_bucket( @@ -136,10 +136,10 @@ static inline struct inet_ehash_bucket *inet_ehash_bucket( } extern struct inet_bind_bucket * - inet_bind_bucket_create(struct kmem_cache *cachep, + inet_bind_bucket_create(kmem_cache_t *cachep, struct inet_bind_hashbucket *head, const unsigned short snum); -extern void inet_bind_bucket_destroy(struct kmem_cache *cachep, +extern void inet_bind_bucket_destroy(kmem_cache_t *cachep, struct inet_bind_bucket *tb); static inline int inet_bhashfn(const __u16 lport, const int bhash_size) diff --git a/trunk/include/net/inet_timewait_sock.h b/trunk/include/net/inet_timewait_sock.h index f7be1ac73601..5f48748fe017 100644 --- a/trunk/include/net/inet_timewait_sock.h +++ b/trunk/include/net/inet_timewait_sock.h @@ -84,7 +84,7 @@ struct inet_timewait_death_row { }; extern void inet_twdr_hangman(unsigned long data); -extern void inet_twdr_twkill_work(struct work_struct *work); +extern void inet_twdr_twkill_work(void *data); extern void inet_twdr_twcal_tick(unsigned long data); #if (BITS_PER_LONG == 64) diff --git a/trunk/include/net/neighbour.h b/trunk/include/net/neighbour.h index 23967031ddb7..c8aacbd2e333 100644 --- a/trunk/include/net/neighbour.h +++ b/trunk/include/net/neighbour.h @@ -160,7 +160,7 @@ struct neigh_table atomic_t entries; rwlock_t lock; unsigned long last_rand; - struct kmem_cache *kmem_cachep; + kmem_cache_t *kmem_cachep; struct neigh_statistics *stats; struct neighbour **hash_buckets; unsigned int hash_mask; diff --git a/trunk/include/net/netfilter/nf_conntrack_expect.h b/trunk/include/net/netfilter/nf_conntrack_expect.h index 41bcc9eb4206..cef3136e22a3 100644 --- a/trunk/include/net/netfilter/nf_conntrack_expect.h +++ b/trunk/include/net/netfilter/nf_conntrack_expect.h @@ -7,7 +7,7 @@ #include extern struct list_head nf_conntrack_expect_list; -extern struct kmem_cache *nf_conntrack_expect_cachep; +extern kmem_cache_t *nf_conntrack_expect_cachep; extern struct file_operations exp_file_ops; struct nf_conntrack_expect diff 
--git a/trunk/include/net/request_sock.h b/trunk/include/net/request_sock.h index 7aed02ce2b65..e37baaf2080b 100644 --- a/trunk/include/net/request_sock.h +++ b/trunk/include/net/request_sock.h @@ -29,7 +29,7 @@ struct proto; struct request_sock_ops { int family; int obj_size; - struct kmem_cache *slab; + kmem_cache_t *slab; int (*rtx_syn_ack)(struct sock *sk, struct request_sock *req, struct dst_entry *dst); @@ -60,7 +60,7 @@ struct request_sock { static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops) { - struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC); + struct request_sock *req = kmem_cache_alloc(ops->slab, SLAB_ATOMIC); if (req != NULL) req->rsk_ops = ops; diff --git a/trunk/include/net/sctp/structs.h b/trunk/include/net/sctp/structs.h index c089f93ba591..f8cbe40f52c0 100644 --- a/trunk/include/net/sctp/structs.h +++ b/trunk/include/net/sctp/structs.h @@ -1030,7 +1030,7 @@ void sctp_inq_init(struct sctp_inq *); void sctp_inq_free(struct sctp_inq *); void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet); struct sctp_chunk *sctp_inq_pop(struct sctp_inq *); -void sctp_inq_set_th_handler(struct sctp_inq *, work_func_t); +void sctp_inq_set_th_handler(struct sctp_inq *, void (*)(void *), void *); /* This is the structure we use to hold outbound chunks. You push * chunks in and they automatically pop out the other end as bundled diff --git a/trunk/include/net/sock.h b/trunk/include/net/sock.h index 03684e702d13..fe3a33fad03f 100644 --- a/trunk/include/net/sock.h +++ b/trunk/include/net/sock.h @@ -571,7 +571,7 @@ struct proto { int *sysctl_rmem; int max_header; - struct kmem_cache *slab; + kmem_cache_t *slab; unsigned int obj_size; atomic_t *orphan_count; @@ -746,25 +746,6 @@ static inline int sk_stream_wmem_schedule(struct sock *sk, int size) */ #define sock_owned_by_user(sk) ((sk)->sk_lock.owner) -/* - * Macro so as to not evaluate some arguments when - * lockdep is not enabled. - * - * Mark both the sk_lock and the sk_lock.slock as a - * per-address-family lock class. 
- */ -#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ -do { \ - sk->sk_lock.owner = NULL; \ - init_waitqueue_head(&sk->sk_lock.wq); \ - spin_lock_init(&(sk)->sk_lock.slock); \ - debug_check_no_locks_freed((void *)&(sk)->sk_lock, \ - sizeof((sk)->sk_lock)); \ - lockdep_set_class_and_name(&(sk)->sk_lock.slock, \ - (skey), (sname)); \ - lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ -} while (0) - extern void FASTCALL(lock_sock_nested(struct sock *sk, int subclass)); static inline void lock_sock(struct sock *sk) diff --git a/trunk/include/net/timewait_sock.h b/trunk/include/net/timewait_sock.h index 1e1ee3253fd8..d7a306ea560d 100644 --- a/trunk/include/net/timewait_sock.h +++ b/trunk/include/net/timewait_sock.h @@ -15,7 +15,7 @@ #include struct timewait_sock_ops { - struct kmem_cache *twsk_slab; + kmem_cache_t *twsk_slab; unsigned int twsk_obj_size; int (*twsk_unique)(struct sock *sk, struct sock *sktw, void *twp); diff --git a/trunk/include/scsi/libsas.h b/trunk/include/scsi/libsas.h index 0c775fceb675..44b2f82a6eec 100644 --- a/trunk/include/scsi/libsas.h +++ b/trunk/include/scsi/libsas.h @@ -201,14 +201,9 @@ struct domain_device { void *lldd_dev; }; -struct sas_discovery_event { - struct work_struct work; - struct asd_sas_port *port; -}; - struct sas_discovery { spinlock_t disc_event_lock; - struct sas_discovery_event disc_work[DISC_NUM_EVENTS]; + struct work_struct disc_work[DISC_NUM_EVENTS]; unsigned long pending; u8 fanout_sas_addr[8]; u8 eeds_a[8]; @@ -254,19 +249,14 @@ struct asd_sas_port { void *lldd_port; /* not touched by the sas class code */ }; -struct asd_sas_event { - struct work_struct work; - struct asd_sas_phy *phy; -}; - /* The phy pretty much is controlled by the LLDD. * The class only reads those fields. 
*/ struct asd_sas_phy { /* private: */ /* protected by ha->event_lock */ - struct asd_sas_event port_events[PORT_NUM_EVENTS]; - struct asd_sas_event phy_events[PHY_NUM_EVENTS]; + struct work_struct port_events[PORT_NUM_EVENTS]; + struct work_struct phy_events[PHY_NUM_EVENTS]; unsigned long port_events_pending; unsigned long phy_events_pending; @@ -318,15 +308,10 @@ struct scsi_core { int queue_thread_kill; }; -struct sas_ha_event { - struct work_struct work; - struct sas_ha_struct *ha; -}; - struct sas_ha_struct { /* private: */ spinlock_t event_lock; - struct sas_ha_event ha_events[HA_NUM_EVENTS]; + struct work_struct ha_events[HA_NUM_EVENTS]; unsigned long pending; struct scsi_core core; @@ -557,7 +542,7 @@ struct sas_task { static inline struct sas_task *sas_alloc_task(gfp_t flags) { - extern struct kmem_cache *sas_task_cache; + extern kmem_cache_t *sas_task_cache; struct sas_task *task = kmem_cache_alloc(sas_task_cache, flags); if (task) { @@ -575,7 +560,7 @@ static inline struct sas_task *sas_alloc_task(gfp_t flags) static inline void sas_free_task(struct sas_task *task) { if (task) { - extern struct kmem_cache *sas_task_cache; + extern kmem_cache_t *sas_task_cache; BUG_ON(!list_empty(&task->list)); kmem_cache_free(sas_task_cache, task); } @@ -646,6 +631,6 @@ void sas_unregister_dev(struct domain_device *); void sas_init_dev(struct domain_device *); -void sas_task_abort(struct work_struct *); +void sas_task_abort(struct sas_task *task); #endif /* _SASLIB_H_ */ diff --git a/trunk/include/scsi/scsi_transport_fc.h b/trunk/include/scsi/scsi_transport_fc.h index 798f7c7ee426..fd352323378b 100644 --- a/trunk/include/scsi/scsi_transport_fc.h +++ b/trunk/include/scsi/scsi_transport_fc.h @@ -206,9 +206,9 @@ struct fc_rport { /* aka fc_starget_attrs */ u8 flags; struct list_head peers; struct device dev; - struct delayed_work dev_loss_work; + struct work_struct dev_loss_work; struct work_struct scan_work; - struct delayed_work fail_io_work; + struct work_struct fail_io_work; struct work_struct stgt_delete_work; struct work_struct rport_delete_work; } __attribute__((aligned(sizeof(unsigned long)))); diff --git a/trunk/include/scsi/scsi_transport_iscsi.h b/trunk/include/scsi/scsi_transport_iscsi.h index d5c218ddc527..4b95c89c95c9 100644 --- a/trunk/include/scsi/scsi_transport_iscsi.h +++ b/trunk/include/scsi/scsi_transport_iscsi.h @@ -176,7 +176,7 @@ struct iscsi_cls_session { /* recovery fields */ int recovery_tmo; - struct delayed_work recovery_work; + struct work_struct recovery_work; int target_id; diff --git a/trunk/include/sound/ac97_codec.h b/trunk/include/sound/ac97_codec.h index 33720397a904..4c43521cc493 100644 --- a/trunk/include/sound/ac97_codec.h +++ b/trunk/include/sound/ac97_codec.h @@ -511,7 +511,7 @@ struct snd_ac97 { #ifdef CONFIG_SND_AC97_POWER_SAVE unsigned int power_up; /* power states */ struct workqueue_struct *power_workq; - struct delayed_work power_work; + struct work_struct power_work; #endif struct device dev; }; diff --git a/trunk/include/sound/ak4114.h b/trunk/include/sound/ak4114.h index 2ee061625fd0..11702aa0bea9 100644 --- a/trunk/include/sound/ak4114.h +++ b/trunk/include/sound/ak4114.h @@ -182,7 +182,7 @@ struct ak4114 { unsigned char rcs0; unsigned char rcs1; struct workqueue_struct *workqueue; - struct delayed_work work; + struct work_struct work; void *change_callback_private; void (*change_callback)(struct ak4114 *ak4114, unsigned char c0, unsigned char c1); }; diff --git a/trunk/init/do_mounts_initrd.c b/trunk/init/do_mounts_initrd.c index 
2cfd7cb36e79..919a80cb322e 100644 --- a/trunk/init/do_mounts_initrd.c +++ b/trunk/init/do_mounts_initrd.c @@ -6,7 +6,6 @@ #include #include #include -#include #include "do_mounts.h" diff --git a/trunk/init/initramfs.c b/trunk/init/initramfs.c index 85f04037ade1..d28c1094d7e5 100644 --- a/trunk/init/initramfs.c +++ b/trunk/init/initramfs.c @@ -182,10 +182,6 @@ static int __init do_collect(void) static int __init do_header(void) { - if (memcmp(collected, "070707", 6)==0) { - error("incorrect cpio method used: use -H newc option"); - return 1; - } if (memcmp(collected, "070701", 6)) { error("no cpio magic"); return 1; diff --git a/trunk/init/main.c b/trunk/init/main.c index 46508300d6b9..36f608a7cfba 100644 --- a/trunk/init/main.c +++ b/trunk/init/main.c @@ -73,10 +73,6 @@ #error Sorry, your GCC is too old. It builds incorrect kernels. #endif -#if __GNUC__ == 4 && __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ == 0 -#warning gcc-4.1.0 is known to miscompile the kernel. A different compiler version is recommended. -#endif - static int init(void *); extern void init_IRQ(void); diff --git a/trunk/ipc/compat.c b/trunk/ipc/compat.c index fa18141539fb..4d20cfd38f0a 100644 --- a/trunk/ipc/compat.c +++ b/trunk/ipc/compat.c @@ -115,6 +115,7 @@ struct compat_shm_info { extern int sem_ctls[]; #define sc_semopm (sem_ctls[2]) +#define MAXBUF (64*1024) static inline int compat_ipc_parse_version(int *cmd) { @@ -306,30 +307,35 @@ long compat_sys_semctl(int first, int second, int third, void __user *uptr) long compat_sys_msgsnd(int first, int second, int third, void __user *uptr) { + struct msgbuf __user *p; struct compat_msgbuf __user *up = uptr; long type; if (first < 0) return -EINVAL; - if (second < 0) + if (second < 0 || (second >= MAXBUF - sizeof(struct msgbuf))) return -EINVAL; - if (get_user(type, &up->mtype)) + p = compat_alloc_user_space(second + sizeof(struct msgbuf)); + if (get_user(type, &up->mtype) || + put_user(type, &p->mtype) || + copy_in_user(p->mtext, up->mtext, second)) return -EFAULT; - return do_msgsnd(first, type, up->mtext, second, third); + return sys_msgsnd(first, p, second, third); } long compat_sys_msgrcv(int first, int second, int msgtyp, int third, int version, void __user *uptr) { + struct msgbuf __user *p; struct compat_msgbuf __user *up; long type; int err; if (first < 0) return -EINVAL; - if (second < 0) + if (second < 0 || (second >= MAXBUF - sizeof(struct msgbuf))) return -EINVAL; if (!version) { @@ -343,11 +349,14 @@ long compat_sys_msgrcv(int first, int second, int msgtyp, int third, uptr = compat_ptr(ipck.msgp); msgtyp = ipck.msgtyp; } - up = uptr; - err = do_msgrcv(first, &type, up->mtext, second, msgtyp, third); + p = compat_alloc_user_space(second + sizeof(struct msgbuf)); + err = sys_msgrcv(first, p, second, msgtyp, third); if (err < 0) goto out; - if (put_user(type, &up->mtype)) + up = uptr; + if (get_user(type, &p->mtype) || + put_user(type, &up->mtype) || + copy_in_user(up->mtext, p->mtext, err)) err = -EFAULT; out: return err; diff --git a/trunk/ipc/mqueue.c b/trunk/ipc/mqueue.c index 3acc1661e517..7c274002c9f5 100644 --- a/trunk/ipc/mqueue.c +++ b/trunk/ipc/mqueue.c @@ -90,7 +90,7 @@ static struct super_operations mqueue_super_ops; static void remove_notification(struct mqueue_inode_info *info); static spinlock_t mq_lock; -static struct kmem_cache *mqueue_inode_cachep; +static kmem_cache_t *mqueue_inode_cachep; static struct vfsmount *mqueue_mnt; static unsigned int queues_count; @@ -211,7 +211,7 @@ static int mqueue_get_sb(struct file_system_type *fs_type, return 
get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt); } -static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags) +static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags) { struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; @@ -224,7 +224,7 @@ static struct inode *mqueue_alloc_inode(struct super_block *sb) { struct mqueue_inode_info *ei; - ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL); + ei = kmem_cache_alloc(mqueue_inode_cachep, SLAB_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; diff --git a/trunk/ipc/msg.c b/trunk/ipc/msg.c index a388824740e7..1266b1d0c8e3 100644 --- a/trunk/ipc/msg.c +++ b/trunk/ipc/msg.c @@ -626,11 +626,12 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) return 0; } -long do_msgsnd(int msqid, long mtype, void __user *mtext, - size_t msgsz, int msgflg) +asmlinkage long +sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg) { struct msg_queue *msq; struct msg_msg *msg; + long mtype; int err; struct ipc_namespace *ns; @@ -638,10 +639,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0) return -EINVAL; + if (get_user(mtype, &msgp->mtype)) + return -EFAULT; if (mtype < 1) return -EINVAL; - msg = load_msg(mtext, msgsz); + msg = load_msg(msgp->mtext, msgsz); if (IS_ERR(msg)) return PTR_ERR(msg); @@ -720,16 +723,6 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, return err; } -asmlinkage long -sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg) -{ - long mtype; - - if (get_user(mtype, &msgp->mtype)) - return -EFAULT; - return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg); -} - static inline int convert_mode(long *msgtyp, int msgflg) { /* @@ -749,8 +742,8 @@ static inline int convert_mode(long *msgtyp, int msgflg) return SEARCH_EQUAL; } -long do_msgrcv(int msqid, long *pmtype, void __user *mtext, - size_t msgsz, long msgtyp, int msgflg) +asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz, + long msgtyp, int msgflg) { struct msg_queue *msq; struct msg_msg *msg; @@ -896,30 +889,15 @@ long do_msgrcv(int msqid, long *pmtype, void __user *mtext, return PTR_ERR(msg); msgsz = (msgsz > msg->m_ts) ? 
msg->m_ts : msgsz; - *pmtype = msg->m_type; - if (store_msg(mtext, msg, msgsz)) + if (put_user (msg->m_type, &msgp->mtype) || + store_msg(msgp->mtext, msg, msgsz)) { msgsz = -EFAULT; - + } free_msg(msg); return msgsz; } -asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz, - long msgtyp, int msgflg) -{ - long err, mtype; - - err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg); - if (err < 0) - goto out; - - if (put_user(mtype, &msgp->mtype)) - err = -EFAULT; -out: - return err; -} - #ifdef CONFIG_PROC_FS static int sysvipc_msg_proc_show(struct seq_file *s, void *it) { diff --git a/trunk/ipc/sem.c b/trunk/ipc/sem.c index d3e12efd55cb..21b3289d640c 100644 --- a/trunk/ipc/sem.c +++ b/trunk/ipc/sem.c @@ -1070,13 +1070,14 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid) ipc_rcu_getref(sma); sem_unlock(sma); - new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); + new = (struct sem_undo *) kmalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); if (!new) { ipc_lock_by_ptr(&sma->sem_perm); ipc_rcu_putref(sma); sem_unlock(sma); return ERR_PTR(-ENOMEM); } + memset(new, 0, sizeof(struct sem_undo) + sizeof(short)*nsems); new->semadj = (short *) &new[1]; new->semid = semid; diff --git a/trunk/ipc/util.c b/trunk/ipc/util.c index a9b7a227b8d4..cd8bb14a431f 100644 --- a/trunk/ipc/util.c +++ b/trunk/ipc/util.c @@ -514,11 +514,6 @@ void ipc_rcu_getref(void *ptr) container_of(ptr, struct ipc_rcu_hdr, data)->refcount++; } -static void ipc_do_vfree(struct work_struct *work) -{ - vfree(container_of(work, struct ipc_rcu_sched, work)); -} - /** * ipc_schedule_free - free ipc + rcu space * @head: RCU callback structure for queued work @@ -533,7 +528,7 @@ static void ipc_schedule_free(struct rcu_head *head) struct ipc_rcu_sched *sched = container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]); - INIT_WORK(&sched->work, ipc_do_vfree); + INIT_WORK(&sched->work, vfree, sched); schedule_work(&sched->work); } diff --git a/trunk/kernel/Kconfig.hz b/trunk/kernel/Kconfig.hz index 4af15802ccd4..248e1c396f8b 100644 --- a/trunk/kernel/Kconfig.hz +++ b/trunk/kernel/Kconfig.hz @@ -7,7 +7,7 @@ choice default HZ_250 help Allows the configuration of the timer frequency. It is customary - to have the timer interrupt run at 1000 Hz but 100 Hz may be more + to have the timer interrupt run at 1000 HZ but 100 HZ may be more beneficial for servers and NUMA systems that do not need to have a fast response for user interaction and that may experience bus contention and cacheline bounces as a result of timer interrupts. @@ -19,30 +19,21 @@ choice config HZ_100 bool "100 HZ" help - 100 Hz is a typical choice for servers, SMP and NUMA systems + 100 HZ is a typical choice for servers, SMP and NUMA systems with lots of processors that may show reduced performance if too many timer interrupts are occurring. config HZ_250 bool "250 HZ" help - 250 Hz is a good compromise choice allowing server performance + 250 HZ is a good compromise choice allowing server performance while also showing good interactive responsiveness even - on SMP and NUMA systems. If you are going to be using NTSC video - or multimedia, selected 300Hz instead. - - config HZ_300 - bool "300 HZ" - help - 300 Hz is a good compromise choice allowing server performance - while also showing good interactive responsiveness even - on SMP and NUMA systems and exactly dividing by both PAL and - NTSC frame rates for video and multimedia work. + on SMP and NUMA systems. 
config HZ_1000 bool "1000 HZ" help - 1000 Hz is the preferred choice for desktop systems and other + 1000 HZ is the preferred choice for desktop systems and other systems requiring fast interactive responses to events. endchoice @@ -51,6 +42,5 @@ config HZ int default 100 if HZ_100 default 250 if HZ_250 - default 300 if HZ_300 default 1000 if HZ_1000 diff --git a/trunk/kernel/acct.c b/trunk/kernel/acct.c index dc12db8600e7..0aad5ca36a81 100644 --- a/trunk/kernel/acct.c +++ b/trunk/kernel/acct.c @@ -89,8 +89,7 @@ struct acct_glbs { struct timer_list timer; }; -static struct acct_glbs acct_globals __cacheline_aligned = - {__SPIN_LOCK_UNLOCKED(acct_globals.lock)}; +static struct acct_glbs acct_globals __cacheline_aligned = {SPIN_LOCK_UNLOCKED}; /* * Called whenever the timer says to check the free space. diff --git a/trunk/kernel/audit.c b/trunk/kernel/audit.c index d9b690ac684b..98106f6078b0 100644 --- a/trunk/kernel/audit.c +++ b/trunk/kernel/audit.c @@ -57,7 +57,6 @@ #include #include #include -#include #include "audit.h" diff --git a/trunk/kernel/auditfilter.c b/trunk/kernel/auditfilter.c index 2e896f8ae29e..4f40d923af8e 100644 --- a/trunk/kernel/auditfilter.c +++ b/trunk/kernel/auditfilter.c @@ -636,9 +636,10 @@ static struct audit_rule *audit_krule_to_rule(struct audit_krule *krule) struct audit_rule *rule; int i; - rule = kzalloc(sizeof(*rule), GFP_KERNEL); + rule = kmalloc(sizeof(*rule), GFP_KERNEL); if (unlikely(!rule)) return NULL; + memset(rule, 0, sizeof(*rule)); rule->flags = krule->flags | krule->listnr; rule->action = krule->action; diff --git a/trunk/kernel/configs.c b/trunk/kernel/configs.c index 8fa1fb28f8a7..f9e31974f4ad 100644 --- a/trunk/kernel/configs.c +++ b/trunk/kernel/configs.c @@ -75,7 +75,7 @@ ikconfig_read_current(struct file *file, char __user *buf, return count; } -static const struct file_operations ikconfig_file_ops = { +static struct file_operations ikconfig_file_ops = { .owner = THIS_MODULE, .read = ikconfig_read_current, }; diff --git a/trunk/kernel/cpu.c b/trunk/kernel/cpu.c index 9124669f4586..272254f20d97 100644 --- a/trunk/kernel/cpu.c +++ b/trunk/kernel/cpu.c @@ -270,7 +270,11 @@ int disable_nonboot_cpus(void) goto out; } } - + error = set_cpus_allowed(current, cpumask_of_cpu(first_cpu)); + if (error) { + printk(KERN_ERR "Could not run on CPU%d\n", first_cpu); + goto out; + } /* We take down all of the non-boot CPUs in one shot to avoid races * with the userspace trying to use the CPU hotplug at the same time */ diff --git a/trunk/kernel/cpuset.c b/trunk/kernel/cpuset.c index 0a6b4d89f9a0..6313c38c930e 100644 --- a/trunk/kernel/cpuset.c +++ b/trunk/kernel/cpuset.c @@ -729,11 +729,9 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial) } /* Remaining checks don't apply to root cpuset */ - if (cur == &top_cpuset) + if ((par = cur->parent) == NULL) return 0; - par = cur->parent; - /* We must be a subset of our parent cpuset */ if (!is_cpuset_subset(trial, par)) return -EACCES; @@ -1062,7 +1060,10 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf) cpu_exclusive_changed = (is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs)); mutex_lock(&callback_mutex); - cs->flags = trialcs.flags; + if (turning_on) + set_bit(bit, &cs->flags); + else + clear_bit(bit, &cs->flags); mutex_unlock(&callback_mutex); if (cpu_exclusive_changed) @@ -1280,8 +1281,7 @@ typedef enum { FILE_TASKLIST, } cpuset_filetype_t; -static ssize_t cpuset_common_file_write(struct file *file, - const char __user *userbuf, +static ssize_t 
cpuset_common_file_write(struct file *file, const char __user *userbuf, size_t nbytes, loff_t *unused_ppos) { struct cpuset *cs = __d_cs(file->f_dentry->d_parent); @@ -1292,7 +1292,7 @@ static ssize_t cpuset_common_file_write(struct file *file, int retval = 0; /* Crude upper limit on largest legitimate cpulist user might write. */ - if (nbytes > 100 + 6 * max(NR_CPUS, MAX_NUMNODES)) + if (nbytes > 100 + 6 * NR_CPUS) return -E2BIG; /* +1 for nul-terminator */ @@ -1532,7 +1532,7 @@ static int cpuset_rename(struct inode *old_dir, struct dentry *old_dentry, return simple_rename(old_dir, old_dentry, new_dir, new_dentry); } -static const struct file_operations cpuset_file_operations = { +static struct file_operations cpuset_file_operations = { .read = cpuset_file_read, .write = cpuset_file_write, .llseek = generic_file_llseek, @@ -2045,6 +2045,7 @@ int __init cpuset_init(void) return err; } +#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_MEMORY_HOTPLUG) /* * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs * or memory nodes, we need to walk over the cpuset hierarchy, @@ -2108,7 +2109,9 @@ static void common_cpu_mem_hotplug_unplug(void) mutex_unlock(&callback_mutex); mutex_unlock(&manage_mutex); } +#endif +#ifdef CONFIG_HOTPLUG_CPU /* * The top_cpuset tracks what CPUs and Memory Nodes are online, * period. This is necessary in order to make cpusets transparent @@ -2125,6 +2128,7 @@ static int cpuset_handle_cpuhp(struct notifier_block *nb, common_cpu_mem_hotplug_unplug(); return 0; } +#endif #ifdef CONFIG_MEMORY_HOTPLUG /* @@ -2606,7 +2610,7 @@ static int cpuset_open(struct inode *inode, struct file *file) return single_open(file, proc_cpuset_show, pid); } -const struct file_operations proc_cpuset_operations = { +struct file_operations proc_cpuset_operations = { .open = cpuset_open, .read = seq_read, .llseek = seq_lseek, diff --git a/trunk/kernel/delayacct.c b/trunk/kernel/delayacct.c index 766d5912b26a..66a0ea48751d 100644 --- a/trunk/kernel/delayacct.c +++ b/trunk/kernel/delayacct.c @@ -20,7 +20,7 @@ #include int delayacct_on __read_mostly = 1; /* Delay accounting turned on/off */ -struct kmem_cache *delayacct_cache; +kmem_cache_t *delayacct_cache; static int __init delayacct_setup_disable(char *str) { @@ -41,7 +41,7 @@ void delayacct_init(void) void __delayacct_tsk_init(struct task_struct *tsk) { - tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL); + tsk->delays = kmem_cache_zalloc(delayacct_cache, SLAB_KERNEL); if (tsk->delays) spin_lock_init(&tsk->delays->lock); } diff --git a/trunk/kernel/dma.c b/trunk/kernel/dma.c index 937b13ca33ba..2020644c938a 100644 --- a/trunk/kernel/dma.c +++ b/trunk/kernel/dma.c @@ -140,7 +140,7 @@ static int proc_dma_open(struct inode *inode, struct file *file) return single_open(file, proc_dma_show, NULL); } -static const struct file_operations proc_dma_operations = { +static struct file_operations proc_dma_operations = { .open = proc_dma_open, .read = seq_read, .llseek = seq_lseek, diff --git a/trunk/kernel/exit.c b/trunk/kernel/exit.c index 4e3f919edc48..06de6c4e8ca3 100644 --- a/trunk/kernel/exit.c +++ b/trunk/kernel/exit.c @@ -850,7 +850,9 @@ static void exit_notify(struct task_struct *tsk) fastcall NORET_TYPE void do_exit(long code) { struct task_struct *tsk = current; + struct taskstats *tidstats; int group_dead; + unsigned int mycpu; profile_task_exit(tsk); @@ -888,6 +890,8 @@ fastcall NORET_TYPE void do_exit(long code) current->comm, current->pid, preempt_count()); + taskstats_exit_alloc(&tidstats, &mycpu); + 
acct_update_integrals(tsk); if (tsk->mm) { update_hiwater_rss(tsk->mm); @@ -907,8 +911,8 @@ fastcall NORET_TYPE void do_exit(long code) #endif if (unlikely(tsk->audit_context)) audit_free(tsk); - - taskstats_exit(tsk, group_dead); + taskstats_exit_send(tsk, tidstats, group_dead, mycpu); + taskstats_exit_free(tidstats); exit_mm(tsk); diff --git a/trunk/kernel/fork.c b/trunk/kernel/fork.c index 658838148647..8cdd3e72ba55 100644 --- a/trunk/kernel/fork.c +++ b/trunk/kernel/fork.c @@ -82,26 +82,26 @@ int nr_processes(void) #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR # define alloc_task_struct() kmem_cache_alloc(task_struct_cachep, GFP_KERNEL) # define free_task_struct(tsk) kmem_cache_free(task_struct_cachep, (tsk)) -static struct kmem_cache *task_struct_cachep; +static kmem_cache_t *task_struct_cachep; #endif /* SLAB cache for signal_struct structures (tsk->signal) */ -static struct kmem_cache *signal_cachep; +static kmem_cache_t *signal_cachep; /* SLAB cache for sighand_struct structures (tsk->sighand) */ -struct kmem_cache *sighand_cachep; +kmem_cache_t *sighand_cachep; /* SLAB cache for files_struct structures (tsk->files) */ -struct kmem_cache *files_cachep; +kmem_cache_t *files_cachep; /* SLAB cache for fs_struct structures (tsk->fs) */ -struct kmem_cache *fs_cachep; +kmem_cache_t *fs_cachep; /* SLAB cache for vm_area_struct structures */ -struct kmem_cache *vm_area_cachep; +kmem_cache_t *vm_area_cachep; /* SLAB cache for mm_struct structures (tsk->mm) */ -static struct kmem_cache *mm_cachep; +static kmem_cache_t *mm_cachep; void free_task(struct task_struct *tsk) { @@ -237,7 +237,7 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) goto fail_nomem; charge = len; } - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); if (!tmp) goto fail_nomem; *tmp = *mpnt; @@ -319,7 +319,7 @@ static inline void mm_free_pgd(struct mm_struct * mm) __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock); -#define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL)) +#define allocate_mm() (kmem_cache_alloc(mm_cachep, SLAB_KERNEL)) #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm))) #include @@ -448,16 +448,7 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm) tsk->vfork_done = NULL; complete(vfork_done); } - - /* - * If we're exiting normally, clear a user-space tid field if - * requested. We leave this alone when dying by signal, to leave - * the value intact in a core dump, and to save the unnecessary - * trouble otherwise. Userland only wants this done for a sys_exit. 
- */ - if (tsk->clear_child_tid - && !(tsk->flags & PF_SIGNALED) - && atomic_read(&mm->mm_users) > 1) { + if (tsk->clear_child_tid && atomic_read(&mm->mm_users) > 1) { u32 __user * tidptr = tsk->clear_child_tid; tsk->clear_child_tid = NULL; @@ -488,10 +479,6 @@ static struct mm_struct *dup_mm(struct task_struct *tsk) memcpy(mm, oldmm, sizeof(*mm)); - /* Initializing for Swap token stuff */ - mm->token_priority = 0; - mm->last_interval = 0; - if (!mm_init(mm)) goto fail_nomem; @@ -555,10 +542,6 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk) goto fail_nomem; good_mm: - /* Initializing for Swap token stuff */ - mm->token_priority = 0; - mm->last_interval = 0; - tsk->mm = mm; tsk->active_mm = mm; return 0; @@ -630,7 +613,7 @@ static struct files_struct *alloc_files(void) struct files_struct *newf; struct fdtable *fdt; - newf = kmem_cache_alloc(files_cachep, GFP_KERNEL); + newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL); if (!newf) goto out; @@ -847,6 +830,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts if (clone_flags & CLONE_THREAD) { atomic_inc(¤t->signal->count); atomic_inc(¤t->signal->live); + taskstats_tgid_alloc(current); return 0; } sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); @@ -1429,7 +1413,7 @@ long do_fork(unsigned long clone_flags, #define ARCH_MIN_MMSTRUCT_ALIGN 0 #endif -static void sighand_ctor(void *data, struct kmem_cache *cachep, unsigned long flags) +static void sighand_ctor(void *data, kmem_cache_t *cachep, unsigned long flags) { struct sighand_struct *sighand = data; diff --git a/trunk/kernel/futex.c b/trunk/kernel/futex.c index 95989a3b4168..93ef30ba209f 100644 --- a/trunk/kernel/futex.c +++ b/trunk/kernel/futex.c @@ -282,9 +282,9 @@ static inline int get_futex_value_locked(u32 *dest, u32 __user *from) { int ret; - pagefault_disable(); + inc_preempt_count(); ret = __copy_from_user_inatomic(dest, from, sizeof(u32)); - pagefault_enable(); + dec_preempt_count(); return ret ? -EFAULT : 0; } @@ -324,11 +324,12 @@ static int refill_pi_state_cache(void) if (likely(current->pi_state_cache)) return 0; - pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL); + pi_state = kmalloc(sizeof(*pi_state), GFP_KERNEL); if (!pi_state) return -ENOMEM; + memset(pi_state, 0, sizeof(*pi_state)); INIT_LIST_HEAD(&pi_state->list); /* pi_mutex gets initialized later */ pi_state->owner = NULL; @@ -552,7 +553,7 @@ static void wake_futex(struct futex_q *q) * at the end of wake_up_all() does not prevent this store from * moving. */ - smp_wmb(); + wmb(); q->lock_ptr = NULL; } @@ -584,9 +585,9 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) if (!(uval & FUTEX_OWNER_DIED)) { newval = FUTEX_WAITERS | new_owner->pid; - pagefault_disable(); + inc_preempt_count(); curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); - pagefault_enable(); + dec_preempt_count(); if (curval == -EFAULT) return -EFAULT; if (curval != uval) @@ -617,9 +618,9 @@ static int unlock_futex_pi(u32 __user *uaddr, u32 uval) * There is no waiter, so we unlock the futex. The owner died * bit has not to be preserved here. 
We are the owner: */ - pagefault_disable(); + inc_preempt_count(); oldval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 0); - pagefault_enable(); + dec_preempt_count(); if (oldval == -EFAULT) return oldval; @@ -1157,9 +1158,9 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec, */ newval = current->pid; - pagefault_disable(); + inc_preempt_count(); curval = futex_atomic_cmpxchg_inatomic(uaddr, 0, newval); - pagefault_enable(); + dec_preempt_count(); if (unlikely(curval == -EFAULT)) goto uaddr_faulted; @@ -1182,9 +1183,9 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec, uval = curval; newval = uval | FUTEX_WAITERS; - pagefault_disable(); + inc_preempt_count(); curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); - pagefault_enable(); + dec_preempt_count(); if (unlikely(curval == -EFAULT)) goto uaddr_faulted; @@ -1214,10 +1215,10 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec, newval = current->pid | FUTEX_OWNER_DIED | FUTEX_WAITERS; - pagefault_disable(); + inc_preempt_count(); curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); - pagefault_enable(); + dec_preempt_count(); if (unlikely(curval == -EFAULT)) goto uaddr_faulted; @@ -1389,9 +1390,9 @@ static int futex_unlock_pi(u32 __user *uaddr) * anyone else up: */ if (!(uval & FUTEX_OWNER_DIED)) { - pagefault_disable(); + inc_preempt_count(); uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0); - pagefault_enable(); + dec_preempt_count(); } if (unlikely(uval == -EFAULT)) @@ -1492,7 +1493,7 @@ static unsigned int futex_poll(struct file *filp, return ret; } -static const struct file_operations futex_fops = { +static struct file_operations futex_fops = { .release = futex_close, .poll = futex_poll, }; @@ -1857,16 +1858,10 @@ static struct file_system_type futex_fs_type = { static int __init init(void) { - int i = register_filesystem(&futex_fs_type); - - if (i) - return i; + unsigned int i; + register_filesystem(&futex_fs_type); futex_mnt = kern_mount(&futex_fs_type); - if (IS_ERR(futex_mnt)) { - unregister_filesystem(&futex_fs_type); - return PTR_ERR(futex_mnt); - } for (i = 0; i < ARRAY_SIZE(futex_queues); i++) { INIT_LIST_HEAD(&futex_queues[i].chain); diff --git a/trunk/kernel/irq/handle.c b/trunk/kernel/irq/handle.c index aff1f0fabb0d..a681912bc89a 100644 --- a/trunk/kernel/irq/handle.c +++ b/trunk/kernel/irq/handle.c @@ -54,7 +54,7 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned = { .chip = &no_irq_chip, .handle_irq = handle_bad_irq, .depth = 1, - .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), + .lock = SPIN_LOCK_UNLOCKED, #ifdef CONFIG_SMP .affinity = CPU_MASK_ALL #endif diff --git a/trunk/kernel/kallsyms.c b/trunk/kernel/kallsyms.c index ab63cfc42992..eeac3e313b2b 100644 --- a/trunk/kernel/kallsyms.c +++ b/trunk/kernel/kallsyms.c @@ -20,7 +20,6 @@ #include #include /* for cond_resched */ #include -#include #include @@ -302,6 +301,13 @@ struct kallsym_iter char name[KSYM_NAME_LEN+1]; }; +/* Only label it "global" if it is exported. */ +static void upcase_if_global(struct kallsym_iter *iter) +{ + if (is_exported(iter->name, iter->owner)) + iter->type += 'A' - 'a'; +} + static int get_ksymbol_mod(struct kallsym_iter *iter) { iter->owner = module_get_kallsym(iter->pos - kallsyms_num_syms, @@ -310,10 +316,7 @@ static int get_ksymbol_mod(struct kallsym_iter *iter) if (iter->owner == NULL) return 0; - /* Label it "global" if it is exported, "local" if not exported. */ - iter->type = is_exported(iter->name, iter->owner) - ? 
toupper(iter->type) : tolower(iter->type); - + upcase_if_global(iter); return 1; } @@ -398,7 +401,7 @@ static int s_show(struct seq_file *m, void *p) return 0; } -static const struct seq_operations kallsyms_op = { +static struct seq_operations kallsyms_op = { .start = s_start, .next = s_next, .stop = s_stop, @@ -433,7 +436,7 @@ static int kallsyms_release(struct inode *inode, struct file *file) return seq_release(inode, file); } -static const struct file_operations kallsyms_operations = { +static struct file_operations kallsyms_operations = { .open = kallsyms_open, .read = seq_read, .llseek = seq_lseek, diff --git a/trunk/kernel/kexec.c b/trunk/kernel/kexec.c index afbbbe981be2..fcdd5d2bc3f4 100644 --- a/trunk/kernel/kexec.c +++ b/trunk/kernel/kexec.c @@ -20,8 +20,6 @@ #include #include #include -#include -#include #include #include @@ -110,10 +108,11 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, /* Allocate a controlling structure */ result = -ENOMEM; - image = kzalloc(sizeof(*image), GFP_KERNEL); + image = kmalloc(sizeof(*image), GFP_KERNEL); if (!image) goto out; + memset(image, 0, sizeof(*image)); image->head = 0; image->entry = &image->head; image->last_entry = &image->head; @@ -1068,60 +1067,6 @@ void crash_kexec(struct pt_regs *regs) } } -static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data, - size_t data_len) -{ - struct elf_note note; - - note.n_namesz = strlen(name) + 1; - note.n_descsz = data_len; - note.n_type = type; - memcpy(buf, ¬e, sizeof(note)); - buf += (sizeof(note) + 3)/4; - memcpy(buf, name, note.n_namesz); - buf += (note.n_namesz + 3)/4; - memcpy(buf, data, note.n_descsz); - buf += (note.n_descsz + 3)/4; - - return buf; -} - -static void final_note(u32 *buf) -{ - struct elf_note note; - - note.n_namesz = 0; - note.n_descsz = 0; - note.n_type = 0; - memcpy(buf, ¬e, sizeof(note)); -} - -void crash_save_cpu(struct pt_regs *regs, int cpu) -{ - struct elf_prstatus prstatus; - u32 *buf; - - if ((cpu < 0) || (cpu >= NR_CPUS)) - return; - - /* Using ELF notes here is opportunistic. - * I need a well defined structure format - * for the data I pass, and I need tags - * on the data to indicate what information I have - * squirrelled away. ELF notes happen to provide - * all of that, so there is no need to invent something new. - */ - buf = (u32*)per_cpu_ptr(crash_notes, cpu); - if (!buf) - return; - memset(&prstatus, 0, sizeof(prstatus)); - prstatus.pr_pid = current->pid; - elf_core_copy_regs(&prstatus.pr_reg, regs); - buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus, - sizeof(prstatus)); - final_note(buf); -} - static int __init crash_notes_memory_init(void) { /* Allocate memory for saving cpu registers. 
*/ diff --git a/trunk/kernel/kmod.c b/trunk/kernel/kmod.c index 8d2bea09a4ec..2b76dee28496 100644 --- a/trunk/kernel/kmod.c +++ b/trunk/kernel/kmod.c @@ -114,7 +114,6 @@ EXPORT_SYMBOL(request_module); #endif /* CONFIG_KMOD */ struct subprocess_info { - struct work_struct work; struct completion *complete; char *path; char **argv; @@ -222,10 +221,9 @@ static int wait_for_helper(void *data) } /* This is run by khelper thread */ -static void __call_usermodehelper(struct work_struct *work) +static void __call_usermodehelper(void *data) { - struct subprocess_info *sub_info = - container_of(work, struct subprocess_info, work); + struct subprocess_info *sub_info = data; pid_t pid; int wait = sub_info->wait; @@ -266,8 +264,6 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp, { DECLARE_COMPLETION_ONSTACK(done); struct subprocess_info sub_info = { - .work = __WORK_INITIALIZER(sub_info.work, - __call_usermodehelper), .complete = &done, .path = path, .argv = argv, @@ -276,6 +272,7 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp, .wait = wait, .retval = 0, }; + DECLARE_WORK(work, __call_usermodehelper, &sub_info); if (!khelper_wq) return -EBUSY; @@ -283,7 +280,7 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp, if (path[0] == '\0') return 0; - queue_work(khelper_wq, &sub_info.work); + queue_work(khelper_wq, &work); wait_for_completion(&done); return sub_info.retval; } @@ -294,8 +291,6 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp, { DECLARE_COMPLETION(done); struct subprocess_info sub_info = { - .work = __WORK_INITIALIZER(sub_info.work, - __call_usermodehelper), .complete = &done, .path = path, .argv = argv, @@ -303,6 +298,7 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp, .retval = 0, }; struct file *f; + DECLARE_WORK(work, __call_usermodehelper, &sub_info); if (!khelper_wq) return -EBUSY; @@ -322,7 +318,7 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp, } sub_info.stdin = f; - queue_work(khelper_wq, &sub_info.work); + queue_work(khelper_wq, &work); wait_for_completion(&done); return sub_info.retval; } diff --git a/trunk/kernel/kprobes.c b/trunk/kernel/kprobes.c index 17ec4afb0994..610c837ad9e0 100644 --- a/trunk/kernel/kprobes.c +++ b/trunk/kernel/kprobes.c @@ -38,7 +38,6 @@ #include #include #include -#include #include #include #include @@ -84,36 +83,9 @@ struct kprobe_insn_page { kprobe_opcode_t *insns; /* Page of instruction slots */ char slot_used[INSNS_PER_PAGE]; int nused; - int ngarbage; }; static struct hlist_head kprobe_insn_pages; -static int kprobe_garbage_slots; -static int collect_garbage_slots(void); - -static int __kprobes check_safety(void) -{ - int ret = 0; -#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM) - ret = freeze_processes(); - if (ret == 0) { - struct task_struct *p, *q; - do_each_thread(p, q) { - if (p != current && p->state == TASK_RUNNING && - p->pid != 0) { - printk("Check failed: %s is running\n",p->comm); - ret = -1; - goto loop_end; - } - } while_each_thread(p, q); - } -loop_end: - thaw_processes(); -#else - synchronize_sched(); -#endif - return ret; -} /** * get_insn_slot() - Find a slot on an executable page for an instruction. 
@@ -124,7 +96,6 @@ kprobe_opcode_t __kprobes *get_insn_slot(void) struct kprobe_insn_page *kip; struct hlist_node *pos; - retry: hlist_for_each(pos, &kprobe_insn_pages) { kip = hlist_entry(pos, struct kprobe_insn_page, hlist); if (kip->nused < INSNS_PER_PAGE) { @@ -141,11 +112,7 @@ kprobe_opcode_t __kprobes *get_insn_slot(void) } } - /* If there are any garbage slots, collect it and try again. */ - if (kprobe_garbage_slots && collect_garbage_slots() == 0) { - goto retry; - } - /* All out of space. Need to allocate a new page. Use slot 0. */ + /* All out of space. Need to allocate a new page. Use slot 0.*/ kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL); if (!kip) { return NULL; @@ -166,62 +133,10 @@ kprobe_opcode_t __kprobes *get_insn_slot(void) memset(kip->slot_used, 0, INSNS_PER_PAGE); kip->slot_used[0] = 1; kip->nused = 1; - kip->ngarbage = 0; return kip->insns; } -/* Return 1 if all garbages are collected, otherwise 0. */ -static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx) -{ - kip->slot_used[idx] = 0; - kip->nused--; - if (kip->nused == 0) { - /* - * Page is no longer in use. Free it unless - * it's the last one. We keep the last one - * so as not to have to set it up again the - * next time somebody inserts a probe. - */ - hlist_del(&kip->hlist); - if (hlist_empty(&kprobe_insn_pages)) { - INIT_HLIST_NODE(&kip->hlist); - hlist_add_head(&kip->hlist, - &kprobe_insn_pages); - } else { - module_free(NULL, kip->insns); - kfree(kip); - } - return 1; - } - return 0; -} - -static int __kprobes collect_garbage_slots(void) -{ - struct kprobe_insn_page *kip; - struct hlist_node *pos, *next; - - /* Ensure no-one is preepmted on the garbages */ - if (check_safety() != 0) - return -EAGAIN; - - hlist_for_each_safe(pos, next, &kprobe_insn_pages) { - int i; - kip = hlist_entry(pos, struct kprobe_insn_page, hlist); - if (kip->ngarbage == 0) - continue; - kip->ngarbage = 0; /* we will collect all garbages */ - for (i = 0; i < INSNS_PER_PAGE; i++) { - if (kip->slot_used[i] == -1 && - collect_one_slot(kip, i)) - break; - } - } - kprobe_garbage_slots = 0; - return 0; -} - -void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty) +void __kprobes free_insn_slot(kprobe_opcode_t *slot) { struct kprobe_insn_page *kip; struct hlist_node *pos; @@ -231,18 +146,28 @@ void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty) if (kip->insns <= slot && slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) { int i = (slot - kip->insns) / MAX_INSN_SIZE; - if (dirty) { - kip->slot_used[i] = -1; - kip->ngarbage++; - } else { - collect_one_slot(kip, i); + kip->slot_used[i] = 0; + kip->nused--; + if (kip->nused == 0) { + /* + * Page is no longer in use. Free it unless + * it's the last one. We keep the last one + * so as not to have to set it up again the + * next time somebody inserts a probe. + */ + hlist_del(&kip->hlist); + if (hlist_empty(&kprobe_insn_pages)) { + INIT_HLIST_NODE(&kip->hlist); + hlist_add_head(&kip->hlist, + &kprobe_insn_pages); + } else { + module_free(NULL, kip->insns); + kfree(kip); + } } - break; + return; } } - if (dirty && (++kprobe_garbage_slots > INSNS_PER_PAGE)) { - collect_garbage_slots(); - } } #endif diff --git a/trunk/kernel/kthread.c b/trunk/kernel/kthread.c index 1db8c72d0d38..4f9c60ef95e8 100644 --- a/trunk/kernel/kthread.c +++ b/trunk/kernel/kthread.c @@ -31,8 +31,6 @@ struct kthread_create_info /* Result passed back to kthread_create() from keventd. 
*/ struct task_struct *result; struct completion done; - - struct work_struct work; }; struct kthread_stop_info @@ -113,10 +111,9 @@ static int kthread(void *_create) } /* We are keventd: create a thread. */ -static void keventd_create_kthread(struct work_struct *work) +static void keventd_create_kthread(void *_create) { - struct kthread_create_info *create = - container_of(work, struct kthread_create_info, work); + struct kthread_create_info *create = _create; int pid; /* We want our own signal handler (we take no signals by default). */ @@ -157,20 +154,20 @@ struct task_struct *kthread_create(int (*threadfn)(void *data), ...) { struct kthread_create_info create; + DECLARE_WORK(work, keventd_create_kthread, &create); create.threadfn = threadfn; create.data = data; init_completion(&create.started); init_completion(&create.done); - INIT_WORK(&create.work, keventd_create_kthread); /* * The workqueue needs to start up first: */ if (!helper_wq) - create.work.func(&create.work); + work.func(work.data); else { - queue_work(helper_wq, &create.work); + queue_work(helper_wq, &work); wait_for_completion(&create.done); } if (!IS_ERR(create.result)) { diff --git a/trunk/kernel/lockdep.c b/trunk/kernel/lockdep.c index 62e73ce68197..c9fefdb1a7db 100644 --- a/trunk/kernel/lockdep.c +++ b/trunk/kernel/lockdep.c @@ -140,6 +140,13 @@ void lockdep_on(void) EXPORT_SYMBOL(lockdep_on); +int lockdep_internal(void) +{ + return current->lockdep_recursion != 0; +} + +EXPORT_SYMBOL(lockdep_internal); + /* * Debugging switches: */ @@ -230,10 +237,8 @@ e locks. */ trace->max_entries = trace->nr_entries; nr_stack_trace_entries += trace->nr_entries; - if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES)) { - __raw_spin_unlock(&hash_lock); + if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES)) return 0; - } if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) { __raw_spin_unlock(&hash_lock); @@ -352,7 +357,7 @@ get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4 static void print_lock_name(struct lock_class *class) { - char str[KSYM_NAME_LEN + 1], c1, c2, c3, c4; + char str[128], c1, c2, c3, c4; const char *name; get_usage_chars(class, &c1, &c2, &c3, &c4); @@ -374,7 +379,7 @@ static void print_lock_name(struct lock_class *class) static void print_lockdep_cache(struct lockdep_map *lock) { const char *name; - char str[KSYM_NAME_LEN + 1]; + char str[128]; name = lock->name; if (!name) @@ -444,9 +449,7 @@ static void print_lock_dependencies(struct lock_class *class, int depth) print_lock_class_header(class, depth); list_for_each_entry(entry, &class->locks_after, entry) { - if (DEBUG_LOCKS_WARN_ON(!entry->class)) - return; - + DEBUG_LOCKS_WARN_ON(!entry->class); print_lock_dependencies(entry->class, depth + 1); printk("%*s ... 
acquired at:\n",depth,""); @@ -471,8 +474,7 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this, return 0; entry->class = this; - if (!save_trace(&entry->trace)) - return 0; + save_trace(&entry->trace); /* * Since we never remove from the dependency list, the list can @@ -560,12 +562,8 @@ static noinline int print_circular_bug_tail(void) if (debug_locks_silent) return 0; - /* hash_lock unlocked by the header */ - __raw_spin_lock(&hash_lock); this.class = check_source->class; - if (!save_trace(&this.trace)) - return 0; - __raw_spin_unlock(&hash_lock); + save_trace(&this.trace); print_circular_bug_entry(&this, 0); printk("\nother info that might help us debug this:\n\n"); @@ -968,11 +966,14 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, &prev->class->locks_after, next->acquire_ip); if (!ret) return 0; - + /* + * Return value of 2 signals 'dependency already added', + * in that case we dont have to add the backlink either. + */ + if (ret == 2) + return 2; ret = add_lock_to_list(next->class, prev->class, &next->class->locks_before, next->acquire_ip); - if (!ret) - return 0; /* * Debugging printouts: @@ -1024,8 +1025,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next) * added: */ if (hlock->read != 2) { - if (!check_prev_add(curr, hlock, next)) - return 0; + check_prev_add(curr, hlock, next); /* * Stop after the first non-trylock entry, * as non-trylock entries have added their @@ -1182,7 +1182,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) struct lockdep_subclass_key *key; struct list_head *hash_head; struct lock_class *class; - unsigned long flags; class = look_up_lock_class(lock, subclass); if (likely(class)) @@ -1204,7 +1203,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) key = lock->key->subkeys + subclass; hash_head = classhashentry(key); - raw_local_irq_save(flags); __raw_spin_lock(&hash_lock); /* * We have to do the hash-walk again, to avoid races @@ -1219,7 +1217,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) */ if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { __raw_spin_unlock(&hash_lock); - raw_local_irq_restore(flags); debug_locks_off(); printk("BUG: MAX_LOCKDEP_KEYS too low!\n"); printk("turning off the locking correctness validator.\n"); @@ -1242,18 +1239,15 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) if (verbose(class)) { __raw_spin_unlock(&hash_lock); - raw_local_irq_restore(flags); printk("\nnew class %p: %s", class->key, class->name); if (class->name_version > 1) printk("#%d", class->name_version); printk("\n"); dump_stack(); - raw_local_irq_save(flags); __raw_spin_lock(&hash_lock); } out_unlock_set: __raw_spin_unlock(&hash_lock); - raw_local_irq_restore(flags); if (!subclass || force) lock->class_cache = class; @@ -1734,7 +1728,6 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, debug_atomic_dec(&nr_unused_locks); break; default: - __raw_spin_unlock(&hash_lock); debug_locks_off(); WARN_ON(1); return 0; @@ -2652,7 +2645,6 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len) } local_irq_restore(flags); } -EXPORT_SYMBOL_GPL(debug_check_no_locks_freed); static void print_held_locks_bug(struct task_struct *curr) { diff --git a/trunk/kernel/lockdep_internals.h b/trunk/kernel/lockdep_internals.h index 8ce09bc4613d..eab043c83bb2 100644 --- a/trunk/kernel/lockdep_internals.h +++ b/trunk/kernel/lockdep_internals.h @@ -20,7 
+20,7 @@ #define MAX_LOCKDEP_KEYS_BITS 11 #define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) -#define MAX_LOCKDEP_CHAINS_BITS 14 +#define MAX_LOCKDEP_CHAINS_BITS 13 #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) /* diff --git a/trunk/kernel/lockdep_proc.c b/trunk/kernel/lockdep_proc.c index b554b40a4aa6..f6e72eaab3fa 100644 --- a/trunk/kernel/lockdep_proc.c +++ b/trunk/kernel/lockdep_proc.c @@ -113,7 +113,7 @@ static int l_show(struct seq_file *m, void *v) return 0; } -static const struct seq_operations lockdep_ops = { +static struct seq_operations lockdep_ops = { .start = l_start, .next = l_next, .stop = l_stop, @@ -135,7 +135,7 @@ static int lockdep_open(struct inode *inode, struct file *file) return res; } -static const struct file_operations proc_lockdep_operations = { +static struct file_operations proc_lockdep_operations = { .open = lockdep_open, .read = seq_read, .llseek = seq_lseek, @@ -319,7 +319,7 @@ static int lockdep_stats_open(struct inode *inode, struct file *file) return single_open(file, lockdep_stats_show, NULL); } -static const struct file_operations proc_lockdep_stats_operations = { +static struct file_operations proc_lockdep_stats_operations = { .open = lockdep_stats_open, .read = seq_read, .llseek = seq_lseek, diff --git a/trunk/kernel/module.c b/trunk/kernel/module.c index d9eae45d0145..e2d09d604ca0 100644 --- a/trunk/kernel/module.c +++ b/trunk/kernel/module.c @@ -2209,7 +2209,7 @@ static int m_show(struct seq_file *m, void *p) Where refcount is a number or -, and deps is a comma-separated list of depends or -. */ -const struct seq_operations modules_op = { +struct seq_operations modules_op = { .start = m_start, .next = m_next, .stop = m_stop, diff --git a/trunk/kernel/mutex-debug.c b/trunk/kernel/mutex-debug.c index 841539d72c55..18651641a7b5 100644 --- a/trunk/kernel/mutex-debug.c +++ b/trunk/kernel/mutex-debug.c @@ -77,9 +77,6 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, void debug_mutex_unlock(struct mutex *lock) { - if (unlikely(!debug_locks)) - return; - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info()); DEBUG_LOCKS_WARN_ON(lock->magic != lock); DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); diff --git a/trunk/kernel/pid.c b/trunk/kernel/pid.c index a48879b0b921..b914392085f9 100644 --- a/trunk/kernel/pid.c +++ b/trunk/kernel/pid.c @@ -31,7 +31,7 @@ #define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift) static struct hlist_head *pid_hash; static int pidhash_shift; -static struct kmem_cache *pid_cachep; +static kmem_cache_t *pid_cachep; int pid_max = PID_MAX_DEFAULT; diff --git a/trunk/kernel/posix-timers.c b/trunk/kernel/posix-timers.c index 5fe87de10ff0..9cbb5d1be06f 100644 --- a/trunk/kernel/posix-timers.c +++ b/trunk/kernel/posix-timers.c @@ -70,7 +70,7 @@ /* * Lets keep our timers in a slab cache :-) */ -static struct kmem_cache *posix_timers_cache; +static kmem_cache_t *posix_timers_cache; static struct idr posix_timers_id; static DEFINE_SPINLOCK(idr_lock); diff --git a/trunk/kernel/power/Kconfig b/trunk/kernel/power/Kconfig index 710ed084e7c5..825068ca3479 100644 --- a/trunk/kernel/power/Kconfig +++ b/trunk/kernel/power/Kconfig @@ -78,7 +78,7 @@ config PM_SYSFS_DEPRECATED config SOFTWARE_SUSPEND bool "Software Suspend" - depends on PM && SWAP && ((X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)) + depends on PM && SWAP && ((X86 && (!SMP || SUSPEND_SMP) && !X86_PAE) || ((FRV || PPC32) && !SMP)) ---help--- Enable the possibility of suspending the machine. 
It doesn't need ACPI or APM. diff --git a/trunk/kernel/power/disk.c b/trunk/kernel/power/disk.c index 0b00f56c2ad0..b1fb7866b0b3 100644 --- a/trunk/kernel/power/disk.c +++ b/trunk/kernel/power/disk.c @@ -20,7 +20,6 @@ #include #include #include -#include #include "power.h" @@ -28,23 +27,6 @@ static int noresume = 0; char resume_file[256] = CONFIG_PM_STD_PARTITION; dev_t swsusp_resume_device; -sector_t swsusp_resume_block; - -/** - * platform_prepare - prepare the machine for hibernation using the - * platform driver if so configured and return an error code if it fails - */ - -static inline int platform_prepare(void) -{ - int error = 0; - - if (pm_disk_mode == PM_DISK_PLATFORM) { - if (pm_ops && pm_ops->prepare) - error = pm_ops->prepare(PM_SUSPEND_DISK); - } - return error; -} /** * power_down - Shut machine down for hibernate. @@ -58,10 +40,12 @@ static inline int platform_prepare(void) static void power_down(suspend_disk_method_t mode) { + int error = 0; + switch(mode) { case PM_DISK_PLATFORM: kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK); - pm_ops->enter(PM_SUSPEND_DISK); + error = pm_ops->enter(PM_SUSPEND_DISK); break; case PM_DISK_SHUTDOWN: kernel_power_off(); @@ -106,18 +90,12 @@ static int prepare_processes(void) goto thaw; } - error = platform_prepare(); - if (error) - goto thaw; - /* Free memory before shutting down devices. */ if (!(error = swsusp_shrink_memory())) return 0; - - platform_finish(); - thaw: +thaw: thaw_processes(); - enable_cpus: +enable_cpus: enable_nonboot_cpus(); pm_restore_console(); return error; @@ -149,7 +127,7 @@ int pm_suspend_disk(void) return error; if (pm_disk_mode == PM_DISK_TESTPROC) - return 0; + goto Thaw; suspend_console(); error = device_suspend(PMSG_FREEZE); @@ -211,10 +189,10 @@ static int software_resume(void) { int error; - mutex_lock(&pm_mutex); + down(&pm_sem); if (!swsusp_resume_device) { if (!strlen(resume_file)) { - mutex_unlock(&pm_mutex); + up(&pm_sem); return -ENOENT; } swsusp_resume_device = name_to_dev_t(resume_file); @@ -229,7 +207,7 @@ static int software_resume(void) * FIXME: If noresume is specified, we need to find the partition * and reset it back to normal swap space. */ - mutex_unlock(&pm_mutex); + up(&pm_sem); return 0; } @@ -273,7 +251,7 @@ static int software_resume(void) unprepare_processes(); Done: /* For success case, the suspend path will release the lock */ - mutex_unlock(&pm_mutex); + up(&pm_sem); pr_debug("PM: Resume from disk failed.\n"); return 0; } @@ -334,7 +312,7 @@ static ssize_t disk_store(struct subsystem * s, const char * buf, size_t n) p = memchr(buf, '\n', n); len = p ? p - buf : n; - mutex_lock(&pm_mutex); + down(&pm_sem); for (i = PM_DISK_FIRMWARE; i < PM_DISK_MAX; i++) { if (!strncmp(buf, pm_disk_modes[i], len)) { mode = i; @@ -358,7 +336,7 @@ static ssize_t disk_store(struct subsystem * s, const char * buf, size_t n) pr_debug("PM: suspend-to-disk mode set to '%s'\n", pm_disk_modes[mode]); - mutex_unlock(&pm_mutex); + up(&pm_sem); return error ? 
error : n; } @@ -383,14 +361,14 @@ static ssize_t resume_store(struct subsystem *subsys, const char *buf, size_t n) if (maj != MAJOR(res) || min != MINOR(res)) goto out; - mutex_lock(&pm_mutex); + down(&pm_sem); swsusp_resume_device = res; - mutex_unlock(&pm_mutex); + up(&pm_sem); printk("Attempting manual resume\n"); noresume = 0; software_resume(); ret = n; - out: +out: return ret; } @@ -445,19 +423,6 @@ static int __init resume_setup(char *str) return 1; } -static int __init resume_offset_setup(char *str) -{ - unsigned long long offset; - - if (noresume) - return 1; - - if (sscanf(str, "%llu", &offset) == 1) - swsusp_resume_block = offset; - - return 1; -} - static int __init noresume_setup(char *str) { noresume = 1; @@ -465,5 +430,4 @@ static int __init noresume_setup(char *str) } __setup("noresume", noresume_setup); -__setup("resume_offset=", resume_offset_setup); __setup("resume=", resume_setup); diff --git a/trunk/kernel/power/main.c b/trunk/kernel/power/main.c index 500eb87f643d..873228c71dab 100644 --- a/trunk/kernel/power/main.c +++ b/trunk/kernel/power/main.c @@ -8,7 +8,6 @@ * */ -#include #include #include #include @@ -19,14 +18,13 @@ #include #include #include -#include #include "power.h" /*This is just an arbitrary number */ #define FREE_PAGE_NUMBER (100) -DEFINE_MUTEX(pm_mutex); +DECLARE_MUTEX(pm_sem); struct pm_ops *pm_ops; suspend_disk_method_t pm_disk_mode = PM_DISK_SHUTDOWN; @@ -38,9 +36,9 @@ suspend_disk_method_t pm_disk_mode = PM_DISK_SHUTDOWN; void pm_set_ops(struct pm_ops * ops) { - mutex_lock(&pm_mutex); + down(&pm_sem); pm_ops = ops; - mutex_unlock(&pm_mutex); + up(&pm_sem); } @@ -184,7 +182,7 @@ static int enter_state(suspend_state_t state) if (!valid_state(state)) return -ENODEV; - if (!mutex_trylock(&pm_mutex)) + if (down_trylock(&pm_sem)) return -EBUSY; if (state == PM_SUSPEND_DISK) { @@ -202,7 +200,7 @@ static int enter_state(suspend_state_t state) pr_debug("PM: Finishing wakeup.\n"); suspend_finish(state); Unlock: - mutex_unlock(&pm_mutex); + up(&pm_sem); return error; } @@ -231,7 +229,7 @@ int pm_suspend(suspend_state_t state) return -EINVAL; } -EXPORT_SYMBOL(pm_suspend); + decl_subsys(power,NULL,NULL); diff --git a/trunk/kernel/power/power.h b/trunk/kernel/power/power.h index eb461b816bf4..bfe999f7b272 100644 --- a/trunk/kernel/power/power.h +++ b/trunk/kernel/power/power.h @@ -22,9 +22,7 @@ static inline int pm_suspend_disk(void) return -EPERM; } #endif - -extern struct mutex pm_mutex; - +extern struct semaphore pm_sem; #define power_attr(_name) \ static struct subsys_attribute _name##_attr = { \ .attr = { \ @@ -44,7 +42,6 @@ extern const void __nosave_begin, __nosave_end; extern unsigned long image_size; extern int in_suspend; extern dev_t swsusp_resume_device; -extern sector_t swsusp_resume_block; extern asmlinkage int swsusp_arch_suspend(void); extern asmlinkage int swsusp_arch_resume(void); @@ -105,18 +102,8 @@ struct snapshot_handle { extern unsigned int snapshot_additional_pages(struct zone *zone); extern int snapshot_read_next(struct snapshot_handle *handle, size_t count); extern int snapshot_write_next(struct snapshot_handle *handle, size_t count); -extern void snapshot_write_finalize(struct snapshot_handle *handle); extern int snapshot_image_loaded(struct snapshot_handle *handle); - -/* - * This structure is used to pass the values needed for the identification - * of the resume swap area from a user space to the kernel via the - * SNAPSHOT_SET_SWAP_AREA ioctl - */ -struct resume_swap_area { - loff_t offset; - u_int32_t dev; -} 
__attribute__((packed)); +extern void snapshot_free_unused_memory(struct snapshot_handle *handle); #define SNAPSHOT_IOC_MAGIC '3' #define SNAPSHOT_FREEZE _IO(SNAPSHOT_IOC_MAGIC, 1) @@ -130,14 +117,7 @@ struct resume_swap_area { #define SNAPSHOT_FREE_SWAP_PAGES _IO(SNAPSHOT_IOC_MAGIC, 9) #define SNAPSHOT_SET_SWAP_FILE _IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int) #define SNAPSHOT_S2RAM _IO(SNAPSHOT_IOC_MAGIC, 11) -#define SNAPSHOT_PMOPS _IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int) -#define SNAPSHOT_SET_SWAP_AREA _IOW(SNAPSHOT_IOC_MAGIC, 13, \ - struct resume_swap_area) -#define SNAPSHOT_IOC_MAXNR 13 - -#define PMOPS_PREPARE 1 -#define PMOPS_ENTER 2 -#define PMOPS_FINISH 3 +#define SNAPSHOT_IOC_MAXNR 11 /** * The bitmap is used for tracing allocated swap pages @@ -161,7 +141,7 @@ struct bitmap_page { extern void free_bitmap(struct bitmap_page *bitmap); extern struct bitmap_page *alloc_bitmap(unsigned int nr_bits); -extern sector_t alloc_swapdev_block(int swap, struct bitmap_page *bitmap); +extern unsigned long alloc_swap_page(int swap, struct bitmap_page *bitmap); extern void free_all_swap_pages(int swap, struct bitmap_page *bitmap); extern int swsusp_check(void); @@ -173,7 +153,3 @@ extern int swsusp_read(void); extern int swsusp_write(void); extern void swsusp_close(void); extern int suspend_enter(suspend_state_t state); - -struct timeval; -extern void swsusp_show_speed(struct timeval *, struct timeval *, - unsigned int, char *); diff --git a/trunk/kernel/power/poweroff.c b/trunk/kernel/power/poweroff.c index 678ec736076b..f1f900ac3164 100644 --- a/trunk/kernel/power/poweroff.c +++ b/trunk/kernel/power/poweroff.c @@ -16,12 +16,12 @@ * callback we use. */ -static void do_poweroff(struct work_struct *dummy) +static void do_poweroff(void *dummy) { kernel_power_off(); } -static DECLARE_WORK(poweroff_work, do_poweroff); +static DECLARE_WORK(poweroff_work, do_poweroff, NULL); static void handle_poweroff(int key, struct tty_struct *tty) { diff --git a/trunk/kernel/power/process.c b/trunk/kernel/power/process.c index 99eeb119b06d..72e72d2c61e6 100644 --- a/trunk/kernel/power/process.c +++ b/trunk/kernel/power/process.c @@ -13,15 +13,12 @@ #include #include #include -#include /* * Timeout for stopping processes */ #define TIMEOUT (20 * HZ) -#define FREEZER_KERNEL_THREADS 0 -#define FREEZER_USER_SPACE 1 static inline int freezeable(struct task_struct * p) { @@ -42,6 +39,7 @@ void refrigerator(void) long save; save = current->state; pr_debug("%s entered refrigerator\n", current->comm); + printk("="); frozen_process(current); spin_lock_irq(&current->sighand->siglock); @@ -81,136 +79,96 @@ static void cancel_freezing(struct task_struct *p) } } -static inline int is_user_space(struct task_struct *p) -{ - return p->mm && !(p->flags & PF_BORROWED_MM); -} - -static unsigned int try_to_freeze_tasks(int freeze_user_space) +/* 0 = success, else # of processes that we failed to stop */ +int freeze_processes(void) { + int todo, nr_user, user_frozen; + unsigned long start_time; struct task_struct *g, *p; - unsigned long end_time; - unsigned int todo; - end_time = jiffies + TIMEOUT; + printk( "Stopping tasks: " ); + start_time = jiffies; + user_frozen = 0; do { - todo = 0; + nr_user = todo = 0; read_lock(&tasklist_lock); do_each_thread(g, p) { if (!freezeable(p)) continue; - if (frozen(p)) continue; - - if (p->state == TASK_TRACED && - (frozen(p->parent) || - p->parent->state == TASK_STOPPED)) { + if (p->state == TASK_TRACED && frozen(p->parent)) { cancel_freezing(p); continue; } - if (is_user_space(p)) { - if
(!freeze_user_space) - continue; - - /* Freeze the task unless there is a vfork - * completion pending + if (p->mm && !(p->flags & PF_BORROWED_MM)) { + /* The task is a user-space one. + * Freeze it unless there's a vfork completion + * pending */ if (!p->vfork_done) freeze_process(p); + nr_user++; } else { - if (freeze_user_space) - continue; - - freeze_process(p); + /* Freeze only if the user space is frozen */ + if (user_frozen) + freeze_process(p); + todo++; } - todo++; } while_each_thread(g, p); read_unlock(&tasklist_lock); + todo += nr_user; + if (!user_frozen && !nr_user) { + sys_sync(); + start_time = jiffies; + } + user_frozen = !nr_user; yield(); /* Yield is okay here */ - if (todo && time_after(jiffies, end_time)) + if (todo && time_after(jiffies, start_time + TIMEOUT)) break; - } while (todo); + } while(todo); + /* This does not unfreeze processes that are already frozen + * (we have slightly ugly calling convention in that respect, + * and caller must call thaw_processes() if something fails), + * but it cleans up leftover PF_FREEZE requests. + */ if (todo) { - /* This does not unfreeze processes that are already frozen - * (we have slightly ugly calling convention in that respect, - * and caller must call thaw_processes() if something fails), - * but it cleans up leftover PF_FREEZE requests. - */ - printk("\n"); - printk(KERN_ERR "Stopping %s timed out after %d seconds " - "(%d tasks refusing to freeze):\n", - freeze_user_space ? "user space processes" : - "kernel threads", - TIMEOUT / HZ, todo); + printk( "\n" ); + printk(KERN_ERR " stopping tasks timed out " + "after %d seconds (%d tasks remaining):\n", + TIMEOUT / HZ, todo); read_lock(&tasklist_lock); do_each_thread(g, p) { - if (is_user_space(p) == !freeze_user_space) - continue; - if (freezeable(p) && !frozen(p)) - printk(KERN_ERR " %s\n", p->comm); - + printk(KERN_ERR " %s\n", p->comm); cancel_freezing(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); + return todo; } - return todo; -} - -/** - * freeze_processes - tell processes to enter the refrigerator - * - * Returns 0 on success, or the number of processes that didn't freeze, - * although they were told to. - */ -int freeze_processes(void) -{ - unsigned int nr_unfrozen; - - printk("Stopping tasks ... "); - nr_unfrozen = try_to_freeze_tasks(FREEZER_USER_SPACE); - if (nr_unfrozen) - return nr_unfrozen; - - sys_sync(); - nr_unfrozen = try_to_freeze_tasks(FREEZER_KERNEL_THREADS); - if (nr_unfrozen) - return nr_unfrozen; - - printk("done.\n"); + printk( "|\n" ); BUG_ON(in_atomic()); return 0; } -static void thaw_tasks(int thaw_user_space) +void thaw_processes(void) { struct task_struct *g, *p; + printk( "Restarting tasks..." ); read_lock(&tasklist_lock); do_each_thread(g, p) { if (!freezeable(p)) continue; - - if (is_user_space(p) == !thaw_user_space) - continue; - if (!thaw_process(p)) - printk(KERN_WARNING " Strange, %s not stopped\n", - p->comm ); + printk(KERN_INFO " Strange, %s not stopped\n", p->comm ); } while_each_thread(g, p); - read_unlock(&tasklist_lock); -} -void thaw_processes(void) -{ - printk("Restarting tasks ... 
"); - thaw_tasks(FREEZER_KERNEL_THREADS); - thaw_tasks(FREEZER_USER_SPACE); + read_unlock(&tasklist_lock); schedule(); - printk("done.\n"); + printk( " done\n" ); } EXPORT_SYMBOL(refrigerator); diff --git a/trunk/kernel/power/snapshot.c b/trunk/kernel/power/snapshot.c index c024606221c4..99f9b7d177d6 100644 --- a/trunk/kernel/power/snapshot.c +++ b/trunk/kernel/power/snapshot.c @@ -1,15 +1,15 @@ /* * linux/kernel/power/snapshot.c * - * This file provides system snapshot/restore functionality for swsusp. + * This file provide system snapshot/restore functionality. * * Copyright (C) 1998-2005 Pavel Machek - * Copyright (C) 2006 Rafael J. Wysocki * - * This file is released under the GPLv2. + * This file is released under the GPLv2, and is based on swsusp.c. * */ + #include #include #include @@ -34,24 +34,137 @@ #include "power.h" -/* List of PBEs needed for restoring the pages that were allocated before - * the suspend and included in the suspend image, but have also been - * allocated by the "resume" kernel, so their contents cannot be written - * directly to their "original" page frames. - */ +/* List of PBEs used for creating and restoring the suspend image */ struct pbe *restore_pblist; -/* Pointer to an auxiliary buffer (1 page) */ +static unsigned int nr_copy_pages; +static unsigned int nr_meta_pages; static void *buffer; +#ifdef CONFIG_HIGHMEM +unsigned int count_highmem_pages(void) +{ + struct zone *zone; + unsigned long zone_pfn; + unsigned int n = 0; + + for_each_zone (zone) + if (is_highmem(zone)) { + mark_free_pages(zone); + for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) { + struct page *page; + unsigned long pfn = zone_pfn + zone->zone_start_pfn; + if (!pfn_valid(pfn)) + continue; + page = pfn_to_page(pfn); + if (PageReserved(page)) + continue; + if (PageNosaveFree(page)) + continue; + n++; + } + } + return n; +} + +struct highmem_page { + char *data; + struct page *page; + struct highmem_page *next; +}; + +static struct highmem_page *highmem_copy; + +static int save_highmem_zone(struct zone *zone) +{ + unsigned long zone_pfn; + mark_free_pages(zone); + for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) { + struct page *page; + struct highmem_page *save; + void *kaddr; + unsigned long pfn = zone_pfn + zone->zone_start_pfn; + + if (!(pfn%10000)) + printk("."); + if (!pfn_valid(pfn)) + continue; + page = pfn_to_page(pfn); + /* + * This condition results from rvmalloc() sans vmalloc_32() + * and architectural memory reservations. This should be + * corrected eventually when the cases giving rise to this + * are better understood. 
+ */ + if (PageReserved(page)) + continue; + BUG_ON(PageNosave(page)); + if (PageNosaveFree(page)) + continue; + save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC); + if (!save) + return -ENOMEM; + save->next = highmem_copy; + save->page = page; + save->data = (void *) get_zeroed_page(GFP_ATOMIC); + if (!save->data) { + kfree(save); + return -ENOMEM; + } + kaddr = kmap_atomic(page, KM_USER0); + memcpy(save->data, kaddr, PAGE_SIZE); + kunmap_atomic(kaddr, KM_USER0); + highmem_copy = save; + } + return 0; +} + +int save_highmem(void) +{ + struct zone *zone; + int res = 0; + + pr_debug("swsusp: Saving Highmem"); + drain_local_pages(); + for_each_zone (zone) { + if (is_highmem(zone)) + res = save_highmem_zone(zone); + if (res) + return res; + } + printk("\n"); + return 0; +} + +int restore_highmem(void) +{ + printk("swsusp: Restoring Highmem\n"); + while (highmem_copy) { + struct highmem_page *save = highmem_copy; + void *kaddr; + highmem_copy = save->next; + + kaddr = kmap_atomic(save->page, KM_USER0); + memcpy(kaddr, save->data, PAGE_SIZE); + kunmap_atomic(kaddr, KM_USER0); + free_page((long) save->data); + kfree(save); + } + return 0; +} +#else +static inline unsigned int count_highmem_pages(void) {return 0;} +static inline int save_highmem(void) {return 0;} +static inline int restore_highmem(void) {return 0;} +#endif + /** * @safe_needed - on resume, for storing the PBE list and the image, * we can only use memory pages that do not conflict with the pages - * used before suspend. The unsafe pages have PageNosaveFree set - * and we count them using unsafe_pages. + * used before suspend. * - * Each allocated image page is marked as PageNosave and PageNosaveFree - * so that swsusp_free() can release it. + * The unsafe pages are marked with the PG_nosave_free flag + * and we count them using unsafe_pages */ #define PG_ANY 0 @@ -61,7 +174,7 @@ static void *buffer; static unsigned int allocated_unsafe_pages; -static void *get_image_page(gfp_t gfp_mask, int safe_needed) +static void *alloc_image_page(gfp_t gfp_mask, int safe_needed) { void *res; @@ -82,39 +195,20 @@ static void *get_image_page(gfp_t gfp_mask, int safe_needed) unsigned long get_safe_page(gfp_t gfp_mask) { - return (unsigned long)get_image_page(gfp_mask, PG_SAFE); -} - -static struct page *alloc_image_page(gfp_t gfp_mask) -{ - struct page *page; - - page = alloc_page(gfp_mask); - if (page) { - SetPageNosave(page); - SetPageNosaveFree(page); - } - return page; + return (unsigned long)alloc_image_page(gfp_mask, PG_SAFE); } /** * free_image_page - free page represented by @addr, allocated with - * get_image_page (page flags set by it must be cleared) + * alloc_image_page (page flags set by it must be cleared) */ static inline void free_image_page(void *addr, int clear_nosave_free) { - struct page *page; - - BUG_ON(!virt_addr_valid(addr)); - - page = virt_to_page(addr); - - ClearPageNosave(page); + ClearPageNosave(virt_to_page(addr)); if (clear_nosave_free) - ClearPageNosaveFree(page); - - __free_page(page); + ClearPageNosaveFree(virt_to_page(addr)); + free_page((unsigned long)addr); } /* struct linked_page is used to build chains of pages */ @@ -175,7 +269,7 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size) if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) { struct linked_page *lp; - lp = get_image_page(ca->gfp_mask, ca->safe_needed); + lp = alloc_image_page(ca->gfp_mask, ca->safe_needed); if (!lp) return NULL; @@ -352,8 +446,8 @@ memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int 
safe_needed) /* Compute the number of zones */ nr = 0; - for_each_zone(zone) - if (populated_zone(zone)) + for_each_zone (zone) + if (populated_zone(zone) && !is_highmem(zone)) nr++; /* Allocate the list of zones bitmap objects */ @@ -365,10 +459,10 @@ memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed) } /* Initialize the zone bitmap objects */ - for_each_zone(zone) { + for_each_zone (zone) { unsigned long pfn; - if (!populated_zone(zone)) + if (!populated_zone(zone) || is_highmem(zone)) continue; zone_bm->start_pfn = zone->zone_start_pfn; @@ -387,7 +481,7 @@ memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed) while (bb) { unsigned long *ptr; - ptr = get_image_page(gfp_mask, safe_needed); + ptr = alloc_image_page(gfp_mask, safe_needed); bb->data = ptr; if (!ptr) goto Free; @@ -411,7 +505,7 @@ memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed) memory_bm_position_reset(bm); return 0; - Free: +Free: bm->p_list = ca.chain; memory_bm_free(bm, PG_UNSAFE_CLEAR); return -ENOMEM; @@ -557,7 +651,7 @@ static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm) memory_bm_position_reset(bm); return BM_END_OF_MAP; - Return_pfn: +Return_pfn: bm->cur.chunk = chunk; bm->cur.bit = bit; return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit; @@ -575,80 +669,8 @@ unsigned int snapshot_additional_pages(struct zone *zone) res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK); res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE); - return 2 * res; -} - -#ifdef CONFIG_HIGHMEM -/** - * count_free_highmem_pages - compute the total number of free highmem - * pages, system-wide. - */ - -static unsigned int count_free_highmem_pages(void) -{ - struct zone *zone; - unsigned int cnt = 0; - - for_each_zone(zone) - if (populated_zone(zone) && is_highmem(zone)) - cnt += zone->free_pages; - - return cnt; -} - -/** - * saveable_highmem_page - Determine whether a highmem page should be - * included in the suspend image. - * - * We should save the page if it isn't Nosave or NosaveFree, or Reserved, - * and it isn't a part of a free chunk of pages. - */ - -static struct page *saveable_highmem_page(unsigned long pfn) -{ - struct page *page; - - if (!pfn_valid(pfn)) - return NULL; - - page = pfn_to_page(pfn); - - BUG_ON(!PageHighMem(page)); - - if (PageNosave(page) || PageReserved(page) || PageNosaveFree(page)) - return NULL; - - return page; -} - -/** - * count_highmem_pages - compute the total number of saveable highmem - * pages. - */ - -unsigned int count_highmem_pages(void) -{ - struct zone *zone; - unsigned int n = 0; - - for_each_zone(zone) { - unsigned long pfn, max_zone_pfn; - - if (!is_highmem(zone)) - continue; - - mark_free_pages(zone); - max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; - for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) - if (saveable_highmem_page(pfn)) - n++; - } - return n; + return res; } -#else -static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; } -static inline unsigned int count_highmem_pages(void) { return 0; } -#endif /* CONFIG_HIGHMEM */ /** * pfn_is_nosave - check if given pfn is in the 'nosave' section @@ -662,12 +684,12 @@ static inline int pfn_is_nosave(unsigned long pfn) } /** - * saveable - Determine whether a non-highmem page should be included in - * the suspend image. + * saveable - Determine whether a page should be cloned or not. 
+ * @pfn: The page * - * We should save the page if it isn't Nosave, and is not in the range - * of pages statically defined as 'unsaveable', and it isn't a part of - * a free chunk of pages. + * We save a page if it isn't Nosave, and is not in the range of pages + * statically defined as 'unsaveable', and it + * isn't a part of a free chunk of pages. */ static struct page *saveable_page(unsigned long pfn) @@ -679,130 +701,76 @@ static struct page *saveable_page(unsigned long pfn) page = pfn_to_page(pfn); - BUG_ON(PageHighMem(page)); - - if (PageNosave(page) || PageNosaveFree(page)) + if (PageNosave(page)) return NULL; - if (PageReserved(page) && pfn_is_nosave(pfn)) return NULL; + if (PageNosaveFree(page)) + return NULL; return page; } -/** - * count_data_pages - compute the total number of saveable non-highmem - * pages. - */ - unsigned int count_data_pages(void) { struct zone *zone; unsigned long pfn, max_zone_pfn; unsigned int n = 0; - for_each_zone(zone) { + for_each_zone (zone) { if (is_highmem(zone)) continue; - mark_free_pages(zone); max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) - if(saveable_page(pfn)) - n++; + n += !!saveable_page(pfn); } return n; } -/* This is needed, because copy_page and memcpy are not usable for copying - * task structs. - */ -static inline void do_copy_page(long *dst, long *src) +static inline void copy_data_page(long *dst, long *src) { int n; + /* copy_page and memcpy are not usable for copying task structs. */ for (n = PAGE_SIZE / sizeof(long); n; n--) *dst++ = *src++; } -#ifdef CONFIG_HIGHMEM -static inline struct page * -page_is_saveable(struct zone *zone, unsigned long pfn) -{ - return is_highmem(zone) ? - saveable_highmem_page(pfn) : saveable_page(pfn); -} - -static inline void -copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) -{ - struct page *s_page, *d_page; - void *src, *dst; - - s_page = pfn_to_page(src_pfn); - d_page = pfn_to_page(dst_pfn); - if (PageHighMem(s_page)) { - src = kmap_atomic(s_page, KM_USER0); - dst = kmap_atomic(d_page, KM_USER1); - do_copy_page(dst, src); - kunmap_atomic(src, KM_USER0); - kunmap_atomic(dst, KM_USER1); - } else { - src = page_address(s_page); - if (PageHighMem(d_page)) { - /* Page pointed to by src may contain some kernel - * data modified by kmap_atomic() - */ - do_copy_page(buffer, src); - dst = kmap_atomic(pfn_to_page(dst_pfn), KM_USER0); - memcpy(dst, buffer, PAGE_SIZE); - kunmap_atomic(dst, KM_USER0); - } else { - dst = page_address(d_page); - do_copy_page(dst, src); - } - } -} -#else -#define page_is_saveable(zone, pfn) saveable_page(pfn) - -static inline void -copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) -{ - do_copy_page(page_address(pfn_to_page(dst_pfn)), - page_address(pfn_to_page(src_pfn))); -} -#endif /* CONFIG_HIGHMEM */ - static void copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm) { struct zone *zone; unsigned long pfn; - for_each_zone(zone) { + for_each_zone (zone) { unsigned long max_zone_pfn; + if (is_highmem(zone)) + continue; + mark_free_pages(zone); max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) - if (page_is_saveable(zone, pfn)) + if (saveable_page(pfn)) memory_bm_set_bit(orig_bm, pfn); } memory_bm_position_reset(orig_bm); memory_bm_position_reset(copy_bm); do { pfn = memory_bm_next_pfn(orig_bm); - if (likely(pfn != BM_END_OF_MAP)) - copy_data_page(memory_bm_next_pfn(copy_bm), pfn); + if 
(likely(pfn != BM_END_OF_MAP)) { + struct page *page; + void *src; + + page = pfn_to_page(pfn); + src = page_address(page); + page = pfn_to_page(memory_bm_next_pfn(copy_bm)); + copy_data_page(page_address(page), src); + } } while (pfn != BM_END_OF_MAP); } -/* Total number of image pages */ -static unsigned int nr_copy_pages; -/* Number of pages needed for saving the original pfns of the image pages */ -static unsigned int nr_meta_pages; - /** * swsusp_free - free pages allocated for the suspend. * @@ -824,7 +792,7 @@ void swsusp_free(void) if (PageNosave(page) && PageNosaveFree(page)) { ClearPageNosave(page); ClearPageNosaveFree(page); - __free_page(page); + free_page((long) page_address(page)); } } } @@ -834,108 +802,34 @@ void swsusp_free(void) buffer = NULL; } -#ifdef CONFIG_HIGHMEM -/** - * count_pages_for_highmem - compute the number of non-highmem pages - * that will be necessary for creating copies of highmem pages. - */ - -static unsigned int count_pages_for_highmem(unsigned int nr_highmem) -{ - unsigned int free_highmem = count_free_highmem_pages(); - - if (free_highmem >= nr_highmem) - nr_highmem = 0; - else - nr_highmem -= free_highmem; - - return nr_highmem; -} -#else -static unsigned int -count_pages_for_highmem(unsigned int nr_highmem) { return 0; } -#endif /* CONFIG_HIGHMEM */ /** - * enough_free_mem - Make sure we have enough free memory for the - * snapshot image. + * enough_free_mem - Make sure we enough free memory to snapshot. + * + * Returns TRUE or FALSE after checking the number of available + * free pages. */ -static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem) +static int enough_free_mem(unsigned int nr_pages) { struct zone *zone; unsigned int free = 0, meta = 0; - for_each_zone(zone) { - meta += snapshot_additional_pages(zone); - if (!is_highmem(zone)) + for_each_zone (zone) + if (!is_highmem(zone)) { free += zone->free_pages; - } + meta += snapshot_additional_pages(zone); + } - nr_pages += count_pages_for_highmem(nr_highmem); - pr_debug("swsusp: Normal pages needed: %u + %u + %u, available pages: %u\n", + pr_debug("swsusp: pages needed: %u + %u + %u, available pages: %u\n", nr_pages, PAGES_FOR_IO, meta, free); return free > nr_pages + PAGES_FOR_IO + meta; } -#ifdef CONFIG_HIGHMEM -/** - * get_highmem_buffer - if there are some highmem pages in the suspend - * image, we may need the buffer to copy them and/or load their data. - */ - -static inline int get_highmem_buffer(int safe_needed) -{ - buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed); - return buffer ? 0 : -ENOMEM; -} - -/** - * alloc_highmem_image_pages - allocate some highmem pages for the image. - * Try to allocate as many pages as needed, but if the number of free - * highmem pages is lesser than that, allocate them all. 
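(Aside on the enough_free_mem() hunk above: both sides of the hunk reduce to the same budget check, the free low-memory pages must cover the image copies, the PAGES_FOR_IO reserve and the bitmap metadata reported by snapshot_additional_pages(). A minimal stand-alone C sketch of that arithmetic follows; the numbers and the PAGES_FOR_IO value here are illustrative stand-ins, not taken from the patch.)

#include <stdio.h>

/* Illustrative stand-in; in the kernel this value comes from suspend.h. */
#define PAGES_FOR_IO 1024

/* Mirrors the final test in enough_free_mem(): the snapshot fits only if
 * the free pages cover the image copies, the I/O reserve and the metadata. */
static int enough_free_mem_sketch(unsigned int free_pages, unsigned int nr_pages,
				  unsigned int meta)
{
	return free_pages > nr_pages + PAGES_FOR_IO + meta;
}

int main(void)
{
	/* hypothetical numbers, purely for illustration */
	unsigned int free_pages = 60000, nr_pages = 45000, meta = 300;

	printf("pages needed: %u + %u + %u, available pages: %u -> %s\n",
	       nr_pages, PAGES_FOR_IO, meta, free_pages,
	       enough_free_mem_sketch(free_pages, nr_pages, meta) ?
	       "enough" : "not enough");
	return 0;
}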
- */ - -static inline unsigned int -alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int nr_highmem) -{ - unsigned int to_alloc = count_free_highmem_pages(); - - if (to_alloc > nr_highmem) - to_alloc = nr_highmem; - - nr_highmem -= to_alloc; - while (to_alloc-- > 0) { - struct page *page; - - page = alloc_image_page(__GFP_HIGHMEM); - memory_bm_set_bit(bm, page_to_pfn(page)); - } - return nr_highmem; -} -#else -static inline int get_highmem_buffer(int safe_needed) { return 0; } - -static inline unsigned int -alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int n) { return 0; } -#endif /* CONFIG_HIGHMEM */ - -/** - * swsusp_alloc - allocate memory for the suspend image - * - * We first try to allocate as many highmem pages as there are - * saveable highmem pages in the system. If that fails, we allocate - * non-highmem pages for the copies of the remaining highmem ones. - * - * In this approach it is likely that the copies of highmem pages will - * also be located in the high memory, because of the way in which - * copy_data_pages() works. - */ - static int swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, - unsigned int nr_pages, unsigned int nr_highmem) + unsigned int nr_pages) { int error; @@ -947,61 +841,46 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, if (error) goto Free; - if (nr_highmem > 0) { - error = get_highmem_buffer(PG_ANY); - if (error) - goto Free; - - nr_pages += alloc_highmem_image_pages(copy_bm, nr_highmem); - } while (nr_pages-- > 0) { - struct page *page = alloc_image_page(GFP_ATOMIC | __GFP_COLD); - + struct page *page = alloc_page(GFP_ATOMIC | __GFP_COLD); if (!page) goto Free; + SetPageNosave(page); + SetPageNosaveFree(page); memory_bm_set_bit(copy_bm, page_to_pfn(page)); } return 0; - Free: +Free: swsusp_free(); return -ENOMEM; } -/* Memory bitmap used for marking saveable pages (during suspend) or the - * suspend image pages (during resume) - */ +/* Memory bitmap used for marking saveable pages */ static struct memory_bitmap orig_bm; -/* Memory bitmap used on suspend for marking allocated pages that will contain - * the copies of saveable pages. During resume it is initially used for - * marking the suspend image pages, but then its set bits are duplicated in - * @orig_bm and it is released. Next, on systems with high memory, it may be - * used for marking "safe" highmem pages, but it has to be reinitialized for - * this purpose. +/* Memory bitmap used for marking allocated pages that will contain the copies + * of saveable pages */ static struct memory_bitmap copy_bm; asmlinkage int swsusp_save(void) { - unsigned int nr_pages, nr_highmem; + unsigned int nr_pages; - printk("swsusp: critical section: \n"); + pr_debug("swsusp: critical section: \n"); drain_local_pages(); nr_pages = count_data_pages(); - nr_highmem = count_highmem_pages(); - printk("swsusp: Need to copy %u pages\n", nr_pages + nr_highmem); + printk("swsusp: Need to copy %u pages\n", nr_pages); - if (!enough_free_mem(nr_pages, nr_highmem)) { + if (!enough_free_mem(nr_pages)) { printk(KERN_ERR "swsusp: Not enough free memory\n"); return -ENOMEM; } - if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) { - printk(KERN_ERR "swsusp: Memory allocation failed\n"); + if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages)) return -ENOMEM; - } /* During allocating of suspend pagedir, new cold pages may appear. * Kill them. @@ -1015,12 +894,10 @@ asmlinkage int swsusp_save(void) * touch swap space!
Except we must write out our image of course. */ - nr_pages += nr_highmem; nr_copy_pages = nr_pages; - nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE); + nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT; printk("swsusp: critical section/: done (%d pages copied)\n", nr_pages); - return 0; } @@ -1083,7 +960,7 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count) if (!buffer) { /* This makes the buffer be freed by swsusp_free() */ - buffer = get_image_page(GFP_ATOMIC, PG_ANY); + buffer = alloc_image_page(GFP_ATOMIC, PG_ANY); if (!buffer) return -ENOMEM; } @@ -1098,23 +975,9 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count) memset(buffer, 0, PAGE_SIZE); pack_pfns(buffer, &orig_bm); } else { - struct page *page; + unsigned long pfn = memory_bm_next_pfn(&copy_bm); - page = pfn_to_page(memory_bm_next_pfn(&copy_bm)); - if (PageHighMem(page)) { - /* Highmem pages are copied to the buffer, - * because we can't return with a kmapped - * highmem page (we may not be called again). - */ - void *kaddr; - - kaddr = kmap_atomic(page, KM_USER0); - memcpy(buffer, kaddr, PAGE_SIZE); - kunmap_atomic(kaddr, KM_USER0); - handle->buffer = buffer; - } else { - handle->buffer = page_address(page); - } + handle->buffer = page_address(pfn_to_page(pfn)); } handle->prev = handle->cur; } @@ -1142,7 +1005,7 @@ static int mark_unsafe_pages(struct memory_bitmap *bm) unsigned long pfn, max_zone_pfn; /* Clear page flags */ - for_each_zone(zone) { + for_each_zone (zone) { max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) if (pfn_valid(pfn)) @@ -1238,218 +1101,6 @@ unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm) } } -/* List of "safe" pages that may be used to store data loaded from the suspend - * image - */ -static struct linked_page *safe_pages_list; - -#ifdef CONFIG_HIGHMEM -/* struct highmem_pbe is used for creating the list of highmem pages that - * should be restored atomically during the resume from disk, because the page - * frames they have occupied before the suspend are in use. - */ -struct highmem_pbe { - struct page *copy_page; /* data is here now */ - struct page *orig_page; /* data was here before the suspend */ - struct highmem_pbe *next; -}; - -/* List of highmem PBEs needed for restoring the highmem pages that were - * allocated before the suspend and included in the suspend image, but have - * also been allocated by the "resume" kernel, so their contents cannot be - * written directly to their "original" page frames. - */ -static struct highmem_pbe *highmem_pblist; - -/** - * count_highmem_image_pages - compute the number of highmem pages in the - * suspend image. The bits in the memory bitmap @bm that correspond to the - * image pages are assumed to be set. - */ - -static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) -{ - unsigned long pfn; - unsigned int cnt = 0; - - memory_bm_position_reset(bm); - pfn = memory_bm_next_pfn(bm); - while (pfn != BM_END_OF_MAP) { - if (PageHighMem(pfn_to_page(pfn))) - cnt++; - - pfn = memory_bm_next_pfn(bm); - } - return cnt; -} - -/** - * prepare_highmem_image - try to allocate as many highmem pages as - * there are highmem image pages (@nr_highmem_p points to the variable - * containing the number of highmem image pages). The pages that are - * "safe" (ie. will not be overwritten when the suspend image is - * restored) have the corresponding bits set in @bm (it must be - * unitialized).
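(Aside on the nr_meta_pages hunk in swsusp_save() near the top of this snapshot.c diff: the two forms are equivalent, since DIV_ROUND_UP(n, PAGE_SIZE) equals (n + PAGE_SIZE - 1) >> PAGE_SHIFT whenever PAGE_SIZE is 1 << PAGE_SHIFT. A small stand-alone check, assuming 4 KiB pages purely for illustration:)

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long nr_pages;

	/* nr_meta_pages: one long holding an original pfn per image page,
	 * rounded up to whole pages; both forms from the hunk agree. */
	for (nr_pages = 0; nr_pages < 3 * PAGE_SIZE; nr_pages++) {
		unsigned long bytes = nr_pages * sizeof(long);
		unsigned long a = DIV_ROUND_UP(bytes, PAGE_SIZE);
		unsigned long b = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;

		if (a != b) {
			printf("mismatch at nr_pages=%lu\n", nr_pages);
			return 1;
		}
	}
	printf("DIV_ROUND_UP and the shift form agree\n");
	return 0;
}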
- * - * NOTE: This function should not be called if there are no highmem - * image pages. - */ - -static unsigned int safe_highmem_pages; - -static struct memory_bitmap *safe_highmem_bm; - -static int -prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p) -{ - unsigned int to_alloc; - - if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE)) - return -ENOMEM; - - if (get_highmem_buffer(PG_SAFE)) - return -ENOMEM; - - to_alloc = count_free_highmem_pages(); - if (to_alloc > *nr_highmem_p) - to_alloc = *nr_highmem_p; - else - *nr_highmem_p = to_alloc; - - safe_highmem_pages = 0; - while (to_alloc-- > 0) { - struct page *page; - - page = alloc_page(__GFP_HIGHMEM); - if (!PageNosaveFree(page)) { - /* The page is "safe", set its bit the bitmap */ - memory_bm_set_bit(bm, page_to_pfn(page)); - safe_highmem_pages++; - } - /* Mark the page as allocated */ - SetPageNosave(page); - SetPageNosaveFree(page); - } - memory_bm_position_reset(bm); - safe_highmem_bm = bm; - return 0; -} - -/** - * get_highmem_page_buffer - for given highmem image page find the buffer - * that suspend_write_next() should set for its caller to write to. - * - * If the page is to be saved to its "original" page frame or a copy of - * the page is to be made in the highmem, @buffer is returned. Otherwise, - * the copy of the page is to be made in normal memory, so the address of - * the copy is returned. - * - * If @buffer is returned, the caller of suspend_write_next() will write - * the page's contents to @buffer, so they will have to be copied to the - * right location on the next call to suspend_write_next() and it is done - * with the help of copy_last_highmem_page(). For this purpose, if - * @buffer is returned, @last_highmem page is set to the page to which - * the data will have to be copied from @buffer. - */ - -static struct page *last_highmem_page; - -static void * -get_highmem_page_buffer(struct page *page, struct chain_allocator *ca) -{ - struct highmem_pbe *pbe; - void *kaddr; - - if (PageNosave(page) && PageNosaveFree(page)) { - /* We have allocated the "original" page frame and we can - * use it directly to store the loaded page. - */ - last_highmem_page = page; - return buffer; - } - /* The "original" page frame has not been allocated and we have to - * use a "safe" page frame to store the loaded page. - */ - pbe = chain_alloc(ca, sizeof(struct highmem_pbe)); - if (!pbe) { - swsusp_free(); - return NULL; - } - pbe->orig_page = page; - if (safe_highmem_pages > 0) { - struct page *tmp; - - /* Copy of the page will be stored in high memory */ - kaddr = buffer; - tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm)); - safe_highmem_pages--; - last_highmem_page = tmp; - pbe->copy_page = tmp; - } else { - /* Copy of the page will be stored in normal memory */ - kaddr = safe_pages_list; - safe_pages_list = safe_pages_list->next; - pbe->copy_page = virt_to_page(kaddr); - } - pbe->next = highmem_pblist; - highmem_pblist = pbe; - return kaddr; -} - -/** - * copy_last_highmem_page - copy the contents of a highmem image from - * @buffer, where the caller of snapshot_write_next() has place them, - * to the right location represented by @last_highmem_page . 
- */ - -static void copy_last_highmem_page(void) -{ - if (last_highmem_page) { - void *dst; - - dst = kmap_atomic(last_highmem_page, KM_USER0); - memcpy(dst, buffer, PAGE_SIZE); - kunmap_atomic(dst, KM_USER0); - last_highmem_page = NULL; - } -} - -static inline int last_highmem_page_copied(void) -{ - return !last_highmem_page; -} - -static inline void free_highmem_data(void) -{ - if (safe_highmem_bm) - memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR); - - if (buffer) - free_image_page(buffer, PG_UNSAFE_CLEAR); -} -#else -static inline int get_safe_write_buffer(void) { return 0; } - -static unsigned int -count_highmem_image_pages(struct memory_bitmap *bm) { return 0; } - -static inline int -prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p) -{ - return 0; -} - -static inline void * -get_highmem_page_buffer(struct page *page, struct chain_allocator *ca) -{ - return NULL; -} - -static inline void copy_last_highmem_page(void) {} -static inline int last_highmem_page_copied(void) { return 1; } -static inline void free_highmem_data(void) {} -#endif /* CONFIG_HIGHMEM */ - /** * prepare_image - use the memory bitmap @bm to mark the pages that will * be overwritten in the process of restoring the system memory state @@ -1459,25 +1110,20 @@ static inline void free_highmem_data(void) {} * The idea is to allocate a new memory bitmap first and then allocate * as many pages as needed for the image data, but not to assign these * pages to specific tasks initially. Instead, we just mark them as - * allocated and create a lists of "safe" pages that will be used - * later. On systems with high memory a list of "safe" highmem pages is - * also created. + * allocated and create a list of "safe" pages that will be used later. */ #define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe)) +static struct linked_page *safe_pages_list; + static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm) { - unsigned int nr_pages, nr_highmem; + unsigned int nr_pages; struct linked_page *sp_list, *lp; int error; - /* If there is no highmem, the buffer will not be necessary */ - free_image_page(buffer, PG_UNSAFE_CLEAR); - buffer = NULL; - - nr_highmem = count_highmem_image_pages(bm); error = mark_unsafe_pages(bm); if (error) goto Free; @@ -1488,11 +1134,6 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm) duplicate_memory_bitmap(new_bm, bm); memory_bm_free(bm, PG_UNSAFE_KEEP); - if (nr_highmem > 0) { - error = prepare_highmem_image(bm, &nr_highmem); - if (error) - goto Free; - } /* Reserve some safe pages for potential later use. 
* * NOTE: This way we make sure there will be enough safe pages for the @@ -1501,10 +1142,10 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm) */ sp_list = NULL; /* nr_copy_pages cannot be lesser than allocated_unsafe_pages */ - nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages; + nr_pages = nr_copy_pages - allocated_unsafe_pages; nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE); while (nr_pages > 0) { - lp = get_image_page(GFP_ATOMIC, PG_SAFE); + lp = alloc_image_page(GFP_ATOMIC, PG_SAFE); if (!lp) { error = -ENOMEM; goto Free; @@ -1515,7 +1156,7 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm) } /* Preallocate memory for the image */ safe_pages_list = NULL; - nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages; + nr_pages = nr_copy_pages - allocated_unsafe_pages; while (nr_pages > 0) { lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC); if (!lp) { @@ -1540,7 +1181,7 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm) } return 0; - Free: +Free: swsusp_free(); return error; } @@ -1555,9 +1196,6 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca) struct pbe *pbe; struct page *page = pfn_to_page(memory_bm_next_pfn(bm)); - if (PageHighMem(page)) - return get_highmem_page_buffer(page, ca); - if (PageNosave(page) && PageNosaveFree(page)) /* We have allocated the "original" page frame and we can * use it directly to store the loaded page. @@ -1572,12 +1210,12 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca) swsusp_free(); return NULL; } - pbe->orig_address = page_address(page); - pbe->address = safe_pages_list; + pbe->orig_address = (unsigned long)page_address(page); + pbe->address = (unsigned long)safe_pages_list; safe_pages_list = safe_pages_list->next; pbe->next = restore_pblist; restore_pblist = pbe; - return pbe->address; + return (void *)pbe->address; } /** @@ -1611,16 +1249,14 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count) if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) return 0; - if (handle->offset == 0) { - if (!buffer) - /* This makes the buffer be freed by swsusp_free() */ - buffer = get_image_page(GFP_ATOMIC, PG_ANY); - + if (!buffer) { + /* This makes the buffer be freed by swsusp_free() */ + buffer = alloc_image_page(GFP_ATOMIC, PG_ANY); if (!buffer) return -ENOMEM; - - handle->buffer = buffer; } + if (!handle->offset) + handle->buffer = buffer; handle->sync_read = 1; if (handle->prev < handle->cur) { if (handle->prev == 0) { @@ -1648,10 +1284,8 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count) return -ENOMEM; } } else { - copy_last_highmem_page(); handle->buffer = get_buffer(&orig_bm, &ca); - if (handle->buffer != buffer) - handle->sync_read = 0; + handle->sync_read = 0; } handle->prev = handle->cur; } @@ -1667,73 +1301,15 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count) return count; } -/** - * snapshot_write_finalize - must be called after the last call to - * snapshot_write_next() in case the last page in the image happens - * to be a highmem page and its contents should be stored in the - * highmem. Additionally, it releases the memory that will not be - * used any more. 
- */ - -void snapshot_write_finalize(struct snapshot_handle *handle) -{ - copy_last_highmem_page(); - /* Free only if we have loaded the image entirely */ - if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) { - memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR); - free_highmem_data(); - } -} - int snapshot_image_loaded(struct snapshot_handle *handle) { - return !(!nr_copy_pages || !last_highmem_page_copied() || + return !(!nr_copy_pages || handle->cur <= nr_meta_pages + nr_copy_pages); } -#ifdef CONFIG_HIGHMEM -/* Assumes that @buf is ready and points to a "safe" page */ -static inline void -swap_two_pages_data(struct page *p1, struct page *p2, void *buf) -{ - void *kaddr1, *kaddr2; - - kaddr1 = kmap_atomic(p1, KM_USER0); - kaddr2 = kmap_atomic(p2, KM_USER1); - memcpy(buf, kaddr1, PAGE_SIZE); - memcpy(kaddr1, kaddr2, PAGE_SIZE); - memcpy(kaddr2, buf, PAGE_SIZE); - kunmap_atomic(kaddr1, KM_USER0); - kunmap_atomic(kaddr2, KM_USER1); -} - -/** - * restore_highmem - for each highmem page that was allocated before - * the suspend and included in the suspend image, and also has been - * allocated by the "resume" kernel swap its current (ie. "before - * resume") contents with the previous (ie. "before suspend") one. - * - * If the resume eventually fails, we can call this function once - * again and restore the "before resume" highmem state. - */ - -int restore_highmem(void) +void snapshot_free_unused_memory(struct snapshot_handle *handle) { - struct highmem_pbe *pbe = highmem_pblist; - void *buf; - - if (!pbe) - return 0; - - buf = get_image_page(GFP_ATOMIC, PG_SAFE); - if (!buf) - return -ENOMEM; - - while (pbe) { - swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf); - pbe = pbe->next; - } - free_image_page(buf, PG_UNSAFE_CLEAR); - return 0; + /* Free only if we have loaded the image entirely */ + if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) + memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR); } -#endif /* CONFIG_HIGHMEM */ diff --git a/trunk/kernel/power/swap.c b/trunk/kernel/power/swap.c index f133d4a6d817..1a3b0dd2c3fc 100644 --- a/trunk/kernel/power/swap.c +++ b/trunk/kernel/power/swap.c @@ -34,123 +34,34 @@ extern char resume_file[]; #define SWSUSP_SIG "S1SUSPEND" static struct swsusp_header { - char reserved[PAGE_SIZE - 20 - sizeof(sector_t)]; - sector_t image; + char reserved[PAGE_SIZE - 20 - sizeof(swp_entry_t)]; + swp_entry_t image; char orig_sig[10]; char sig[10]; } __attribute__((packed, aligned(PAGE_SIZE))) swsusp_header; /* - * General things + * Saving part... */ static unsigned short root_swap = 0xffff; -static struct block_device *resume_bdev; - -/** - * submit - submit BIO request. - * @rw: READ or WRITE. - * @off physical offset of page. - * @page: page we're reading or writing. - * @bio_chain: list of pending biod (for async reading) - * - * Straight from the textbook - allocate and initialize the bio. - * If we're reading, make sure the page is marked as dirty. - * Then submit it and, if @bio_chain == NULL, wait. 
- */ -static int submit(int rw, pgoff_t page_off, struct page *page, - struct bio **bio_chain) -{ - struct bio *bio; - - bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1); - if (!bio) - return -ENOMEM; - bio->bi_sector = page_off * (PAGE_SIZE >> 9); - bio->bi_bdev = resume_bdev; - bio->bi_end_io = end_swap_bio_read; - - if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { - printk("swsusp: ERROR: adding page to bio at %ld\n", page_off); - bio_put(bio); - return -EFAULT; - } - - lock_page(page); - bio_get(bio); - - if (bio_chain == NULL) { - submit_bio(rw | (1 << BIO_RW_SYNC), bio); - wait_on_page_locked(page); - if (rw == READ) - bio_set_pages_dirty(bio); - bio_put(bio); - } else { - if (rw == READ) - get_page(page); /* These pages are freed later */ - bio->bi_private = *bio_chain; - *bio_chain = bio; - submit_bio(rw | (1 << BIO_RW_SYNC), bio); - } - return 0; -} - -static int bio_read_page(pgoff_t page_off, void *addr, struct bio **bio_chain) -{ - return submit(READ, page_off, virt_to_page(addr), bio_chain); -} - -static int bio_write_page(pgoff_t page_off, void *addr, struct bio **bio_chain) -{ - return submit(WRITE, page_off, virt_to_page(addr), bio_chain); -} - -static int wait_on_bio_chain(struct bio **bio_chain) -{ - struct bio *bio; - struct bio *next_bio; - int ret = 0; - - if (bio_chain == NULL) - return 0; - - bio = *bio_chain; - if (bio == NULL) - return 0; - while (bio) { - struct page *page; - - next_bio = bio->bi_private; - page = bio->bi_io_vec[0].bv_page; - wait_on_page_locked(page); - if (!PageUptodate(page) || PageError(page)) - ret = -EIO; - put_page(page); - bio_put(bio); - bio = next_bio; - } - *bio_chain = NULL; - return ret; -} - -/* - * Saving part - */ -static int mark_swapfiles(sector_t start) +static int mark_swapfiles(swp_entry_t start) { int error; - bio_read_page(swsusp_resume_block, &swsusp_header, NULL); + rw_swap_page_sync(READ, swp_entry(root_swap, 0), + virt_to_page((unsigned long)&swsusp_header), NULL); if (!memcmp("SWAP-SPACE",swsusp_header.sig, 10) || !memcmp("SWAPSPACE2",swsusp_header.sig, 10)) { memcpy(swsusp_header.orig_sig,swsusp_header.sig, 10); memcpy(swsusp_header.sig,SWSUSP_SIG, 10); swsusp_header.image = start; - error = bio_write_page(swsusp_resume_block, - &swsusp_header, NULL); + error = rw_swap_page_sync(WRITE, swp_entry(root_swap, 0), + virt_to_page((unsigned long)&swsusp_header), + NULL); } else { - printk(KERN_ERR "swsusp: Swap header not found!\n"); + pr_debug("swsusp: Partition is not swap space.\n"); error = -ENODEV; } return error; @@ -163,21 +74,12 @@ static int mark_swapfiles(sector_t start) static int swsusp_swap_check(void) /* This is called before saving image */ { - int res; - - res = swap_type_of(swsusp_resume_device, swsusp_resume_block); - if (res < 0) - return res; - - root_swap = res; - resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_WRITE); - if (IS_ERR(resume_bdev)) - return PTR_ERR(resume_bdev); - - res = set_blocksize(resume_bdev, PAGE_SIZE); - if (res < 0) - blkdev_put(resume_bdev); + int res = swap_type_of(swsusp_resume_device); + if (res >= 0) { + root_swap = res; + return 0; + } return res; } @@ -188,26 +90,36 @@ static int swsusp_swap_check(void) /* This is called before saving image */ * @bio_chain: Link the next write BIO here */ -static int write_page(void *buf, sector_t offset, struct bio **bio_chain) +static int write_page(void *buf, unsigned long offset, struct bio **bio_chain) { - void *src; - - if (!offset) - return -ENOSPC; - - if (bio_chain) { - src = (void *)__get_free_page(__GFP_WAIT | 
__GFP_HIGH); - if (src) { - memcpy(src, buf, PAGE_SIZE); - } else { - WARN_ON_ONCE(1); - bio_chain = NULL; /* Go synchronous */ - src = buf; + swp_entry_t entry; + int error = -ENOSPC; + + if (offset) { + struct page *page = virt_to_page(buf); + + if (bio_chain) { + /* + * Whether or not we successfully allocated a copy page, + * we take a ref on the page here. It gets undone in + * wait_on_bio_chain(). + */ + struct page *page_copy; + page_copy = alloc_page(GFP_ATOMIC); + if (page_copy == NULL) { + WARN_ON_ONCE(1); + bio_chain = NULL; /* Go synchronous */ + get_page(page); + } else { + memcpy(page_address(page_copy), + page_address(page), PAGE_SIZE); + page = page_copy; + } } - } else { - src = buf; + entry = swp_entry(root_swap, offset); + error = rw_swap_page_sync(WRITE, entry, page, bio_chain); } - return bio_write_page(offset, src, bio_chain); + return error; } /* @@ -225,11 +137,11 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain) * at a time. */ -#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1) +#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(long) - 1) struct swap_map_page { - sector_t entries[MAP_PAGE_ENTRIES]; - sector_t next_swap; + unsigned long entries[MAP_PAGE_ENTRIES]; + unsigned long next_swap; }; /** @@ -239,7 +151,7 @@ struct swap_map_page { struct swap_map_handle { struct swap_map_page *cur; - sector_t cur_swap; + unsigned long cur_swap; struct bitmap_page *bitmap; unsigned int k; }; @@ -254,6 +166,26 @@ static void release_swap_writer(struct swap_map_handle *handle) handle->bitmap = NULL; } +static void show_speed(struct timeval *start, struct timeval *stop, + unsigned nr_pages, char *msg) +{ + s64 elapsed_centisecs64; + int centisecs; + int k; + int kps; + + elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start); + do_div(elapsed_centisecs64, NSEC_PER_SEC / 100); + centisecs = elapsed_centisecs64; + if (centisecs == 0) + centisecs = 1; /* avoid div-by-zero */ + k = nr_pages * (PAGE_SIZE / 1024); + kps = (k * 100) / centisecs; + printk("%s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n", msg, k, + centisecs / 100, centisecs % 100, + kps / 1000, (kps % 1000) / 10); +} + static int get_swap_writer(struct swap_map_handle *handle) { handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL); @@ -264,7 +196,7 @@ static int get_swap_writer(struct swap_map_handle *handle) release_swap_writer(handle); return -ENOMEM; } - handle->cur_swap = alloc_swapdev_block(root_swap, handle->bitmap); + handle->cur_swap = alloc_swap_page(root_swap, handle->bitmap); if (!handle->cur_swap) { release_swap_writer(handle); return -ENOSPC; @@ -273,15 +205,43 @@ static int get_swap_writer(struct swap_map_handle *handle) return 0; } +static int wait_on_bio_chain(struct bio **bio_chain) +{ + struct bio *bio; + struct bio *next_bio; + int ret = 0; + + if (bio_chain == NULL) + return 0; + + bio = *bio_chain; + if (bio == NULL) + return 0; + while (bio) { + struct page *page; + + next_bio = bio->bi_private; + page = bio->bi_io_vec[0].bv_page; + wait_on_page_locked(page); + if (!PageUptodate(page) || PageError(page)) + ret = -EIO; + put_page(page); + bio_put(bio); + bio = next_bio; + } + *bio_chain = NULL; + return ret; +} + static int swap_write_page(struct swap_map_handle *handle, void *buf, struct bio **bio_chain) { int error = 0; - sector_t offset; + unsigned long offset; if (!handle->cur) return -EINVAL; - offset = alloc_swapdev_block(root_swap, handle->bitmap); + offset = alloc_swap_page(root_swap, handle->bitmap); error = write_page(buf, offset, 
bio_chain); if (error) return error; @@ -290,7 +250,7 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf, error = wait_on_bio_chain(bio_chain); if (error) goto out; - offset = alloc_swapdev_block(root_swap, handle->bitmap); + offset = alloc_swap_page(root_swap, handle->bitmap); if (!offset) return -ENOSPC; handle->cur->next_swap = offset; @@ -301,7 +261,7 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf, handle->cur_swap = offset; handle->k = 0; } - out: +out: return error; } @@ -355,7 +315,7 @@ static int save_image(struct swap_map_handle *handle, error = err2; if (!error) printk("\b\b\b\bdone\n"); - swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); + show_speed(&start, &stop, nr_to_write, "Wrote"); return error; } @@ -390,50 +350,100 @@ int swsusp_write(void) struct swsusp_info *header; int error; - error = swsusp_swap_check(); - if (error) { + if ((error = swsusp_swap_check())) { printk(KERN_ERR "swsusp: Cannot find swap device, try " "swapon -a.\n"); return error; } memset(&snapshot, 0, sizeof(struct snapshot_handle)); error = snapshot_read_next(&snapshot, PAGE_SIZE); - if (error < PAGE_SIZE) { - if (error >= 0) - error = -EFAULT; - - goto out; - } + if (error < PAGE_SIZE) + return error < 0 ? error : -EFAULT; header = (struct swsusp_info *)data_of(snapshot); if (!enough_swap(header->pages)) { printk(KERN_ERR "swsusp: Not enough free swap\n"); - error = -ENOSPC; - goto out; + return -ENOSPC; } error = get_swap_writer(&handle); if (!error) { - sector_t start = handle.cur_swap; - + unsigned long start = handle.cur_swap; error = swap_write_page(&handle, header, NULL); if (!error) error = save_image(&handle, &snapshot, header->pages - 1); - if (!error) { flush_swap_writer(&handle); printk("S"); - error = mark_swapfiles(start); + error = mark_swapfiles(swp_entry(root_swap, start)); printk("|\n"); } } if (error) free_all_swap_pages(root_swap, handle.bitmap); release_swap_writer(&handle); - out: - swsusp_close(); return error; } +static struct block_device *resume_bdev; + +/** + * submit - submit BIO request. + * @rw: READ or WRITE. + * @off physical offset of page. + * @page: page we're reading or writing. + * @bio_chain: list of pending biod (for async reading) + * + * Straight from the textbook - allocate and initialize the bio. + * If we're reading, make sure the page is marked as dirty. + * Then submit it and, if @bio_chain == NULL, wait. 
+ */ +static int submit(int rw, pgoff_t page_off, struct page *page, + struct bio **bio_chain) +{ + struct bio *bio; + + bio = bio_alloc(GFP_ATOMIC, 1); + if (!bio) + return -ENOMEM; + bio->bi_sector = page_off * (PAGE_SIZE >> 9); + bio->bi_bdev = resume_bdev; + bio->bi_end_io = end_swap_bio_read; + + if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { + printk("swsusp: ERROR: adding page to bio at %ld\n", page_off); + bio_put(bio); + return -EFAULT; + } + + lock_page(page); + bio_get(bio); + + if (bio_chain == NULL) { + submit_bio(rw | (1 << BIO_RW_SYNC), bio); + wait_on_page_locked(page); + if (rw == READ) + bio_set_pages_dirty(bio); + bio_put(bio); + } else { + if (rw == READ) + get_page(page); /* These pages are freed later */ + bio->bi_private = *bio_chain; + *bio_chain = bio; + submit_bio(rw | (1 << BIO_RW_SYNC), bio); + } + return 0; +} + +static int bio_read_page(pgoff_t page_off, void *addr, struct bio **bio_chain) +{ + return submit(READ, page_off, virt_to_page(addr), bio_chain); +} + +static int bio_write_page(pgoff_t page_off, void *addr) +{ + return submit(WRITE, page_off, virt_to_page(addr), NULL); +} + /** * The following functions allow us to read data using a swap map * in a file-alike way @@ -446,18 +456,17 @@ static void release_swap_reader(struct swap_map_handle *handle) handle->cur = NULL; } -static int get_swap_reader(struct swap_map_handle *handle, sector_t start) +static int get_swap_reader(struct swap_map_handle *handle, + swp_entry_t start) { int error; - if (!start) + if (!swp_offset(start)) return -EINVAL; - - handle->cur = (struct swap_map_page *)get_zeroed_page(__GFP_WAIT | __GFP_HIGH); + handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC); if (!handle->cur) return -ENOMEM; - - error = bio_read_page(start, handle->cur, NULL); + error = bio_read_page(swp_offset(start), handle->cur, NULL); if (error) { release_swap_reader(handle); return error; @@ -469,7 +478,7 @@ static int get_swap_reader(struct swap_map_handle *handle, sector_t start) static int swap_read_page(struct swap_map_handle *handle, void *buf, struct bio **bio_chain) { - sector_t offset; + unsigned long offset; int error; if (!handle->cur) @@ -538,11 +547,11 @@ static int load_image(struct swap_map_handle *handle, error = err2; if (!error) { printk("\b\b\b\bdone\n"); - snapshot_write_finalize(snapshot); + snapshot_free_unused_memory(snapshot); if (!snapshot_image_loaded(snapshot)) error = -ENODATA; } - swsusp_show_speed(&start, &stop, nr_to_read, "Read"); + show_speed(&start, &stop, nr_to_read, "Read"); return error; } @@ -591,16 +600,12 @@ int swsusp_check(void) if (!IS_ERR(resume_bdev)) { set_blocksize(resume_bdev, PAGE_SIZE); memset(&swsusp_header, 0, sizeof(swsusp_header)); - error = bio_read_page(swsusp_resume_block, - &swsusp_header, NULL); - if (error) + if ((error = bio_read_page(0, &swsusp_header, NULL))) return error; - if (!memcmp(SWSUSP_SIG, swsusp_header.sig, 10)) { memcpy(swsusp_header.sig, swsusp_header.orig_sig, 10); /* Reset swap signature now */ - error = bio_write_page(swsusp_resume_block, - &swsusp_header, NULL); + error = bio_write_page(0, &swsusp_header); } else { return -EINVAL; } diff --git a/trunk/kernel/power/swsusp.c b/trunk/kernel/power/swsusp.c index 31aa0390c777..0b66659dc516 100644 --- a/trunk/kernel/power/swsusp.c +++ b/trunk/kernel/power/swsusp.c @@ -49,7 +49,6 @@ #include #include #include -#include #include "power.h" @@ -65,8 +64,10 @@ int in_suspend __nosavedata = 0; #ifdef CONFIG_HIGHMEM unsigned int count_highmem_pages(void); +int 
save_highmem(void); int restore_highmem(void); #else +static inline int save_highmem(void) { return 0; } static inline int restore_highmem(void) { return 0; } static inline unsigned int count_highmem_pages(void) { return 0; } #endif @@ -133,18 +134,18 @@ static int bitmap_set(struct bitmap_page *bitmap, unsigned long bit) return 0; } -sector_t alloc_swapdev_block(int swap, struct bitmap_page *bitmap) +unsigned long alloc_swap_page(int swap, struct bitmap_page *bitmap) { unsigned long offset; offset = swp_offset(get_swap_page_of_type(swap)); if (offset) { - if (bitmap_set(bitmap, offset)) + if (bitmap_set(bitmap, offset)) { swap_free(swp_entry(swap, offset)); - else - return swapdev_block(swap, offset); + offset = 0; + } } - return 0; + return offset; } void free_all_swap_pages(int swap, struct bitmap_page *bitmap) @@ -164,34 +165,6 @@ void free_all_swap_pages(int swap, struct bitmap_page *bitmap) } } -/** - * swsusp_show_speed - print the time elapsed between two events represented by - * @start and @stop - * - * @nr_pages - number of pages processed between @start and @stop - * @msg - introductory message to print - */ - -void swsusp_show_speed(struct timeval *start, struct timeval *stop, - unsigned nr_pages, char *msg) -{ - s64 elapsed_centisecs64; - int centisecs; - int k; - int kps; - - elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start); - do_div(elapsed_centisecs64, NSEC_PER_SEC / 100); - centisecs = elapsed_centisecs64; - if (centisecs == 0) - centisecs = 1; /* avoid div-by-zero */ - k = nr_pages * (PAGE_SIZE / 1024); - kps = (k * 100) / centisecs; - printk("%s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n", msg, k, - centisecs / 100, centisecs % 100, - kps / 1000, (kps % 1000) / 10); -} - /** * swsusp_shrink_memory - Try to free as much memory as needed * @@ -211,37 +184,23 @@ static inline unsigned long __shrink_memory(long tmp) int swsusp_shrink_memory(void) { - long tmp; + long size, tmp; struct zone *zone; unsigned long pages = 0; unsigned int i = 0; char *p = "-\\|/"; - struct timeval start, stop; printk("Shrinking memory... "); - do_gettimeofday(&start); do { - long size, highmem_size; - - highmem_size = count_highmem_pages(); - size = count_data_pages() + PAGES_FOR_IO; + size = 2 * count_highmem_pages(); + size += size / 50 + count_data_pages() + PAGES_FOR_IO; tmp = size; - size += highmem_size; for_each_zone (zone) - if (populated_zone(zone)) { - if (is_highmem(zone)) { - highmem_size -= zone->free_pages; - } else { - tmp -= zone->free_pages; - tmp += zone->lowmem_reserve[ZONE_NORMAL]; - tmp += snapshot_additional_pages(zone); - } + if (!is_highmem(zone) && populated_zone(zone)) { + tmp -= zone->free_pages; + tmp += zone->lowmem_reserve[ZONE_NORMAL]; + tmp += snapshot_additional_pages(zone); } - - if (highmem_size < 0) - highmem_size = 0; - - tmp += highmem_size; if (tmp > 0) { tmp = __shrink_memory(tmp); if (!tmp) @@ -253,9 +212,7 @@ int swsusp_shrink_memory(void) } printk("\b%c", p[i++%4]); } while (tmp > 0); - do_gettimeofday(&stop); printk("\bdone (%lu pages freed)\n", pages); - swsusp_show_speed(&start, &stop, pages, "Freed"); return 0; } @@ -266,7 +223,6 @@ int swsusp_suspend(void) if ((error = arch_prepare_suspend())) return error; - local_irq_disable(); /* At this point, device_suspend() has been called, but *not* * device_power_down(). We *must* device_power_down() now. 
@@ -279,16 +235,23 @@ int swsusp_suspend(void) goto Enable_irqs; } + if ((error = save_highmem())) { + printk(KERN_ERR "swsusp: Not enough free pages for highmem\n"); + goto Restore_highmem; + } + save_processor_state(); if ((error = swsusp_arch_suspend())) printk(KERN_ERR "Error %d suspending\n", error); /* Restore control flow magically appears here */ restore_processor_state(); +Restore_highmem: + restore_highmem(); /* NOTE: device_power_up() is just a resume() for devices * that suspended with irqs off ... no overall powerup. */ device_power_up(); - Enable_irqs: +Enable_irqs: local_irq_enable(); return error; } @@ -305,23 +268,18 @@ int swsusp_resume(void) printk(KERN_ERR "Some devices failed to power down, very bad\n"); /* We'll ignore saved state, but this gets preempt count (etc) right */ save_processor_state(); - error = restore_highmem(); - if (!error) { - error = swsusp_arch_resume(); - /* The code below is only ever reached in case of a failure. - * Otherwise execution continues at place where - * swsusp_arch_suspend() was called - */ - BUG_ON(!error); - /* This call to restore_highmem() undos the previous one */ - restore_highmem(); - } + error = swsusp_arch_resume(); + /* Code below is only ever reached in case of failure. Otherwise + * execution continues at place where swsusp_arch_suspend was called + */ + BUG_ON(!error); /* The only reason why swsusp_arch_resume() can fail is memory being * very tight, so we have to free it as soon as we can to avoid * subsequent failures */ swsusp_free(); restore_processor_state(); + restore_highmem(); touch_softlockup_watchdog(); device_power_up(); local_irq_enable(); diff --git a/trunk/kernel/power/user.c b/trunk/kernel/power/user.c index 89443b85163b..d991d3b0e5a4 100644 --- a/trunk/kernel/power/user.c +++ b/trunk/kernel/power/user.c @@ -11,7 +11,6 @@ #include #include -#include #include #include #include @@ -22,7 +21,6 @@ #include #include #include -#include #include @@ -56,8 +54,7 @@ static int snapshot_open(struct inode *inode, struct file *filp) filp->private_data = data; memset(&data->handle, 0, sizeof(struct snapshot_handle)); if ((filp->f_flags & O_ACCMODE) == O_RDONLY) { - data->swap = swsusp_resume_device ? - swap_type_of(swsusp_resume_device, 0) : -1; + data->swap = swsusp_resume_device ? 
swap_type_of(swsusp_resume_device) : -1; data->mode = O_RDONLY; } else { data->swap = -1; @@ -79,10 +76,10 @@ static int snapshot_release(struct inode *inode, struct file *filp) free_all_swap_pages(data->swap, data->bitmap); free_bitmap(data->bitmap); if (data->frozen) { - mutex_lock(&pm_mutex); + down(&pm_sem); thaw_processes(); enable_nonboot_cpus(); - mutex_unlock(&pm_mutex); + up(&pm_sem); } atomic_inc(&device_available); return 0; @@ -127,8 +124,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, { int error = 0; struct snapshot_data *data; - loff_t avail; - sector_t offset; + loff_t offset, avail; if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC) return -ENOTTY; @@ -144,7 +140,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, case SNAPSHOT_FREEZE: if (data->frozen) break; - mutex_lock(&pm_mutex); + down(&pm_sem); error = disable_nonboot_cpus(); if (!error) { error = freeze_processes(); @@ -154,7 +150,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, error = -EBUSY; } } - mutex_unlock(&pm_mutex); + up(&pm_sem); if (!error) data->frozen = 1; break; @@ -162,10 +158,10 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, case SNAPSHOT_UNFREEZE: if (!data->frozen) break; - mutex_lock(&pm_mutex); + down(&pm_sem); thaw_processes(); enable_nonboot_cpus(); - mutex_unlock(&pm_mutex); + up(&pm_sem); data->frozen = 0; break; @@ -174,7 +170,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, error = -EPERM; break; } - mutex_lock(&pm_mutex); + down(&pm_sem); /* Free memory before shutting down devices. */ error = swsusp_shrink_memory(); if (!error) { @@ -187,7 +183,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, } resume_console(); } - mutex_unlock(&pm_mutex); + up(&pm_sem); if (!error) error = put_user(in_suspend, (unsigned int __user *)arg); if (!error) @@ -195,13 +191,13 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, break; case SNAPSHOT_ATOMIC_RESTORE: - snapshot_write_finalize(&data->handle); if (data->mode != O_WRONLY || !data->frozen || !snapshot_image_loaded(&data->handle)) { error = -EPERM; break; } - mutex_lock(&pm_mutex); + snapshot_free_unused_memory(&data->handle); + down(&pm_sem); pm_prepare_console(); suspend_console(); error = device_suspend(PMSG_PRETHAW); @@ -211,7 +207,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, } resume_console(); pm_restore_console(); - mutex_unlock(&pm_mutex); + up(&pm_sem); break; case SNAPSHOT_FREE: @@ -242,10 +238,10 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, break; } } - offset = alloc_swapdev_block(data->swap, data->bitmap); + offset = alloc_swap_page(data->swap, data->bitmap); if (offset) { offset <<= PAGE_SHIFT; - error = put_user(offset, (sector_t __user *)arg); + error = put_user(offset, (loff_t __user *)arg); } else { error = -ENOSPC; } @@ -268,7 +264,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, * so we need to recode them */ if (old_decode_dev(arg)) { - data->swap = swap_type_of(old_decode_dev(arg), 0); + data->swap = swap_type_of(old_decode_dev(arg)); if (data->swap < 0) error = -ENODEV; } else { @@ -286,7 +282,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, break; } - if (!mutex_trylock(&pm_mutex)) { + if (down_trylock(&pm_sem)) { error = -EBUSY; break; } @@ -313,66 +309,8 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, if (pm_ops->finish) pm_ops->finish(PM_SUSPEND_MEM); - OutS3: - 
mutex_unlock(&pm_mutex); - break; - - case SNAPSHOT_PMOPS: - switch (arg) { - - case PMOPS_PREPARE: - if (pm_ops->prepare) { - error = pm_ops->prepare(PM_SUSPEND_DISK); - } - break; - - case PMOPS_ENTER: - kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK); - error = pm_ops->enter(PM_SUSPEND_DISK); - break; - - case PMOPS_FINISH: - if (pm_ops && pm_ops->finish) { - pm_ops->finish(PM_SUSPEND_DISK); - } - break; - - default: - printk(KERN_ERR "SNAPSHOT_PMOPS: invalid argument %ld\n", arg); - error = -EINVAL; - - } - break; - - case SNAPSHOT_SET_SWAP_AREA: - if (data->bitmap) { - error = -EPERM; - } else { - struct resume_swap_area swap_area; - dev_t swdev; - - error = copy_from_user(&swap_area, (void __user *)arg, - sizeof(struct resume_swap_area)); - if (error) { - error = -EFAULT; - break; - } - - /* - * User space encodes device types as two-byte values, - * so we need to recode them - */ - swdev = old_decode_dev(swap_area.dev); - if (swdev) { - offset = swap_area.offset; - data->swap = swap_type_of(swdev, offset); - if (data->swap < 0) - error = -ENODEV; - } else { - data->swap = -1; - error = -EINVAL; - } - } +OutS3: + up(&pm_sem); break; default: @@ -383,7 +321,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, return error; } -static const struct file_operations snapshot_fops = { +static struct file_operations snapshot_fops = { .open = snapshot_open, .release = snapshot_release, .read = snapshot_read, diff --git a/trunk/kernel/printk.c b/trunk/kernel/printk.c index 185bb45eacf7..66426552fbfe 100644 --- a/trunk/kernel/printk.c +++ b/trunk/kernel/printk.c @@ -53,6 +53,8 @@ int console_printk[4] = { DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */ }; +EXPORT_UNUSED_SYMBOL(console_printk); /* June 2006 */ + /* * Low lever drivers may need that to know if they can schedule in * their unblank() callback or not. So let's export it. 
@@ -333,25 +335,13 @@ static void __call_console_drivers(unsigned long start, unsigned long end) } } -static int __read_mostly ignore_loglevel; - -int __init ignore_loglevel_setup(char *str) -{ - ignore_loglevel = 1; - printk(KERN_INFO "debug: ignoring loglevel setting.\n"); - - return 1; -} - -__setup("ignore_loglevel", ignore_loglevel_setup); - /* * Write out chars from start to end - 1 inclusive */ static void _call_console_drivers(unsigned long start, unsigned long end, int msg_log_level) { - if ((msg_log_level < console_loglevel || ignore_loglevel) && + if (msg_log_level < console_loglevel && console_drivers && start != end) { if ((start & LOG_BUF_MASK) > (end & LOG_BUF_MASK)) { /* wrapped write */ @@ -641,7 +631,12 @@ EXPORT_SYMBOL(vprintk); asmlinkage long sys_syslog(int type, char __user *buf, int len) { - return -ENOSYS; + return 0; +} + +int do_syslog(int type, char __user *buf, int len) +{ + return 0; } static void call_console_drivers(unsigned long start, unsigned long end) @@ -782,6 +777,7 @@ int is_console_locked(void) { return console_locked; } +EXPORT_UNUSED_SYMBOL(is_console_locked); /* June 2006 */ /** * release_console_sem - unlock the console system diff --git a/trunk/kernel/profile.c b/trunk/kernel/profile.c index fb5e03d57e9d..f940b462eec9 100644 --- a/trunk/kernel/profile.c +++ b/trunk/kernel/profile.c @@ -40,7 +40,7 @@ int (*timer_hook)(struct pt_regs *) __read_mostly; static atomic_t *prof_buffer; static unsigned long prof_len, prof_shift; -int prof_on __read_mostly; +static int prof_on __read_mostly; static cpumask_t prof_cpu_mask = CPU_MASK_ALL; #ifdef CONFIG_SMP static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits); @@ -51,19 +51,9 @@ static DEFINE_MUTEX(profile_flip_mutex); static int __init profile_setup(char * str) { static char __initdata schedstr[] = "schedule"; - static char __initdata sleepstr[] = "sleep"; int par; - if (!strncmp(str, sleepstr, strlen(sleepstr))) { - prof_on = SLEEP_PROFILING; - if (str[strlen(sleepstr)] == ',') - str += strlen(sleepstr) + 1; - if (get_option(&str, &par)) - prof_shift = par; - printk(KERN_INFO - "kernel sleep profiling enabled (shift: %ld)\n", - prof_shift); - } else if (!strncmp(str, sleepstr, strlen(sleepstr))) { + if (!strncmp(str, schedstr, strlen(schedstr))) { prof_on = SCHED_PROFILING; if (str[strlen(schedstr)] == ',') str += strlen(schedstr) + 1; @@ -214,8 +204,7 @@ EXPORT_SYMBOL_GPL(profile_event_unregister); * positions to which hits are accounted during short intervals (e.g. * several seconds) is usually very small. Exclusion from buffer * flipping is provided by interrupt disablement (note that for - * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from - * process context). + * SCHED_PROFILING profile_hit() may be called from process context). * The hash function is meant to be lightweight as opposed to strong, * and was vaguely inspired by ppc64 firmware-supported inverted * pagetable hash functions, but uses a full hashtable full of finite @@ -268,7 +257,7 @@ static void profile_discard_flip_buffers(void) mutex_unlock(&profile_flip_mutex); } -void profile_hits(int type, void *__pc, unsigned int nr_hits) +void profile_hit(int type, void *__pc) { unsigned long primary, secondary, flags, pc = (unsigned long)__pc; int i, j, cpu; @@ -285,31 +274,21 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits) put_cpu(); return; } - /* - * We buffer the global profiler buffer into a per-CPU - * queue and thus reduce the number of global (and possibly - * NUMA-alien) accesses. 
The write-queue is self-coalescing: - */ local_irq_save(flags); do { for (j = 0; j < PROFILE_GRPSZ; ++j) { if (hits[i + j].pc == pc) { - hits[i + j].hits += nr_hits; + hits[i + j].hits++; goto out; } else if (!hits[i + j].hits) { hits[i + j].pc = pc; - hits[i + j].hits = nr_hits; + hits[i + j].hits = 1; goto out; } } i = (i + secondary) & (NR_PROFILE_HIT - 1); } while (i != primary); - - /* - * Add the current hit(s) and flush the write-queue out - * to the global buffer: - */ - atomic_add(nr_hits, &prof_buffer[pc]); + atomic_inc(&prof_buffer[pc]); for (i = 0; i < NR_PROFILE_HIT; ++i) { atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]); hits[i].pc = hits[i].hits = 0; @@ -319,6 +298,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits) put_cpu(); } +#ifdef CONFIG_HOTPLUG_CPU static int __devinit profile_cpu_callback(struct notifier_block *info, unsigned long action, void *__cpu) { @@ -371,19 +351,19 @@ static int __devinit profile_cpu_callback(struct notifier_block *info, } return NOTIFY_OK; } +#endif /* CONFIG_HOTPLUG_CPU */ #else /* !CONFIG_SMP */ #define profile_flip_buffers() do { } while (0) #define profile_discard_flip_buffers() do { } while (0) -#define profile_cpu_callback NULL -void profile_hits(int type, void *__pc, unsigned int nr_hits) +void profile_hit(int type, void *__pc) { unsigned long pc; if (prof_on != type || !prof_buffer) return; pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift; - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]); + atomic_inc(&prof_buffer[min(pc, prof_len - 1)]); } #endif /* !CONFIG_SMP */ @@ -462,8 +442,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos) read = 0; while (p < sizeof(unsigned int) && count > 0) { - if (put_user(*((char *)(&sample_step)+p),buf)) - return -EFAULT; + put_user(*((char *)(&sample_step)+p),buf); buf++; p++; count--; read++; } pnt = (char *)prof_buffer + p - sizeof(atomic_t); @@ -501,7 +480,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf, return count; } -static const struct file_operations proc_profile_operations = { +static struct file_operations proc_profile_operations = { .read = read_profile, .write = write_profile, }; diff --git a/trunk/kernel/rcupdate.c b/trunk/kernel/rcupdate.c index 3554b76da84c..26bb5ffe1ef1 100644 --- a/trunk/kernel/rcupdate.c +++ b/trunk/kernel/rcupdate.c @@ -235,14 +235,12 @@ static void rcu_do_batch(struct rcu_data *rdp) list = rdp->donelist; while (list) { - next = list->next; - prefetch(next); + next = rdp->donelist = list->next; list->func(list); list = next; if (++count >= rdp->blimit) break; } - rdp->donelist = list; local_irq_disable(); rdp->qlen -= count; diff --git a/trunk/kernel/rcutorture.c b/trunk/kernel/rcutorture.c index c52f981ea008..e2bda18f6f42 100644 --- a/trunk/kernel/rcutorture.c +++ b/trunk/kernel/rcutorture.c @@ -401,7 +401,7 @@ static void srcu_torture_cleanup(void) cleanup_srcu_struct(&srcu_ctl); } -static int srcu_torture_read_lock(void) __acquires(&srcu_ctl) +static int srcu_torture_read_lock(void) { return srcu_read_lock(&srcu_ctl); } @@ -419,7 +419,7 @@ static void srcu_read_delay(struct rcu_random_state *rrsp) schedule_timeout_interruptible(longdelay); } -static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl) +static void srcu_torture_read_unlock(int idx) { srcu_read_unlock(&srcu_ctl, idx); } diff --git a/trunk/kernel/relay.c b/trunk/kernel/relay.c index 75a3a9a7efc2..f04bbdb56ac2 100644 --- a/trunk/kernel/relay.c +++ b/trunk/kernel/relay.c @@ -308,10 +308,9 @@ 
static struct rchan_callbacks default_channel_callbacks = { * reason waking is deferred is that calling directly from write * causes problems if you're writing from say the scheduler. */ -static void wakeup_readers(struct work_struct *work) +static void wakeup_readers(void *private) { - struct rchan_buf *buf = - container_of(work, struct rchan_buf, wake_readers.work); + struct rchan_buf *buf = private; wake_up_interruptible(&buf->read_wait); } @@ -329,7 +328,7 @@ static inline void __relay_reset(struct rchan_buf *buf, unsigned int init) if (init) { init_waitqueue_head(&buf->read_wait); kref_init(&buf->kref); - INIT_DELAYED_WORK(&buf->wake_readers, NULL); + INIT_WORK(&buf->wake_readers, NULL, NULL); } else { cancel_delayed_work(&buf->wake_readers); flush_scheduled_work(); @@ -550,8 +549,7 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length) buf->padding[old_subbuf]; smp_mb(); if (waitqueue_active(&buf->read_wait)) { - PREPARE_DELAYED_WORK(&buf->wake_readers, - wakeup_readers); + PREPARE_WORK(&buf->wake_readers, wakeup_readers, buf); schedule_delayed_work(&buf->wake_readers, 1); } } @@ -1013,7 +1011,7 @@ static ssize_t relay_file_sendfile(struct file *filp, actor, &desc); } -const struct file_operations relay_file_operations = { +struct file_operations relay_file_operations = { .open = relay_file_open, .poll = relay_file_poll, .mmap = relay_file_mmap, diff --git a/trunk/kernel/resource.c b/trunk/kernel/resource.c index 7b9a497419d9..6de60c12143e 100644 --- a/trunk/kernel/resource.c +++ b/trunk/kernel/resource.c @@ -88,7 +88,7 @@ static int r_show(struct seq_file *m, void *v) return 0; } -static const struct seq_operations resource_op = { +static struct seq_operations resource_op = { .start = r_start, .next = r_next, .stop = r_stop, @@ -115,14 +115,14 @@ static int iomem_open(struct inode *inode, struct file *file) return res; } -static const struct file_operations proc_ioports_operations = { +static struct file_operations proc_ioports_operations = { .open = ioports_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; -static const struct file_operations proc_iomem_operations = { +static struct file_operations proc_iomem_operations = { .open = iomem_open, .read = seq_read, .llseek = seq_lseek, diff --git a/trunk/kernel/rtmutex-tester.c b/trunk/kernel/rtmutex-tester.c index 015fc633c96c..6dcea9dd8c94 100644 --- a/trunk/kernel/rtmutex-tester.c +++ b/trunk/kernel/rtmutex-tester.c @@ -13,7 +13,6 @@ #include #include #include -#include #include "rtmutex.h" diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c index f385eff4682d..3399701c680e 100644 --- a/trunk/kernel/sched.c +++ b/trunk/kernel/sched.c @@ -34,7 +34,7 @@ #include #include #include -#include +#include #include #include #include @@ -505,7 +505,7 @@ static int schedstat_open(struct inode *inode, struct file *file) return res; } -const struct file_operations proc_schedstat_operations = { +struct file_operations proc_schedstat_operations = { .open = schedstat_open, .read = seq_read, .llseek = seq_lseek, @@ -948,17 +948,6 @@ static void activate_task(struct task_struct *p, struct rq *rq, int local) } #endif - /* - * Sleep time is in units of nanosecs, so shift by 20 to get a - * milliseconds-range estimation of the amount of time that the task - * spent sleeping: - */ - if (unlikely(prof_on == SLEEP_PROFILING)) { - if (p->state == TASK_UNINTERRUPTIBLE) - profile_hits(SLEEP_PROFILING, (void *)get_wchan(p), - (now - p->timestamp) >> 20); - } - if (!rt_task(p)) p->prio = recalc_task_prio(p, now); @@ 
-3344,7 +3333,6 @@ asmlinkage void __sched schedule(void) printk(KERN_ERR "BUG: scheduling while atomic: " "%s/0x%08x/%d\n", current->comm, preempt_count(), current->pid); - debug_show_held_locks(current); dump_stack(); } profile_hit(SCHED_PROFILING, __builtin_return_address(0)); @@ -4816,18 +4804,18 @@ static void show_task(struct task_struct *p) show_stack(p, NULL); } -void show_state_filter(unsigned long state_filter) +void show_state(void) { struct task_struct *g, *p; #if (BITS_PER_LONG == 32) printk("\n" - " free sibling\n"); - printk(" task PC stack pid father child younger older\n"); + " sibling\n"); + printk(" task PC pid father child younger older\n"); #else printk("\n" - " free sibling\n"); - printk(" task PC stack pid father child younger older\n"); + " sibling\n"); + printk(" task PC pid father child younger older\n"); #endif read_lock(&tasklist_lock); do_each_thread(g, p) { @@ -4836,16 +4824,11 @@ void show_state_filter(unsigned long state_filter) * console might take alot of time: */ touch_nmi_watchdog(); - if (p->state & state_filter) - show_task(p); + show_task(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); - /* - * Only show locks if all tasks are dumped: - */ - if (state_filter == -1) - debug_show_all_locks(); + debug_show_all_locks(); } /** @@ -6740,6 +6723,8 @@ SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show, sched_smt_power_savings_store); #endif + +#ifdef CONFIG_HOTPLUG_CPU /* * Force a reinitialization of the sched domains hierarchy. The domains * and groups cannot be updated in place without racing with the balancing @@ -6772,6 +6757,7 @@ static int update_sched_domains(struct notifier_block *nfb, return NOTIFY_OK; } +#endif void __init sched_init_smp(void) { @@ -6881,7 +6867,6 @@ void __might_sleep(char *file, int line) " context at %s:%d\n", file, line); printk("in_atomic():%d, irqs_disabled():%d\n", in_atomic(), irqs_disabled()); - debug_show_held_locks(current); dump_stack(); } #endif diff --git a/trunk/kernel/signal.c b/trunk/kernel/signal.c index ec81defde339..df18c167a2a7 100644 --- a/trunk/kernel/signal.c +++ b/trunk/kernel/signal.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -34,7 +33,7 @@ * SLAB caches for signal bits. 
*/ -static struct kmem_cache *sigqueue_cachep; +static kmem_cache_t *sigqueue_cachep; /* * In POSIX a signal is sent either to a specific thread (Linux task) @@ -1134,7 +1133,8 @@ int kill_pid_info(int sig, struct siginfo *info, struct pid *pid) return error; } -static int kill_proc_info(int sig, struct siginfo *info, pid_t pid) +int +kill_proc_info(int sig, struct siginfo *info, pid_t pid) { int error; rcu_read_lock(); diff --git a/trunk/kernel/softirq.c b/trunk/kernel/softirq.c index 918e52df090e..bf25015dce16 100644 --- a/trunk/kernel/softirq.c +++ b/trunk/kernel/softirq.c @@ -574,6 +574,8 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_UP_PREPARE: + BUG_ON(per_cpu(tasklet_vec, hotcpu).list); + BUG_ON(per_cpu(tasklet_hi_vec, hotcpu).list); p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu); if (IS_ERR(p)) { printk("ksoftirqd for %i failed\n", hotcpu); diff --git a/trunk/kernel/sys.c b/trunk/kernel/sys.c index a0c1a29a507f..98489d82801b 100644 --- a/trunk/kernel/sys.c +++ b/trunk/kernel/sys.c @@ -880,7 +880,7 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user return 0; } -static void deferred_cad(struct work_struct *dummy) +static void deferred_cad(void *dummy) { kernel_restart(NULL); } @@ -892,7 +892,7 @@ static void deferred_cad(struct work_struct *dummy) */ void ctrl_alt_del(void) { - static DECLARE_WORK(cad_work, deferred_cad); + static DECLARE_WORK(cad_work, deferred_cad, NULL); if (C_A_D) schedule_work(&cad_work); @@ -1102,14 +1102,14 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) asmlinkage long sys_setuid(uid_t uid) { int old_euid = current->euid; - int old_ruid, old_suid, new_suid; + int old_ruid, old_suid, new_ruid, new_suid; int retval; retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID); if (retval) return retval; - old_ruid = current->uid; + old_ruid = new_ruid = current->uid; old_suid = current->suid; new_suid = old_suid; diff --git a/trunk/kernel/sysctl.c b/trunk/kernel/sysctl.c index 758dbbf972a5..09e569f4792b 100644 --- a/trunk/kernel/sysctl.c +++ b/trunk/kernel/sysctl.c @@ -170,7 +170,7 @@ static ssize_t proc_readsys(struct file *, char __user *, size_t, loff_t *); static ssize_t proc_writesys(struct file *, const char __user *, size_t, loff_t *); static int proc_opensys(struct inode *, struct file *); -const struct file_operations proc_sys_file_operations = { +struct file_operations proc_sys_file_operations = { .open = proc_opensys, .read = proc_readsys, .write = proc_writesys, @@ -977,6 +977,17 @@ static ctl_table vm_table[] = { .extra1 = &zero, }, #endif +#ifdef CONFIG_SWAP + { + .ctl_name = VM_SWAP_TOKEN_TIMEOUT, + .procname = "swap_token_timeout", + .data = &swap_token_default_timeout, + .maxlen = sizeof(swap_token_default_timeout), + .mode = 0644, + .proc_handler = &proc_dointvec_jiffies, + .strategy = &sysctl_jiffies, + }, +#endif #ifdef CONFIG_NUMA { .ctl_name = VM_ZONE_RECLAIM_MODE, @@ -1875,7 +1886,7 @@ static int __do_proc_dointvec(void *tbl_data, ctl_table *table, p = buf; if (*p == '-' && left > 1) { neg = 1; - p++; + left--, p++; } if (*p < '0' || *p > '9') break; @@ -2126,7 +2137,7 @@ static int __do_proc_doulongvec_minmax(void *data, ctl_table *table, int write, p = buf; if (*p == '-' && left > 1) { neg = 1; - p++; + left--, p++; } if (*p < '0' || *p > '9') break; diff --git a/trunk/kernel/taskstats.c b/trunk/kernel/taskstats.c index 4c3476fa058d..d3d28919d4b4 100644 --- a/trunk/kernel/taskstats.c +++ b/trunk/kernel/taskstats.c @@ -34,7 +34,7 
@@ static DEFINE_PER_CPU(__u32, taskstats_seqnum) = { 0 }; static int family_registered; -struct kmem_cache *taskstats_cache; +kmem_cache_t *taskstats_cache; static struct genl_family family = { .id = GENL_ID_GENERATE, @@ -69,7 +69,7 @@ enum actions { }; static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp, - size_t size) + void **replyp, size_t size) { struct sk_buff *skb; void *reply; @@ -94,6 +94,7 @@ static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp, } *skbp = skb; + *replyp = reply; return 0; } @@ -118,10 +119,10 @@ static int send_reply(struct sk_buff *skb, pid_t pid) /* * Send taskstats data in @skb to listeners registered for @cpu's exit data */ -static void send_cpu_listeners(struct sk_buff *skb, - struct listener_list *listeners) +static void send_cpu_listeners(struct sk_buff *skb, unsigned int cpu) { struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data); + struct listener_list *listeners; struct listener *s, *tmp; struct sk_buff *skb_next, *skb_cur = skb; void *reply = genlmsg_data(genlhdr); @@ -134,6 +135,7 @@ static void send_cpu_listeners(struct sk_buff *skb, } rc = 0; + listeners = &per_cpu(listener_array, cpu); down_read(&listeners->sem); list_for_each_entry(s, &listeners->list, list) { skb_next = NULL; @@ -184,7 +186,6 @@ static int fill_pid(pid_t pid, struct task_struct *tsk, } else get_task_struct(tsk); - memset(stats, 0, sizeof(*stats)); /* * Each accounting subsystem adds calls to its functions to * fill in relevant parts of struct taskstsats as follows @@ -227,8 +228,6 @@ static int fill_tgid(pid_t tgid, struct task_struct *first, if (first->signal->stats) memcpy(stats, first->signal->stats, sizeof(*stats)); - else - memset(stats, 0, sizeof(*stats)); tsk = first; do { @@ -345,36 +344,14 @@ static int parse(struct nlattr *na, cpumask_t *mask) return ret; } -static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid) -{ - struct nlattr *na, *ret; - int aggr; - - aggr = (type == TASKSTATS_TYPE_PID) - ? 
TASKSTATS_TYPE_AGGR_PID - : TASKSTATS_TYPE_AGGR_TGID; - - na = nla_nest_start(skb, aggr); - if (!na) - goto err; - if (nla_put(skb, type, sizeof(pid), &pid) < 0) - goto err; - ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats)); - if (!ret) - goto err; - nla_nest_end(skb, na); - - return nla_data(ret); -err: - return NULL; -} - static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) { int rc = 0; struct sk_buff *rep_skb; - struct taskstats *stats; + struct taskstats stats; + void *reply; size_t size; + struct nlattr *na; cpumask_t mask; rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask); @@ -395,71 +372,83 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) size = nla_total_size(sizeof(u32)) + nla_total_size(sizeof(struct taskstats)) + nla_total_size(0); - rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size); + memset(&stats, 0, sizeof(stats)); + rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, &reply, size); if (rc < 0) return rc; - rc = -EINVAL; if (info->attrs[TASKSTATS_CMD_ATTR_PID]) { u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]); - stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid); - if (!stats) - goto err; - - rc = fill_pid(pid, NULL, stats); + rc = fill_pid(pid, NULL, &stats); if (rc < 0) goto err; + + na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID); + NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, pid); + NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS, + stats); } else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) { u32 tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]); - stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid); - if (!stats) - goto err; - - rc = fill_tgid(tgid, NULL, stats); + rc = fill_tgid(tgid, NULL, &stats); if (rc < 0) goto err; - } else + + na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID); + NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, tgid); + NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS, + stats); + } else { + rc = -EINVAL; goto err; + } + + nla_nest_end(rep_skb, na); return send_reply(rep_skb, info->snd_pid); + +nla_put_failure: + rc = genlmsg_cancel(rep_skb, reply); err: nlmsg_free(rep_skb); return rc; } -static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk) +void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu) { - struct signal_struct *sig = tsk->signal; - struct taskstats *stats; - - if (sig->stats || thread_group_empty(tsk)) - goto ret; + struct listener_list *listeners; + struct taskstats *tmp; + /* + * This is the cpu on which the task is exiting currently and will + * be the one for which the exit event is sent, even if the cpu + * on which this function is running changes later. 
+ */ + *mycpu = raw_smp_processor_id(); - /* No problem if kmem_cache_zalloc() fails */ - stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL); + *ptidstats = NULL; + tmp = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL); + if (!tmp) + return; - spin_lock_irq(&tsk->sighand->siglock); - if (!sig->stats) { - sig->stats = stats; - stats = NULL; + listeners = &per_cpu(listener_array, *mycpu); + down_read(&listeners->sem); + if (!list_empty(&listeners->list)) { + *ptidstats = tmp; + tmp = NULL; } - spin_unlock_irq(&tsk->sighand->siglock); - - if (stats) - kmem_cache_free(taskstats_cache, stats); -ret: - return sig->stats; + up_read(&listeners->sem); + kfree(tmp); } /* Send pid data out on exit */ -void taskstats_exit(struct task_struct *tsk, int group_dead) +void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats, + int group_dead, unsigned int mycpu) { int rc; - struct listener_list *listeners; - struct taskstats *stats; struct sk_buff *rep_skb; + void *reply; size_t size; int is_thread_group; + struct nlattr *na; if (!family_registered) return; @@ -470,7 +459,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead) size = nla_total_size(sizeof(u32)) + nla_total_size(sizeof(struct taskstats)) + nla_total_size(0); - is_thread_group = !!taskstats_tgid_alloc(tsk); + is_thread_group = (tsk->signal->stats != NULL); if (is_thread_group) { /* PID + STATS + TGID + STATS */ size = 2 * size; @@ -478,39 +467,49 @@ void taskstats_exit(struct task_struct *tsk, int group_dead) fill_tgid_exit(tsk); } - listeners = &__raw_get_cpu_var(listener_array); - if (list_empty(&listeners->list)) + if (!tidstats) return; - rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size); + rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, &reply, size); if (rc < 0) - return; - - stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, tsk->pid); - if (!stats) - goto err; + goto ret; - rc = fill_pid(tsk->pid, tsk, stats); + rc = fill_pid(tsk->pid, tsk, tidstats); if (rc < 0) - goto err; + goto err_skb; + + na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID); + NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, (u32)tsk->pid); + NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS, + *tidstats); + nla_nest_end(rep_skb, na); + + if (!is_thread_group) + goto send; /* * Doesn't matter if tsk is the leader or the last group member leaving */ - if (!is_thread_group || !group_dead) + if (!group_dead) goto send; - stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tsk->tgid); - if (!stats) - goto err; - - memcpy(stats, tsk->signal->stats, sizeof(*stats)); + na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID); + NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, (u32)tsk->tgid); + /* No locking needed for tsk->signal->stats since group is dead */ + NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS, + *tsk->signal->stats); + nla_nest_end(rep_skb, na); send: - send_cpu_listeners(rep_skb, listeners); + send_cpu_listeners(rep_skb, mycpu); return; -err: + +nla_put_failure: + genlmsg_cancel(rep_skb, reply); +err_skb: nlmsg_free(rep_skb); +ret: + return; } static struct genl_ops taskstats_ops = { diff --git a/trunk/kernel/user.c b/trunk/kernel/user.c index 4869563080e9..220e586127a0 100644 --- a/trunk/kernel/user.c +++ b/trunk/kernel/user.c @@ -26,7 +26,7 @@ #define __uidhashfn(uid) (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK) #define uidhashentry(uid) (uidhash_table + __uidhashfn((uid))) -static struct kmem_cache *uid_cachep; +static kmem_cache_t *uid_cachep; static struct list_head uidhash_table[UIDHASH_SZ]; /* 
@@ -132,7 +132,7 @@ struct user_struct * alloc_uid(uid_t uid) if (!up) { struct user_struct *new; - new = kmem_cache_alloc(uid_cachep, GFP_KERNEL); + new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL); if (!new) return NULL; new->uid = uid; diff --git a/trunk/kernel/workqueue.c b/trunk/kernel/workqueue.c index c5257316f4b9..17c2f03d2c27 100644 --- a/trunk/kernel/workqueue.c +++ b/trunk/kernel/workqueue.c @@ -29,9 +29,6 @@ #include #include #include -#include -#include -#include /* * The per-CPU workqueue (if single thread, we always use the first @@ -58,8 +55,6 @@ struct cpu_workqueue_struct { struct task_struct *thread; int run_depth; /* Detect run_workqueue() recursion depth */ - - int freezeable; /* Freeze the thread during suspend */ } ____cacheline_aligned; /* @@ -85,29 +80,6 @@ static inline int is_single_threaded(struct workqueue_struct *wq) return list_empty(&wq->list); } -static inline void set_wq_data(struct work_struct *work, void *wq) -{ - unsigned long new, old, res; - - /* assume the pending flag is already set and that the task has already - * been queued on this workqueue */ - new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING); - res = work->management; - if (res != new) { - do { - old = res; - new = (unsigned long) wq; - new |= (old & WORK_STRUCT_FLAG_MASK); - res = cmpxchg(&work->management, old, new); - } while (res != old); - } -} - -static inline void *get_wq_data(struct work_struct *work) -{ - return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK); -} - /* Preempt must be disabled. */ static void __queue_work(struct cpu_workqueue_struct *cwq, struct work_struct *work) @@ -115,7 +87,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq, unsigned long flags; spin_lock_irqsave(&cwq->lock, flags); - set_wq_data(work, cwq); + work->wq_data = cwq; list_add_tail(&work->entry, &cwq->worklist); cwq->insert_sequence++; wake_up(&cwq->more_work); @@ -136,7 +108,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work) { int ret = 0, cpu = get_cpu(); - if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) { + if (!test_and_set_bit(0, &work->pending)) { if (unlikely(is_single_threaded(wq))) cpu = singlethread_cpu; BUG_ON(!list_empty(&work->entry)); @@ -150,42 +122,38 @@ EXPORT_SYMBOL_GPL(queue_work); static void delayed_work_timer_fn(unsigned long __data) { - struct delayed_work *dwork = (struct delayed_work *)__data; - struct workqueue_struct *wq = get_wq_data(&dwork->work); + struct work_struct *work = (struct work_struct *)__data; + struct workqueue_struct *wq = work->wq_data; int cpu = smp_processor_id(); if (unlikely(is_single_threaded(wq))) cpu = singlethread_cpu; - __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work); + __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); } /** * queue_delayed_work - queue work on a workqueue after delay * @wq: workqueue to use - * @work: delayable work to queue + * @work: work to queue * @delay: number of jiffies to wait before queueing * * Returns 0 if @work was already on a queue, non-zero otherwise. 
*/ int fastcall queue_delayed_work(struct workqueue_struct *wq, - struct delayed_work *dwork, unsigned long delay) + struct work_struct *work, unsigned long delay) { int ret = 0; - struct timer_list *timer = &dwork->timer; - struct work_struct *work = &dwork->work; - - if (delay == 0) - return queue_work(wq, work); + struct timer_list *timer = &work->timer; - if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) { + if (!test_and_set_bit(0, &work->pending)) { BUG_ON(timer_pending(timer)); BUG_ON(!list_empty(&work->entry)); /* This stores wq for the moment, for the timer_fn */ - set_wq_data(work, wq); + work->wq_data = wq; timer->expires = jiffies + delay; - timer->data = (unsigned long)dwork; + timer->data = (unsigned long)work; timer->function = delayed_work_timer_fn; add_timer(timer); ret = 1; @@ -204,20 +172,19 @@ EXPORT_SYMBOL_GPL(queue_delayed_work); * Returns 0 if @work was already on a queue, non-zero otherwise. */ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, - struct delayed_work *dwork, unsigned long delay) + struct work_struct *work, unsigned long delay) { int ret = 0; - struct timer_list *timer = &dwork->timer; - struct work_struct *work = &dwork->work; + struct timer_list *timer = &work->timer; - if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) { + if (!test_and_set_bit(0, &work->pending)) { BUG_ON(timer_pending(timer)); BUG_ON(!list_empty(&work->entry)); /* This stores wq for the moment, for the timer_fn */ - set_wq_data(work, wq); + work->wq_data = wq; timer->expires = jiffies + delay; - timer->data = (unsigned long)dwork; + timer->data = (unsigned long)work; timer->function = delayed_work_timer_fn; add_timer_on(timer, cpu); ret = 1; @@ -245,26 +212,15 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq) while (!list_empty(&cwq->worklist)) { struct work_struct *work = list_entry(cwq->worklist.next, struct work_struct, entry); - work_func_t f = work->func; + void (*f) (void *) = work->func; + void *data = work->data; list_del_init(cwq->worklist.next); spin_unlock_irqrestore(&cwq->lock, flags); - BUG_ON(get_wq_data(work) != cwq); - if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management)) - work_release(work); - f(work); - - if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { - printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " - "%s/0x%08x/%d\n", - current->comm, preempt_count(), - current->pid); - printk(KERN_ERR " last function: "); - print_symbol("%s\n", (unsigned long)f); - debug_show_held_locks(current); - dump_stack(); - } + BUG_ON(work->wq_data != cwq); + clear_bit(0, &work->pending); + f(data); spin_lock_irqsave(&cwq->lock, flags); cwq->remove_sequence++; @@ -281,8 +237,7 @@ static int worker_thread(void *__cwq) struct k_sigaction sa; sigset_t blocked; - if (!cwq->freezeable) - current->flags |= PF_NOFREEZE; + current->flags |= PF_NOFREEZE; set_user_nice(current, -5); @@ -305,9 +260,6 @@ static int worker_thread(void *__cwq) set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { - if (cwq->freezeable) - try_to_freeze(); - add_wait_queue(&cwq->more_work, &wait); if (list_empty(&cwq->worklist)) schedule(); @@ -384,7 +336,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq) EXPORT_SYMBOL_GPL(flush_workqueue); static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq, - int cpu, int freezeable) + int cpu) { struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu); struct task_struct *p; @@ -394,7 +346,6 @@ static struct task_struct 
*create_workqueue_thread(struct workqueue_struct *wq, cwq->thread = NULL; cwq->insert_sequence = 0; cwq->remove_sequence = 0; - cwq->freezeable = freezeable; INIT_LIST_HEAD(&cwq->worklist); init_waitqueue_head(&cwq->more_work); init_waitqueue_head(&cwq->work_done); @@ -410,7 +361,7 @@ static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq, } struct workqueue_struct *__create_workqueue(const char *name, - int singlethread, int freezeable) + int singlethread) { int cpu, destroy = 0; struct workqueue_struct *wq; @@ -430,7 +381,7 @@ struct workqueue_struct *__create_workqueue(const char *name, mutex_lock(&workqueue_mutex); if (singlethread) { INIT_LIST_HEAD(&wq->list); - p = create_workqueue_thread(wq, singlethread_cpu, freezeable); + p = create_workqueue_thread(wq, singlethread_cpu); if (!p) destroy = 1; else @@ -438,7 +389,7 @@ struct workqueue_struct *__create_workqueue(const char *name, } else { list_add(&wq->list, &workqueues); for_each_online_cpu(cpu) { - p = create_workqueue_thread(wq, cpu, freezeable); + p = create_workqueue_thread(wq, cpu); if (p) { kthread_bind(p, cpu); wake_up_process(p); @@ -517,37 +468,38 @@ EXPORT_SYMBOL(schedule_work); /** * schedule_delayed_work - put work task in global workqueue after delay - * @dwork: job to be done - * @delay: number of jiffies to wait or 0 for immediate execution + * @work: job to be done + * @delay: number of jiffies to wait * * After waiting for a given time this puts a job in the kernel-global * workqueue. */ -int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay) +int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay) { - return queue_delayed_work(keventd_wq, dwork, delay); + return queue_delayed_work(keventd_wq, work, delay); } EXPORT_SYMBOL(schedule_delayed_work); /** * schedule_delayed_work_on - queue work in global workqueue on CPU after delay * @cpu: cpu to use - * @dwork: job to be done + * @work: job to be done * @delay: number of jiffies to wait * * After waiting for a given time this puts a job in the kernel-global * workqueue on the specified CPU. */ int schedule_delayed_work_on(int cpu, - struct delayed_work *dwork, unsigned long delay) + struct work_struct *work, unsigned long delay) { - return queue_delayed_work_on(cpu, keventd_wq, dwork, delay); + return queue_delayed_work_on(cpu, keventd_wq, work, delay); } EXPORT_SYMBOL(schedule_delayed_work_on); /** * schedule_on_each_cpu - call a function on each online CPU from keventd * @func: the function to call + * @info: a pointer to pass to func() * * Returns zero on success. * Returns -ve errno on failure. @@ -556,7 +508,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on); * * schedule_on_each_cpu() is very slow. */ -int schedule_on_each_cpu(work_func_t func) +int schedule_on_each_cpu(void (*func)(void *info), void *info) { int cpu; struct work_struct *works; @@ -567,7 +519,7 @@ int schedule_on_each_cpu(work_func_t func) mutex_lock(&workqueue_mutex); for_each_online_cpu(cpu) { - INIT_WORK(per_cpu_ptr(works, cpu), func); + INIT_WORK(per_cpu_ptr(works, cpu), func, info); __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), per_cpu_ptr(works, cpu)); } @@ -587,12 +539,12 @@ EXPORT_SYMBOL(flush_scheduled_work); * cancel_rearming_delayed_workqueue - reliably kill off a delayed * work whose handler rearms the delayed work. 
* @wq: the controlling workqueue structure - * @dwork: the delayed work struct + * @work: the delayed work struct */ void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, - struct delayed_work *dwork) + struct work_struct *work) { - while (!cancel_delayed_work(dwork)) + while (!cancel_delayed_work(work)) flush_workqueue(wq); } EXPORT_SYMBOL(cancel_rearming_delayed_workqueue); @@ -600,17 +552,18 @@ EXPORT_SYMBOL(cancel_rearming_delayed_workqueue); /** * cancel_rearming_delayed_work - reliably kill off a delayed keventd * work whose handler rearms the delayed work. - * @dwork: the delayed work struct + * @work: the delayed work struct */ -void cancel_rearming_delayed_work(struct delayed_work *dwork) +void cancel_rearming_delayed_work(struct work_struct *work) { - cancel_rearming_delayed_workqueue(keventd_wq, dwork); + cancel_rearming_delayed_workqueue(keventd_wq, work); } EXPORT_SYMBOL(cancel_rearming_delayed_work); /** * execute_in_process_context - reliably execute the routine with user context * @fn: the function to execute + * @data: data to pass to the function * @ew: guaranteed storage for the execute work structure (must * be available when the work executes) * @@ -620,14 +573,15 @@ EXPORT_SYMBOL(cancel_rearming_delayed_work); * Returns: 0 - function was executed * 1 - function was scheduled for execution */ -int execute_in_process_context(work_func_t fn, struct execute_work *ew) +int execute_in_process_context(void (*fn)(void *data), void *data, + struct execute_work *ew) { if (!in_interrupt()) { - fn(&ew->work); + fn(data); return 0; } - INIT_WORK(&ew->work, fn); + INIT_WORK(&ew->work, fn, data); schedule_work(&ew->work); return 1; @@ -655,6 +609,7 @@ int current_is_keventd(void) } +#ifdef CONFIG_HOTPLUG_CPU /* Take the work from this (downed) CPU. */ static void take_over_work(struct workqueue_struct *wq, unsigned int cpu) { @@ -687,7 +642,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, mutex_lock(&workqueue_mutex); /* Create a new workqueue thread for it. */ list_for_each_entry(wq, &workqueues, list) { - if (!create_workqueue_thread(wq, hotcpu, 0)) { + if (!create_workqueue_thread(wq, hotcpu)) { printk("workqueue for %i failed\n", hotcpu); return NOTIFY_BAD; } @@ -737,6 +692,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, return NOTIFY_OK; } +#endif void init_workqueues(void) { diff --git a/trunk/lib/Kconfig.debug b/trunk/lib/Kconfig.debug index b75fed737f25..d3679103a8e4 100644 --- a/trunk/lib/Kconfig.debug +++ b/trunk/lib/Kconfig.debug @@ -1,7 +1,6 @@ config PRINTK_TIME bool "Show timing information on printks" - depends on PRINTK help Selecting this option causes timing information to be included in printk output. 
This allows you to measure diff --git a/trunk/lib/Makefile b/trunk/lib/Makefile index fea8f9035f07..cf98fabaa549 100644 --- a/trunk/lib/Makefile +++ b/trunk/lib/Makefile @@ -25,7 +25,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o -obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o +lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o obj-$(CONFIG_PLIST) += plist.o obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o diff --git a/trunk/lib/cmdline.c b/trunk/lib/cmdline.c index 8a5b5303bd4f..0331ed825ea7 100644 --- a/trunk/lib/cmdline.c +++ b/trunk/lib/cmdline.c @@ -16,23 +16,6 @@ #include #include -/* - * If a hyphen was found in get_option, this will handle the - * range of numbers, M-N. This will expand the range and insert - * the values[M, M+1, ..., N] into the ints array in get_options. - */ - -static int get_range(char **str, int *pint) -{ - int x, inc_counter, upper_range; - - (*str)++; - upper_range = simple_strtol((*str), NULL, 0); - inc_counter = upper_range - *pint; - for (x = *pint; x < upper_range; x++) - *pint++ = x; - return inc_counter; -} /** * get_option - Parse integer from an option string @@ -46,7 +29,6 @@ static int get_range(char **str, int *pint) * 0 : no int in string * 1 : int found, no subsequent comma * 2 : int found including a subsequent comma - * 3 : hyphen found to denote a range */ int get_option (char **str, int *pint) @@ -62,8 +44,6 @@ int get_option (char **str, int *pint) (*str)++; return 2; } - if (**str == '-') - return 3; return 1; } @@ -75,8 +55,7 @@ int get_option (char **str, int *pint) * @ints: integer array * * This function parses a string containing a comma-separated - * list of integers, a hyphen-separated range of _positive_ integers, - * or a combination of both. The parse halts when the array is + * list of integers. The parse halts when the array is * full, or when no more numbers can be retrieved from the * string. * @@ -93,18 +72,6 @@ char *get_options(const char *str, int nints, int *ints) res = get_option ((char **)&str, ints + i); if (res == 0) break; - if (res == 3) { - int range_nums; - range_nums = get_range((char **)&str, ints + i); - if (range_nums < 0) - break; - /* - * Decrement the result by one to leave out the - * last number in the range. 
The next iteration - * will handle the upper number in the range - */ - i += (range_nums - 1); - } i++; if (res == 1) break; diff --git a/trunk/lib/idr.c b/trunk/lib/idr.c index 71853531d3b0..16d2143fea48 100644 --- a/trunk/lib/idr.c +++ b/trunk/lib/idr.c @@ -33,7 +33,7 @@ #include #include -static struct kmem_cache *idr_layer_cache; +static kmem_cache_t *idr_layer_cache; static struct idr_layer *alloc_layer(struct idr *idp) { @@ -445,7 +445,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id) } EXPORT_SYMBOL(idr_replace); -static void idr_cache_ctor(void * idr_layer, struct kmem_cache *idr_layer_cache, +static void idr_cache_ctor(void * idr_layer, kmem_cache_t *idr_layer_cache, unsigned long flags) { memset(idr_layer, 0, sizeof(struct idr_layer)); diff --git a/trunk/lib/kobject.c b/trunk/lib/kobject.c index 7ce6dc138e90..744a4b102c7f 100644 --- a/trunk/lib/kobject.c +++ b/trunk/lib/kobject.c @@ -111,9 +111,10 @@ char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask) len = get_kobj_path_length(kobj); if (len == 0) return NULL; - path = kzalloc(len, gfp_mask); + path = kmalloc(len, gfp_mask); if (!path) return NULL; + memset(path, 0x00, len); fill_kobj_path(kobj, path, len); return path; diff --git a/trunk/lib/list_debug.c b/trunk/lib/list_debug.c index 4350ba9655bd..7ba9d823d388 100644 --- a/trunk/lib/list_debug.c +++ b/trunk/lib/list_debug.c @@ -21,15 +21,13 @@ void __list_add(struct list_head *new, struct list_head *next) { if (unlikely(next->prev != prev)) { - printk(KERN_ERR "list_add corruption. next->prev should be " - "prev (%p), but was %p. (next=%p).\n", - prev, next->prev, next); + printk(KERN_ERR "list_add corruption. next->prev should be %p, but was %p\n", + prev, next->prev); BUG(); } if (unlikely(prev->next != next)) { - printk(KERN_ERR "list_add corruption. prev->next should be " - "next (%p), but was %p. (prev=%p).\n", - next, prev->next, prev); + printk(KERN_ERR "list_add corruption. prev->next should be %p, but was %p\n", + next, prev->next); BUG(); } next->prev = new; diff --git a/trunk/lib/locking-selftest.c b/trunk/lib/locking-selftest.c index 280332c1827c..7945787f439a 100644 --- a/trunk/lib/locking-selftest.c +++ b/trunk/lib/locking-selftest.c @@ -963,9 +963,7 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask) printk("failed|"); } else { unexpected_testcase_failures++; - printk("FAILED|"); - dump_stack(); } } else { testcase_successes++; diff --git a/trunk/lib/radix-tree.c b/trunk/lib/radix-tree.c index d69ddbe43865..aa9bfd0bdbd1 100644 --- a/trunk/lib/radix-tree.c +++ b/trunk/lib/radix-tree.c @@ -2,7 +2,6 @@ * Copyright (C) 2001 Momchil Velikov * Portions Copyright (C) 2001 Christoph Hellwig * Copyright (C) 2005 SGI, Christoph Lameter - * Copyright (C) 2006 Nick Piggin * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -31,7 +30,6 @@ #include #include #include -#include #ifdef __KERNEL__ @@ -47,9 +45,7 @@ ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG) struct radix_tree_node { - unsigned int height; /* Height from the bottom */ unsigned int count; - struct rcu_head rcu_head; void *slots[RADIX_TREE_MAP_SIZE]; unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; }; @@ -67,7 +63,7 @@ static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH] __read_mostly; /* * Radix tree node cache. 
*/ -static struct kmem_cache *radix_tree_node_cachep; +static kmem_cache_t *radix_tree_node_cachep; /* * Per-cpu pool of preloaded nodes @@ -104,21 +100,13 @@ radix_tree_node_alloc(struct radix_tree_root *root) rtp->nr--; } } - BUG_ON(radix_tree_is_direct_ptr(ret)); return ret; } -static void radix_tree_node_rcu_free(struct rcu_head *head) -{ - struct radix_tree_node *node = - container_of(head, struct radix_tree_node, rcu_head); - kmem_cache_free(radix_tree_node_cachep, node); -} - static inline void radix_tree_node_free(struct radix_tree_node *node) { - call_rcu(&node->rcu_head, radix_tree_node_rcu_free); + kmem_cache_free(radix_tree_node_cachep, node); } /* @@ -234,12 +222,11 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) } do { - unsigned int newheight; if (!(node = radix_tree_node_alloc(root))) return -ENOMEM; /* Increase the height. */ - node->slots[0] = radix_tree_direct_to_ptr(root->rnode); + node->slots[0] = root->rnode; /* Propagate the aggregated tag info into the new root */ for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { @@ -247,11 +234,9 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) tag_set(node, tag, 0); } - newheight = root->height+1; - node->height = newheight; node->count = 1; - rcu_assign_pointer(root->rnode, node); - root->height = newheight; + root->rnode = node; + root->height++; } while (height > root->height); out: return 0; @@ -273,8 +258,6 @@ int radix_tree_insert(struct radix_tree_root *root, int offset; int error; - BUG_ON(radix_tree_is_direct_ptr(item)); - /* Make sure the tree is high enough. */ if (index > radix_tree_maxindex(root->height)) { error = radix_tree_extend(root, index); @@ -292,12 +275,11 @@ int radix_tree_insert(struct radix_tree_root *root, /* Have to add a child node. */ if (!(slot = radix_tree_node_alloc(root))) return -ENOMEM; - slot->height = height; if (node) { - rcu_assign_pointer(node->slots[offset], slot); + node->slots[offset] = slot; node->count++; } else - rcu_assign_pointer(root->rnode, slot); + root->rnode = slot; } /* Go a level down */ @@ -313,11 +295,11 @@ int radix_tree_insert(struct radix_tree_root *root, if (node) { node->count++; - rcu_assign_pointer(node->slots[offset], item); + node->slots[offset] = item; BUG_ON(tag_get(node, 0, offset)); BUG_ON(tag_get(node, 1, offset)); } else { - rcu_assign_pointer(root->rnode, radix_tree_ptr_to_direct(item)); + root->rnode = item; BUG_ON(root_tag_get(root, 0)); BUG_ON(root_tag_get(root, 1)); } @@ -326,54 +308,49 @@ int radix_tree_insert(struct radix_tree_root *root, } EXPORT_SYMBOL(radix_tree_insert); -/** - * radix_tree_lookup_slot - lookup a slot in a radix tree - * @root: radix tree root - * @index: index key - * - * Returns: the slot corresponding to the position @index in the - * radix tree @root. This is useful for update-if-exists operations. - * - * This function cannot be called under rcu_read_lock, it must be - * excluded from writers, as must the returned slot for subsequent - * use by radix_tree_deref_slot() and radix_tree_replace slot. - * Caller must hold tree write locked across slot lookup and - * replace. 
- */ -void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) +static inline void **__lookup_slot(struct radix_tree_root *root, + unsigned long index) { unsigned int height, shift; - struct radix_tree_node *node, **slot; - - node = root->rnode; - if (node == NULL) - return NULL; + struct radix_tree_node **slot; - if (radix_tree_is_direct_ptr(node)) { - if (index > 0) - return NULL; - return (void **)&root->rnode; - } + height = root->height; - height = node->height; if (index > radix_tree_maxindex(height)) return NULL; + if (height == 0 && root->rnode) + return (void **)&root->rnode; + shift = (height-1) * RADIX_TREE_MAP_SHIFT; + slot = &root->rnode; - do { - slot = (struct radix_tree_node **) - (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); - node = *slot; - if (node == NULL) + while (height > 0) { + if (*slot == NULL) return NULL; + slot = (struct radix_tree_node **) + ((*slot)->slots + + ((index >> shift) & RADIX_TREE_MAP_MASK)); shift -= RADIX_TREE_MAP_SHIFT; height--; - } while (height > 0); + } return (void **)slot; } + +/** + * radix_tree_lookup_slot - lookup a slot in a radix tree + * @root: radix tree root + * @index: index key + * + * Lookup the slot corresponding to the position @index in the radix tree + * @root. This is useful for update-if-exists operations. + */ +void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) +{ + return __lookup_slot(root, index); +} EXPORT_SYMBOL(radix_tree_lookup_slot); /** @@ -382,45 +359,13 @@ EXPORT_SYMBOL(radix_tree_lookup_slot); * @index: index key * * Lookup the item at the position @index in the radix tree @root. - * - * This function can be called under rcu_read_lock, however the caller - * must manage lifetimes of leaf nodes (eg. RCU may also be used to free - * them safely). No RCU barriers are required to access or modify the - * returned item, however. */ void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) { - unsigned int height, shift; - struct radix_tree_node *node, **slot; - - node = rcu_dereference(root->rnode); - if (node == NULL) - return NULL; - - if (radix_tree_is_direct_ptr(node)) { - if (index > 0) - return NULL; - return radix_tree_direct_to_ptr(node); - } - - height = node->height; - if (index > radix_tree_maxindex(height)) - return NULL; - - shift = (height-1) * RADIX_TREE_MAP_SHIFT; + void **slot; - do { - slot = (struct radix_tree_node **) - (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); - node = rcu_dereference(*slot); - if (node == NULL) - return NULL; - - shift -= RADIX_TREE_MAP_SHIFT; - height--; - } while (height > 0); - - return node; + slot = __lookup_slot(root, index); + return slot != NULL ? 
*slot : NULL; } EXPORT_SYMBOL(radix_tree_lookup); @@ -550,30 +495,27 @@ int radix_tree_tag_get(struct radix_tree_root *root, unsigned long index, unsigned int tag) { unsigned int height, shift; - struct radix_tree_node *node; + struct radix_tree_node *slot; int saw_unset_tag = 0; - /* check the root's tag bit */ - if (!root_tag_get(root, tag)) + height = root->height; + if (index > radix_tree_maxindex(height)) return 0; - node = rcu_dereference(root->rnode); - if (node == NULL) + /* check the root's tag bit */ + if (!root_tag_get(root, tag)) return 0; - if (radix_tree_is_direct_ptr(node)) - return (index == 0); - - height = node->height; - if (index > radix_tree_maxindex(height)) - return 0; + if (height == 0) + return 1; shift = (height - 1) * RADIX_TREE_MAP_SHIFT; + slot = root->rnode; for ( ; ; ) { int offset; - if (node == NULL) + if (slot == NULL) return 0; offset = (index >> shift) & RADIX_TREE_MAP_MASK; @@ -582,15 +524,15 @@ int radix_tree_tag_get(struct radix_tree_root *root, * This is just a debug check. Later, we can bale as soon as * we see an unset tag. */ - if (!tag_get(node, tag, offset)) + if (!tag_get(slot, tag, offset)) saw_unset_tag = 1; if (height == 1) { - int ret = tag_get(node, tag, offset); + int ret = tag_get(slot, tag, offset); BUG_ON(ret && saw_unset_tag); return !!ret; } - node = rcu_dereference(node->slots[offset]); + slot = slot->slots[offset]; shift -= RADIX_TREE_MAP_SHIFT; height--; } @@ -599,45 +541,47 @@ EXPORT_SYMBOL(radix_tree_tag_get); #endif static unsigned int -__lookup(struct radix_tree_node *slot, void **results, unsigned long index, +__lookup(struct radix_tree_root *root, void **results, unsigned long index, unsigned int max_items, unsigned long *next_index) { unsigned int nr_found = 0; unsigned int shift, height; + struct radix_tree_node *slot; unsigned long i; - height = slot->height; - if (height == 0) + height = root->height; + if (height == 0) { + if (root->rnode && index == 0) + results[nr_found++] = root->rnode; goto out; + } + shift = (height-1) * RADIX_TREE_MAP_SHIFT; + slot = root->rnode; for ( ; height > 1; height--) { - i = (index >> shift) & RADIX_TREE_MAP_MASK; - for (;;) { + + for (i = (index >> shift) & RADIX_TREE_MAP_MASK ; + i < RADIX_TREE_MAP_SIZE; i++) { if (slot->slots[i] != NULL) break; index &= ~((1UL << shift) - 1); index += 1UL << shift; if (index == 0) goto out; /* 32-bit wraparound */ - i++; - if (i == RADIX_TREE_MAP_SIZE) - goto out; } + if (i == RADIX_TREE_MAP_SIZE) + goto out; shift -= RADIX_TREE_MAP_SHIFT; - slot = rcu_dereference(slot->slots[i]); - if (slot == NULL) - goto out; + slot = slot->slots[i]; } /* Bottom level: grab some items */ for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) { - struct radix_tree_node *node; index++; - node = slot->slots[i]; - if (node) { - results[nr_found++] = rcu_dereference(node); + if (slot->slots[i]) { + results[nr_found++] = slot->slots[i]; if (nr_found == max_items) goto out; } @@ -659,51 +603,28 @@ __lookup(struct radix_tree_node *slot, void **results, unsigned long index, * *@results. * * The implementation is naive. - * - * Like radix_tree_lookup, radix_tree_gang_lookup may be called under - * rcu_read_lock. In this case, rather than the returned results being - * an atomic snapshot of the tree at a single point in time, the semantics - * of an RCU protected gang lookup are as though multiple radix_tree_lookups - * have been issued in individual locks, and results stored in 'results'. 
*/ unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items) { - unsigned long max_index; - struct radix_tree_node *node; + const unsigned long max_index = radix_tree_maxindex(root->height); unsigned long cur_index = first_index; - unsigned int ret; - - node = rcu_dereference(root->rnode); - if (!node) - return 0; + unsigned int ret = 0; - if (radix_tree_is_direct_ptr(node)) { - if (first_index > 0) - return 0; - node = radix_tree_direct_to_ptr(node); - results[0] = rcu_dereference(node); - return 1; - } - - max_index = radix_tree_maxindex(node->height); - - ret = 0; while (ret < max_items) { unsigned int nr_found; unsigned long next_index; /* Index of next search */ if (cur_index > max_index) break; - nr_found = __lookup(node, results + ret, cur_index, + nr_found = __lookup(root, results + ret, cur_index, max_items - ret, &next_index); ret += nr_found; if (next_index == 0) break; cur_index = next_index; } - return ret; } EXPORT_SYMBOL(radix_tree_gang_lookup); @@ -713,64 +634,55 @@ EXPORT_SYMBOL(radix_tree_gang_lookup); * open-coding the search. */ static unsigned int -__lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index, +__lookup_tag(struct radix_tree_root *root, void **results, unsigned long index, unsigned int max_items, unsigned long *next_index, unsigned int tag) { unsigned int nr_found = 0; - unsigned int shift, height; + unsigned int shift; + unsigned int height = root->height; + struct radix_tree_node *slot; - height = slot->height; - if (height == 0) + if (height == 0) { + if (root->rnode && index == 0) + results[nr_found++] = root->rnode; goto out; - shift = (height-1) * RADIX_TREE_MAP_SHIFT; + } - while (height > 0) { - unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK ; + shift = (height - 1) * RADIX_TREE_MAP_SHIFT; + slot = root->rnode; + + do { + unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK; - for (;;) { - if (tag_get(slot, tag, i)) + for ( ; i < RADIX_TREE_MAP_SIZE; i++) { + if (tag_get(slot, tag, i)) { + BUG_ON(slot->slots[i] == NULL); break; + } index &= ~((1UL << shift) - 1); index += 1UL << shift; if (index == 0) goto out; /* 32-bit wraparound */ - i++; - if (i == RADIX_TREE_MAP_SIZE) - goto out; } + if (i == RADIX_TREE_MAP_SIZE) + goto out; height--; if (height == 0) { /* Bottom level: grab some items */ unsigned long j = index & RADIX_TREE_MAP_MASK; for ( ; j < RADIX_TREE_MAP_SIZE; j++) { - struct radix_tree_node *node; index++; - if (!tag_get(slot, tag, j)) - continue; - node = slot->slots[j]; - /* - * Even though the tag was found set, we need to - * recheck that we have a non-NULL node, because - * if this lookup is lockless, it may have been - * subsequently deleted. - * - * Similar care must be taken in any place that - * lookup ->slots[x] without a lock (ie. can't - * rely on its value remaining the same). 
- */ - if (node) { - node = rcu_dereference(node); - results[nr_found++] = node; + if (tag_get(slot, tag, j)) { + BUG_ON(slot->slots[j] == NULL); + results[nr_found++] = slot->slots[j]; if (nr_found == max_items) goto out; } } } shift -= RADIX_TREE_MAP_SHIFT; - slot = rcu_dereference(slot->slots[i]); - if (slot == NULL) - break; - } + slot = slot->slots[i]; + } while (height > 0); out: *next_index = index; return nr_found; @@ -794,44 +706,27 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items, unsigned int tag) { - struct radix_tree_node *node; - unsigned long max_index; + const unsigned long max_index = radix_tree_maxindex(root->height); unsigned long cur_index = first_index; - unsigned int ret; + unsigned int ret = 0; /* check the root's tag bit */ if (!root_tag_get(root, tag)) return 0; - node = rcu_dereference(root->rnode); - if (!node) - return 0; - - if (radix_tree_is_direct_ptr(node)) { - if (first_index > 0) - return 0; - node = radix_tree_direct_to_ptr(node); - results[0] = rcu_dereference(node); - return 1; - } - - max_index = radix_tree_maxindex(node->height); - - ret = 0; while (ret < max_items) { unsigned int nr_found; unsigned long next_index; /* Index of next search */ if (cur_index > max_index) break; - nr_found = __lookup_tag(node, results + ret, cur_index, + nr_found = __lookup_tag(root, results + ret, cur_index, max_items - ret, &next_index, tag); ret += nr_found; if (next_index == 0) break; cur_index = next_index; } - return ret; } EXPORT_SYMBOL(radix_tree_gang_lookup_tag); @@ -847,19 +742,8 @@ static inline void radix_tree_shrink(struct radix_tree_root *root) root->rnode->count == 1 && root->rnode->slots[0]) { struct radix_tree_node *to_free = root->rnode; - void *newptr; - /* - * We don't need rcu_assign_pointer(), since we are simply - * moving the node from one part of the tree to another. If - * it was safe to dereference the old pointer to it - * (to_free->slots[0]), it will be safe to dereference the new - * one (root->rnode). - */ - newptr = to_free->slots[0]; - if (root->height == 1) - newptr = radix_tree_ptr_to_direct(newptr); - root->rnode = newptr; + root->rnode = to_free->slots[0]; root->height--; /* must only free zeroed nodes into the slab */ tag_clear(to_free, 0, 0); @@ -883,7 +767,6 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) { struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path; struct radix_tree_node *slot = NULL; - struct radix_tree_node *to_free; unsigned int height, shift; int tag; int offset; @@ -894,7 +777,6 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) slot = root->rnode; if (height == 0 && root->rnode) { - slot = radix_tree_direct_to_ptr(slot); root_tag_clear_all(root); root->rnode = NULL; goto out; @@ -927,17 +809,10 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) radix_tree_tag_clear(root, index, tag); } - to_free = NULL; /* Now free the nodes we do not need anymore */ while (pathp->node) { pathp->node->slots[pathp->offset] = NULL; pathp->node->count--; - /* - * Queue the node for deferred freeing after the - * last reference to it disappears (set NULL, above). 
- */ - if (to_free) - radix_tree_node_free(to_free); if (pathp->node->count) { if (pathp->node == root->rnode) @@ -946,15 +821,13 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) } /* Node with zero slots in use so free it */ - to_free = pathp->node; - pathp--; + radix_tree_node_free(pathp->node); + pathp--; } root_tag_clear_all(root); root->height = 0; root->rnode = NULL; - if (to_free) - radix_tree_node_free(to_free); out: return slot; @@ -973,7 +846,7 @@ int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag) EXPORT_SYMBOL(radix_tree_tagged); static void -radix_tree_node_ctor(void *node, struct kmem_cache *cachep, unsigned long flags) +radix_tree_node_ctor(void *node, kmem_cache_t *cachep, unsigned long flags) { memset(node, 0, sizeof(struct radix_tree_node)); } @@ -996,6 +869,7 @@ static __init void radix_tree_init_maxindex(void) height_to_maxindex[i] = __maxindex(i); } +#ifdef CONFIG_HOTPLUG_CPU static int radix_tree_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) @@ -1015,6 +889,7 @@ static int radix_tree_callback(struct notifier_block *nfb, } return NOTIFY_OK; } +#endif /* CONFIG_HOTPLUG_CPU */ void __init radix_tree_init(void) { diff --git a/trunk/mm/allocpercpu.c b/trunk/mm/allocpercpu.c index b2486cf887a0..eaa9abeea536 100644 --- a/trunk/mm/allocpercpu.c +++ b/trunk/mm/allocpercpu.c @@ -17,9 +17,10 @@ void percpu_depopulate(void *__pdata, int cpu) { struct percpu_data *pdata = __percpu_disguise(__pdata); - - kfree(pdata->ptrs[cpu]); - pdata->ptrs[cpu] = NULL; + if (pdata->ptrs[cpu]) { + kfree(pdata->ptrs[cpu]); + pdata->ptrs[cpu] = NULL; + } } EXPORT_SYMBOL_GPL(percpu_depopulate); @@ -122,8 +123,6 @@ EXPORT_SYMBOL_GPL(__percpu_alloc_mask); */ void percpu_free(void *__pdata) { - if (unlikely(!__pdata)) - return; __percpu_depopulate_mask(__pdata, &cpu_possible_map); kfree(__percpu_disguise(__pdata)); } diff --git a/trunk/mm/bootmem.c b/trunk/mm/bootmem.c index 00a96970b237..d53112fcb404 100644 --- a/trunk/mm/bootmem.c +++ b/trunk/mm/bootmem.c @@ -27,6 +27,8 @@ unsigned long max_low_pfn; unsigned long min_low_pfn; unsigned long max_pfn; +EXPORT_UNUSED_SYMBOL(max_pfn); /* June 2006 */ + static LIST_HEAD(bdata_list); #ifdef CONFIG_CRASH_DUMP /* @@ -194,10 +196,6 @@ __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size, if (limit && bdata->node_boot_start >= limit) return NULL; - /* on nodes without memory - bootmem_map is NULL */ - if (!bdata->node_bootmem_map) - return NULL; - end_pfn = bdata->node_low_pfn; limit = PFN_DOWN(limit); if (limit && end_pfn > limit) diff --git a/trunk/mm/filemap.c b/trunk/mm/filemap.c index af7e2f5caea9..13df01c50479 100644 --- a/trunk/mm/filemap.c +++ b/trunk/mm/filemap.c @@ -1445,6 +1445,7 @@ struct page *filemap_nopage(struct vm_area_struct *area, * effect. */ error = page_cache_read(file, pgoff); + grab_swap_token(); /* * The page we want has now been added to the page cache. 
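As context for the kernel/workqueue.c hunks above, which restore the three-argument work API (handlers take a void * cookie, and delayed work is queued on a plain struct work_struct), the following is a minimal illustrative sketch of how a caller uses that restored interface. The struct my_dev, my_poll() and the one-second poll interval are hypothetical names for illustration only and are not part of the patch.

/*
 * Illustrative driver fragment (not from the patch): the handler takes a
 * void *data cookie, and delayed work is scheduled on a plain
 * struct work_struct, matching the signatures restored above.
 */
#include <linux/workqueue.h>

struct my_dev {
        struct work_struct poll_work;   /* also carries the delay timer in this API */
        int pending;
};

static void my_poll(void *data)
{
        struct my_dev *dev = data;

        dev->pending = 0;
        /* ... service the hardware ... */
}

static void my_dev_init(struct my_dev *dev)
{
        /* three-argument INIT_WORK: work, handler, data cookie */
        INIT_WORK(&dev->poll_work, my_poll, dev);
        /* run my_poll(dev) from keventd roughly one second from now */
        schedule_delayed_work(&dev->poll_work, HZ);
}

static void my_dev_exit(struct my_dev *dev)
{
        /* safe even if my_poll() re-queues itself */
        cancel_rearming_delayed_work(&dev->poll_work);
        flush_scheduled_work();
}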
diff --git a/trunk/mm/fremap.c b/trunk/mm/fremap.c index b77a002c3352..7a9d0f5d246d 100644 --- a/trunk/mm/fremap.c +++ b/trunk/mm/fremap.c @@ -101,6 +101,7 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, { int err = -ENOMEM; pte_t *pte; + pte_t pte_val; spinlock_t *ptl; pte = get_locked_pte(mm, addr, &ptl); @@ -113,6 +114,7 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, } set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff)); + pte_val = *pte; /* * We don't need to run update_mmu_cache() here because the "file pte" * being installed by install_file_pte() is not a real pte - it's a diff --git a/trunk/mm/hugetlb.c b/trunk/mm/hugetlb.c index 0ccc7f230252..a088f593a807 100644 --- a/trunk/mm/hugetlb.c +++ b/trunk/mm/hugetlb.c @@ -109,7 +109,7 @@ static int alloc_fresh_huge_page(void) if (nid == MAX_NUMNODES) nid = first_node(node_online_map); if (page) { - set_compound_page_dtor(page, free_huge_page); + page[1].lru.next = (void *)free_huge_page; /* dtor */ spin_lock(&hugetlb_lock); nr_huge_pages++; nr_huge_pages_node[page_to_nid(page)]++; @@ -344,6 +344,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, entry = *src_pte; ptepage = pte_page(entry); get_page(ptepage); + add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE); set_huge_pte_at(dst, addr, dst_pte, entry); } spin_unlock(&src->page_table_lock); @@ -364,11 +365,6 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, pte_t pte; struct page *page; struct page *tmp; - /* - * A page gathering list, protected by per file i_mmap_lock. The - * lock is used to avoid list corruption from multiple unmapping - * of the same page since we are using page->lru. - */ LIST_HEAD(page_list); WARN_ON(!is_vm_hugetlb_page(vma)); @@ -376,21 +372,24 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, BUG_ON(end & ~HPAGE_MASK); spin_lock(&mm->page_table_lock); + + /* Update high watermark before we lower rss */ + update_hiwater_rss(mm); + for (address = start; address < end; address += HPAGE_SIZE) { ptep = huge_pte_offset(mm, address); if (!ptep) continue; - if (huge_pmd_unshare(mm, &address, ptep)) - continue; - pte = huge_ptep_get_and_clear(mm, address, ptep); if (pte_none(pte)) continue; page = pte_page(pte); list_add(&page->lru, &page_list); + add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE)); } + spin_unlock(&mm->page_table_lock); flush_tlb_range(vma, start, end); list_for_each_entry_safe(page, tmp, &page_list, lru) { @@ -516,6 +515,7 @@ int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, if (!pte_none(*ptep)) goto backout; + add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE); new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_SHARED))); set_huge_pte_at(mm, address, ptep, new_pte); @@ -653,14 +653,11 @@ void hugetlb_change_protection(struct vm_area_struct *vma, BUG_ON(address >= end); flush_cache_range(vma, address, end); - spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); spin_lock(&mm->page_table_lock); for (; address < end; address += HPAGE_SIZE) { ptep = huge_pte_offset(mm, address); if (!ptep) continue; - if (huge_pmd_unshare(mm, &address, ptep)) - continue; if (!pte_none(*ptep)) { pte = huge_ptep_get_and_clear(mm, address, ptep); pte = pte_mkhuge(pte_modify(pte, newprot)); @@ -669,7 +666,6 @@ void hugetlb_change_protection(struct vm_area_struct *vma, } } spin_unlock(&mm->page_table_lock); - spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); 
flush_tlb_range(vma, start, end); } diff --git a/trunk/mm/memory.c b/trunk/mm/memory.c index 4198df0dff1c..156861fcac43 100644 --- a/trunk/mm/memory.c +++ b/trunk/mm/memory.c @@ -1902,6 +1902,7 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end) return 0; } +EXPORT_UNUSED_SYMBOL(vmtruncate_range); /* June 2006 */ /** * swapin_readahead - swap in pages in hope we need them soon @@ -1990,7 +1991,6 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, delayacct_set_flag(DELAYACCT_PF_SWAPIN); page = lookup_swap_cache(entry); if (!page) { - grab_swap_token(); /* Contend for token _before_ read-in */ swapin_readahead(entry, address, vma); page = read_swap_cache_async(entry, vma, address); if (!page) { @@ -2008,6 +2008,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, /* Had to read the page from swap area: Major fault */ ret = VM_FAULT_MAJOR; count_vm_event(PGMAJFAULT); + grab_swap_token(); } delayacct_clear_flag(DELAYACCT_PF_SWAPIN); diff --git a/trunk/mm/memory_hotplug.c b/trunk/mm/memory_hotplug.c index 0c055a090f4d..fd678a662eae 100644 --- a/trunk/mm/memory_hotplug.c +++ b/trunk/mm/memory_hotplug.c @@ -72,6 +72,7 @@ static int __add_zone(struct zone *zone, unsigned long phys_start_pfn) return ret; } memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn); + zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages); return 0; } diff --git a/trunk/mm/mempolicy.c b/trunk/mm/mempolicy.c index b917d6fdc1bb..617fb31086ee 100644 --- a/trunk/mm/mempolicy.c +++ b/trunk/mm/mempolicy.c @@ -141,11 +141,9 @@ static struct zonelist *bind_zonelist(nodemask_t *nodes) enum zone_type k; max = 1 + MAX_NR_ZONES * nodes_weight(*nodes); - max++; /* space for zlcache_ptr (see mmzone.h) */ zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL); if (!zl) return NULL; - zl->zlcache_ptr = NULL; num = 0; /* First put in the highest zones from all nodes, then all the next lower zones etc. Avoid empty zones because the memory allocator @@ -221,7 +219,7 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd, orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); do { struct page *page; - int nid; + unsigned int nid; if (!pte_present(*pte)) continue; @@ -1326,7 +1324,7 @@ struct mempolicy *__mpol_copy(struct mempolicy *old) atomic_set(&new->refcnt, 1); if (new->policy == MPOL_BIND) { int sz = ksize(old->v.zonelist); - new->v.zonelist = kmemdup(old->v.zonelist, sz, GFP_KERNEL); + new->v.zonelist = kmemdup(old->v.zonelist, sz, SLAB_KERNEL); if (!new->v.zonelist) { kmem_cache_free(policy_cache, new); return ERR_PTR(-ENOMEM); @@ -1707,8 +1705,8 @@ void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) * Display pages allocated per node and memory policy via /proc. */ -static const char * const policy_types[] = - { "default", "prefer", "bind", "interleave" }; +static const char *policy_types[] = { "default", "prefer", "bind", + "interleave" }; /* * Convert a mempolicy into a string. 
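The lib/radix-tree.c hunks above drop the RCU-based lookup path, so callers are again expected to provide their own exclusion around radix_tree_lookup_slot() and around plain stores into the returned slot, as the mm/migrate.c hunk below does under mapping->tree_lock. A minimal sketch under that assumption follows; my_tree, tree_lock and replace_item() are illustrative names only, not part of the patch.

/*
 * Illustrative fragment (not from the patch): look up a slot and replace
 * its contents under the caller's own lock, in the style the restored
 * non-RCU radix tree expects.
 */
#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static RADIX_TREE(my_tree, GFP_ATOMIC);         /* hypothetical tree */
static DEFINE_SPINLOCK(tree_lock);              /* hypothetical exclusion */

static void *replace_item(unsigned long index, void *new)
{
        void **slot;
        void *old = NULL;

        spin_lock(&tree_lock);
        slot = radix_tree_lookup_slot(&my_tree, index);
        if (slot) {
                old = *slot;
                *slot = new;    /* plain store; no rcu_assign_pointer() needed */
        }
        spin_unlock(&tree_lock);
        return old;
}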
diff --git a/trunk/mm/migrate.c b/trunk/mm/migrate.c index e9b161bde95b..b4979d423d2b 100644 --- a/trunk/mm/migrate.c +++ b/trunk/mm/migrate.c @@ -294,7 +294,7 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, static int migrate_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page) { - void **pslot; + struct page **radix_pointer; if (!mapping) { /* Anonymous page */ @@ -305,11 +305,12 @@ static int migrate_page_move_mapping(struct address_space *mapping, write_lock_irq(&mapping->tree_lock); - pslot = radix_tree_lookup_slot(&mapping->page_tree, - page_index(page)); + radix_pointer = (struct page **)radix_tree_lookup_slot( + &mapping->page_tree, + page_index(page)); if (page_count(page) != 2 + !!PagePrivate(page) || - (struct page *)radix_tree_deref_slot(pslot) != page) { + *radix_pointer != page) { write_unlock_irq(&mapping->tree_lock); return -EAGAIN; } @@ -317,7 +318,7 @@ static int migrate_page_move_mapping(struct address_space *mapping, /* * Now we know that no one else is looking at the page. */ - get_page(newpage); /* add cache reference */ + get_page(newpage); #ifdef CONFIG_SWAP if (PageSwapCache(page)) { SetPageSwapCache(newpage); @@ -325,14 +326,8 @@ static int migrate_page_move_mapping(struct address_space *mapping, } #endif - radix_tree_replace_slot(pslot, newpage); - - /* - * Drop cache reference from old page. - * We know this isn't the last reference. - */ + *radix_pointer = newpage; __put_page(page); - write_unlock_irq(&mapping->tree_lock); return 0; diff --git a/trunk/mm/mlock.c b/trunk/mm/mlock.c index 3446b7ef731e..b90c59573abf 100644 --- a/trunk/mm/mlock.c +++ b/trunk/mm/mlock.c @@ -65,7 +65,7 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, ret = make_pages_present(start, end); } - mm->locked_vm -= pages; + vma->vm_mm->locked_vm -= pages; out: if (ret == -ENOMEM) ret = -EAGAIN; diff --git a/trunk/mm/mmap.c b/trunk/mm/mmap.c index 7be110e98d4c..7b40abd7cba2 100644 --- a/trunk/mm/mmap.c +++ b/trunk/mm/mmap.c @@ -1736,7 +1736,7 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, if (mm->map_count >= sysctl_max_map_count) return -ENOMEM; - new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); if (!new) return -ENOMEM; @@ -2057,7 +2057,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, vma_start < new_vma->vm_end) *vmap = new_vma; } else { - new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); if (new_vma) { *new_vma = *vma; pol = mpol_copy(vma_policy(vma)); diff --git a/trunk/mm/mmzone.c b/trunk/mm/mmzone.c index eb5838634f18..febea1c98168 100644 --- a/trunk/mm/mmzone.c +++ b/trunk/mm/mmzone.c @@ -14,6 +14,8 @@ struct pglist_data *first_online_pgdat(void) return NODE_DATA(first_online_node); } +EXPORT_UNUSED_SYMBOL(first_online_pgdat); /* June 2006 */ + struct pglist_data *next_online_pgdat(struct pglist_data *pgdat) { int nid = next_online_node(pgdat->node_id); @@ -22,6 +24,8 @@ struct pglist_data *next_online_pgdat(struct pglist_data *pgdat) return NULL; return NODE_DATA(nid); } +EXPORT_UNUSED_SYMBOL(next_online_pgdat); /* June 2006 */ + /* * next_zone - helper magic for for_each_zone() @@ -41,4 +45,5 @@ struct zone *next_zone(struct zone *zone) } return zone; } +EXPORT_UNUSED_SYMBOL(next_zone); /* June 2006 */ diff --git a/trunk/mm/nommu.c b/trunk/mm/nommu.c index af874569d0f1..8bdde9508f3b 100644 --- a/trunk/mm/nommu.c +++ b/trunk/mm/nommu.c 
@@ -497,17 +497,15 @@ static int validate_mmap_request(struct file *file, (flags & MAP_TYPE) != MAP_SHARED) return -EINVAL; - if (!len) - return -EINVAL; + if (PAGE_ALIGN(len) == 0) + return addr; - /* Careful about overflows.. */ - len = PAGE_ALIGN(len); - if (!len || len > TASK_SIZE) - return -ENOMEM; + if (len > TASK_SIZE) + return -EINVAL; /* offset overflow? */ if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) - return -EOVERFLOW; + return -EINVAL; if (file) { /* validate file mapping requests */ @@ -808,9 +806,10 @@ unsigned long do_mmap_pgoff(struct file *file, vm_flags = determine_vm_flags(file, prot, flags, capabilities); /* we're going to need to record the mapping if it works */ - vml = kzalloc(sizeof(struct vm_list_struct), GFP_KERNEL); + vml = kmalloc(sizeof(struct vm_list_struct), GFP_KERNEL); if (!vml) goto error_getting_vml; + memset(vml, 0, sizeof(*vml)); down_write(&nommu_vma_sem); @@ -886,10 +885,11 @@ unsigned long do_mmap_pgoff(struct file *file, } /* we're going to need a VMA struct as well */ - vma = kzalloc(sizeof(struct vm_area_struct), GFP_KERNEL); + vma = kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL); if (!vma) goto error_getting_vma; + memset(vma, 0, sizeof(*vma)); INIT_LIST_HEAD(&vma->anon_vma_node); atomic_set(&vma->vm_usage, 1); if (file) diff --git a/trunk/mm/oom_kill.c b/trunk/mm/oom_kill.c index 223d9ccb7d64..2e3ce3a928b9 100644 --- a/trunk/mm/oom_kill.c +++ b/trunk/mm/oom_kill.c @@ -264,7 +264,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints) * flag though it's unlikely that we select a process with CAP_SYS_RAW_IO * set. */ -static void __oom_kill_task(struct task_struct *p, int verbose) +static void __oom_kill_task(struct task_struct *p, const char *message) { if (is_init(p)) { WARN_ON(1); @@ -278,8 +278,10 @@ static void __oom_kill_task(struct task_struct *p, int verbose) return; } - if (verbose) - printk(KERN_ERR "Killed process %d (%s)\n", p->pid, p->comm); + if (message) { + printk(KERN_ERR "%s: Killed process %d (%s).\n", + message, p->pid, p->comm); + } /* * We give our sacrificial lamb high priority and access to @@ -292,7 +294,7 @@ static void __oom_kill_task(struct task_struct *p, int verbose) force_sig(SIGKILL, p); } -static int oom_kill_task(struct task_struct *p) +static int oom_kill_task(struct task_struct *p, const char *message) { struct mm_struct *mm; struct task_struct *g, *q; @@ -311,25 +313,15 @@ static int oom_kill_task(struct task_struct *p) if (mm == NULL) return 1; - /* - * Don't kill the process if any threads are set to OOM_DISABLE - */ - do_each_thread(g, q) { - if (q->mm == mm && p->oomkilladj == OOM_DISABLE) - return 1; - } while_each_thread(g, q); - - __oom_kill_task(p, 1); - + __oom_kill_task(p, message); /* * kill all processes that share the ->mm (i.e. all threads), - * but are in a different thread group. Don't let them have access - * to memory reserves though, otherwise we might deplete all memory. 
+ * but are in a different thread group */ - do_each_thread(g, q) { + do_each_thread(g, q) if (q->mm == mm && q->tgid != p->tgid) - force_sig(SIGKILL, p); - } while_each_thread(g, q); + __oom_kill_task(q, message); + while_each_thread(g, q); return 0; } @@ -345,22 +337,21 @@ static int oom_kill_process(struct task_struct *p, unsigned long points, * its children or threads, just set TIF_MEMDIE so it can die quickly */ if (p->flags & PF_EXITING) { - __oom_kill_task(p, 0); + __oom_kill_task(p, NULL); return 0; } - printk(KERN_ERR "%s: kill process %d (%s) score %li or a child\n", - message, p->pid, p->comm, points); - + printk(KERN_ERR "Out of Memory: Kill process %d (%s) score %li" + " and children.\n", p->pid, p->comm, points); /* Try to kill a child first */ list_for_each(tsk, &p->children) { c = list_entry(tsk, struct task_struct, sibling); if (c->mm == p->mm) continue; - if (!oom_kill_task(c)) + if (!oom_kill_task(c, message)) return 0; } - return oom_kill_task(p); + return oom_kill_task(p, message); } static BLOCKING_NOTIFIER_HEAD(oom_notify_list); diff --git a/trunk/mm/page_alloc.c b/trunk/mm/page_alloc.c index cace22b3ac25..aa6fcc7ca66f 100644 --- a/trunk/mm/page_alloc.c +++ b/trunk/mm/page_alloc.c @@ -83,7 +83,14 @@ int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { EXPORT_SYMBOL(totalram_pages); -static char * const zone_names[MAX_NR_ZONES] = { +/* + * Used by page_zone() to look up the address of the struct zone whose + * id is encoded in the upper bits of page->flags + */ +struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly; +EXPORT_SYMBOL(zone_table); + +static char *zone_names[MAX_NR_ZONES] = { "DMA", #ifdef CONFIG_ZONE_DMA32 "DMA32", @@ -230,7 +237,7 @@ static void prep_compound_page(struct page *page, unsigned long order) int i; int nr_pages = 1 << order; - set_compound_page_dtor(page, free_compound_page); + page[1].lru.next = (void *)free_compound_page; /* set dtor */ page[1].lru.prev = (void *)order; for (i = 0; i < nr_pages; i++) { struct page *p = page + i; @@ -479,7 +486,7 @@ static void free_one_page(struct zone *zone, struct page *page, int order) spin_lock(&zone->lock); zone->all_unreclaimable = 0; zone->pages_scanned = 0; - __free_one_page(page, zone, order); + __free_one_page(page, zone ,order); spin_unlock(&zone->lock); } @@ -598,8 +605,6 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) 1 << PG_checked | 1 << PG_mappedtodisk); set_page_private(page, 0); set_page_refcounted(page); - - arch_alloc_page(page, order); kernel_map_pages(page, 1 << order, 1); if (gfp_flags & __GFP_ZERO) @@ -685,15 +690,9 @@ void drain_node_pages(int nodeid) pcp = &pset->pcp[i]; if (pcp->count) { - int to_drain; - local_irq_save(flags); - if (pcp->count >= pcp->batch) - to_drain = pcp->batch; - else - to_drain = pcp->count; - free_pages_bulk(zone, to_drain, &pcp->list, 0); - pcp->count -= to_drain; + free_pages_bulk(zone, pcp->count, &pcp->list, 0); + pcp->count = 0; local_irq_restore(flags); } } @@ -701,6 +700,7 @@ void drain_node_pages(int nodeid) } #endif +#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU) static void __drain_pages(unsigned int cpu) { unsigned long flags; @@ -722,6 +722,7 @@ static void __drain_pages(unsigned int cpu) } } } +#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */ #ifdef CONFIG_PM @@ -924,160 +925,31 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark, return 1; } -#ifdef CONFIG_NUMA -/* - * zlc_setup - Setup for "zonelist cache". 
Uses cached zone data to - * skip over zones that are not allowed by the cpuset, or that have - * been recently (in last second) found to be nearly full. See further - * comments in mmzone.h. Reduces cache footprint of zonelist scans - * that have to skip over alot of full or unallowed zones. - * - * If the zonelist cache is present in the passed in zonelist, then - * returns a pointer to the allowed node mask (either the current - * tasks mems_allowed, or node_online_map.) - * - * If the zonelist cache is not available for this zonelist, does - * nothing and returns NULL. - * - * If the fullzones BITMAP in the zonelist cache is stale (more than - * a second since last zap'd) then we zap it out (clear its bits.) - * - * We hold off even calling zlc_setup, until after we've checked the - * first zone in the zonelist, on the theory that most allocations will - * be satisfied from that first zone, so best to examine that zone as - * quickly as we can. - */ -static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) -{ - struct zonelist_cache *zlc; /* cached zonelist speedup info */ - nodemask_t *allowednodes; /* zonelist_cache approximation */ - - zlc = zonelist->zlcache_ptr; - if (!zlc) - return NULL; - - if (jiffies - zlc->last_full_zap > 1 * HZ) { - bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); - zlc->last_full_zap = jiffies; - } - - allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ? - &cpuset_current_mems_allowed : - &node_online_map; - return allowednodes; -} - -/* - * Given 'z' scanning a zonelist, run a couple of quick checks to see - * if it is worth looking at further for free memory: - * 1) Check that the zone isn't thought to be full (doesn't have its - * bit set in the zonelist_cache fullzones BITMAP). - * 2) Check that the zones node (obtained from the zonelist_cache - * z_to_n[] mapping) is allowed in the passed in allowednodes mask. - * Return true (non-zero) if zone is worth looking at further, or - * else return false (zero) if it is not. - * - * This check -ignores- the distinction between various watermarks, - * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is - * found to be full for any variation of these watermarks, it will - * be considered full for up to one second by all requests, unless - * we are so low on memory on all allowed nodes that we are forced - * into the second scan of the zonelist. - * - * In the second scan we ignore this zonelist cache and exactly - * apply the watermarks to all zones, even it is slower to do so. - * We are low on memory in the second scan, and should leave no stone - * unturned looking for a free page. - */ -static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z, - nodemask_t *allowednodes) -{ - struct zonelist_cache *zlc; /* cached zonelist speedup info */ - int i; /* index of *z in zonelist zones */ - int n; /* node that zone *z is on */ - - zlc = zonelist->zlcache_ptr; - if (!zlc) - return 1; - - i = z - zonelist->zones; - n = zlc->z_to_n[i]; - - /* This zone is worth trying if it is allowed but not full */ - return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones); -} - /* - * Given 'z' scanning a zonelist, set the corresponding bit in - * zlc->fullzones, so that subsequent attempts to allocate a page - * from that zone don't waste time re-examining it. 
- */ -static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z) -{ - struct zonelist_cache *zlc; /* cached zonelist speedup info */ - int i; /* index of *z in zonelist zones */ - - zlc = zonelist->zlcache_ptr; - if (!zlc) - return; - - i = z - zonelist->zones; - - set_bit(i, zlc->fullzones); -} - -#else /* CONFIG_NUMA */ - -static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) -{ - return NULL; -} - -static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z, - nodemask_t *allowednodes) -{ - return 1; -} - -static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z) -{ -} -#endif /* CONFIG_NUMA */ - -/* - * get_page_from_freelist goes through the zonelist trying to allocate + * get_page_from_freeliest goes through the zonelist trying to allocate * a page. */ static struct page * get_page_from_freelist(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, int alloc_flags) { - struct zone **z; + struct zone **z = zonelist->zones; struct page *page = NULL; - int classzone_idx = zone_idx(zonelist->zones[0]); + int classzone_idx = zone_idx(*z); struct zone *zone; - nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */ - int zlc_active = 0; /* set if using zonelist_cache */ - int did_zlc_setup = 0; /* just call zlc_setup() one time */ -zonelist_scan: /* - * Scan zonelist, looking for a zone with enough free. + * Go through the zonelist once, looking for a zone with enough free. * See also cpuset_zone_allowed() comment in kernel/cpuset.c. */ - z = zonelist->zones; - do { - if (NUMA_BUILD && zlc_active && - !zlc_zone_worth_trying(zonelist, z, allowednodes)) - continue; zone = *z; if (unlikely(NUMA_BUILD && (gfp_mask & __GFP_THISNODE) && zone->zone_pgdat != zonelist->zones[0]->zone_pgdat)) break; if ((alloc_flags & ALLOC_CPUSET) && - !cpuset_zone_allowed(zone, gfp_mask)) - goto try_next_zone; + !cpuset_zone_allowed(zone, gfp_mask)) + continue; if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { unsigned long mark; @@ -1087,34 +959,18 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, mark = zone->pages_low; else mark = zone->pages_high; - if (!zone_watermark_ok(zone, order, mark, - classzone_idx, alloc_flags)) { + if (!zone_watermark_ok(zone , order, mark, + classzone_idx, alloc_flags)) if (!zone_reclaim_mode || !zone_reclaim(zone, gfp_mask, order)) - goto this_zone_full; - } + continue; } page = buffered_rmqueue(zonelist, zone, order, gfp_mask); - if (page) + if (page) { break; -this_zone_full: - if (NUMA_BUILD) - zlc_mark_zone_full(zonelist, z); -try_next_zone: - if (NUMA_BUILD && !did_zlc_setup) { - /* we do zlc_setup after the first zone is tried */ - allowednodes = zlc_setup(zonelist, alloc_flags); - zlc_active = 1; - did_zlc_setup = 1; } } while (*(++z) != NULL); - - if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) { - /* Disable zlc cache for second zonelist scan */ - zlc_active = 0; - goto zonelist_scan; - } return page; } @@ -1149,19 +1005,9 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order, if (page) goto got_pg; - /* - * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and - * __GFP_NOWARN set) should not cause reclaim since the subsystem - * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim - * using a larger set of nodes after it has established that the - * allowed per node queues are empty and that nodes are - * over allocated. 
- */ - if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE) - goto nopage; - - for (z = zonelist->zones; *z; z++) + do { wakeup_kswapd(*z, order); + } while (*(++z)); /* * OK, we're below the kswapd watermark and have kicked background @@ -1195,7 +1041,6 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order, /* This allocation should allow future memory freeing. */ -rebalance: if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) && !in_interrupt()) { if (!(gfp_mask & __GFP_NOMEMALLOC)) { @@ -1217,6 +1062,7 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order, if (!wait) goto nopage; +rebalance: cond_resched(); /* We now go into synchronous reclaim */ @@ -1416,7 +1262,7 @@ unsigned int nr_free_pagecache_pages(void) static inline void show_node(struct zone *zone) { if (NUMA_BUILD) - printk("Node %d ", zone_to_nid(zone)); + printk("Node %ld ", zone_to_nid(zone)); } void si_meminfo(struct sysinfo *val) @@ -1696,24 +1542,6 @@ static void __meminit build_zonelists(pg_data_t *pgdat) } } -/* Construct the zonelist performance cache - see further mmzone.h */ -static void __meminit build_zonelist_cache(pg_data_t *pgdat) -{ - int i; - - for (i = 0; i < MAX_NR_ZONES; i++) { - struct zonelist *zonelist; - struct zonelist_cache *zlc; - struct zone **z; - - zonelist = pgdat->node_zonelists + i; - zonelist->zlcache_ptr = zlc = &zonelist->zlcache; - bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); - for (z = zonelist->zones; *z; z++) - zlc->z_to_n[z - zonelist->zones] = zone_to_nid(*z); - } -} - #else /* CONFIG_NUMA */ static void __meminit build_zonelists(pg_data_t *pgdat) @@ -1751,26 +1579,14 @@ static void __meminit build_zonelists(pg_data_t *pgdat) } } -/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */ -static void __meminit build_zonelist_cache(pg_data_t *pgdat) -{ - int i; - - for (i = 0; i < MAX_NR_ZONES; i++) - pgdat->node_zonelists[i].zlcache_ptr = NULL; -} - #endif /* CONFIG_NUMA */ /* return values int ....just for stop_machine_run() */ static int __meminit __build_all_zonelists(void *dummy) { int nid; - - for_each_online_node(nid) { + for_each_online_node(nid) build_zonelists(NODE_DATA(nid)); - build_zonelist_cache(NODE_DATA(nid)); - } return 0; } @@ -1899,6 +1715,20 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone, } } +#define ZONETABLE_INDEX(x, zone_nr) ((x << ZONES_SHIFT) | zone_nr) +void zonetable_add(struct zone *zone, int nid, enum zone_type zid, + unsigned long pfn, unsigned long size) +{ + unsigned long snum = pfn_to_section_nr(pfn); + unsigned long end = pfn_to_section_nr(pfn + size); + + if (FLAGS_HAS_NODE) + zone_table[ZONETABLE_INDEX(nid, zid)] = zone; + else + for (; snum <= end; snum++) + zone_table[ZONETABLE_INDEX(snum, zid)] = zone; +} + #ifndef __HAVE_ARCH_MEMMAP_INIT #define memmap_init(size, nid, zone, start_pfn) \ memmap_init_zone((size), (nid), (zone), (start_pfn)) @@ -2051,16 +1881,16 @@ static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb, int ret = NOTIFY_OK; switch (action) { - case CPU_UP_PREPARE: - if (process_zones(cpu)) - ret = NOTIFY_BAD; - break; - case CPU_UP_CANCELED: - case CPU_DEAD: - free_zone_pagesets(cpu); - break; - default: - break; + case CPU_UP_PREPARE: + if (process_zones(cpu)) + ret = NOTIFY_BAD; + break; + case CPU_UP_CANCELED: + case CPU_DEAD: + free_zone_pagesets(cpu); + break; + default: + break; } return ret; } @@ -2591,6 +2421,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat, if (!size) continue; + zonetable_add(zone, nid, 
j, zone_start_pfn, size); ret = init_currently_empty_zone(zone, zone_start_pfn, size); BUG_ON(ret); zone_start_pfn += size; @@ -2905,6 +2736,7 @@ void __init free_area_init(unsigned long *zones_size) __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); } +#ifdef CONFIG_HOTPLUG_CPU static int page_alloc_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { @@ -2919,6 +2751,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self, } return NOTIFY_OK; } +#endif /* CONFIG_HOTPLUG_CPU */ void __init page_alloc_init(void) { @@ -3222,7 +3055,7 @@ void *__init alloc_large_system_hash(const char *tablename, /* allow the kernel cmdline to have a say */ if (!numentries) { /* round applicable memory size up to nearest megabyte */ - numentries = nr_kernel_pages; + numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages; numentries += (1UL << (20 - PAGE_SHIFT)) - 1; numentries >>= 20 - PAGE_SHIFT; numentries <<= 20 - PAGE_SHIFT; diff --git a/trunk/mm/page_io.c b/trunk/mm/page_io.c index dbffec0d78c9..d4840ecbf8f9 100644 --- a/trunk/mm/page_io.c +++ b/trunk/mm/page_io.c @@ -147,3 +147,48 @@ int swap_readpage(struct file *file, struct page *page) out: return ret; } + +#ifdef CONFIG_SOFTWARE_SUSPEND +/* + * A scruffy utility function to read or write an arbitrary swap page + * and wait on the I/O. The caller must have a ref on the page. + * + * We use end_swap_bio_read() even for writes, because it happens to do what + * we want. + */ +int rw_swap_page_sync(int rw, swp_entry_t entry, struct page *page, + struct bio **bio_chain) +{ + struct bio *bio; + int ret = 0; + int bio_rw; + + lock_page(page); + + bio = get_swap_bio(GFP_KERNEL, entry.val, page, end_swap_bio_read); + if (bio == NULL) { + unlock_page(page); + ret = -ENOMEM; + goto out; + } + + bio_rw = rw; + if (!bio_chain) + bio_rw |= (1 << BIO_RW_SYNC); + if (bio_chain) + bio_get(bio); + submit_bio(bio_rw, bio); + if (bio_chain == NULL) { + wait_on_page_locked(page); + + if (!PageUptodate(page) || PageError(page)) + ret = -EIO; + } + if (bio_chain) { + bio->bi_private = *bio_chain; + *bio_chain = bio; + } +out: + return ret; +} +#endif diff --git a/trunk/mm/pdflush.c b/trunk/mm/pdflush.c index 8ce0900dc95c..b02102feeb4b 100644 --- a/trunk/mm/pdflush.c +++ b/trunk/mm/pdflush.c @@ -21,7 +21,6 @@ #include // Prototypes pdflush_operation() #include #include -#include /* diff --git a/trunk/mm/readahead.c b/trunk/mm/readahead.c index a386f2b6b335..23cb61a01c6e 100644 --- a/trunk/mm/readahead.c +++ b/trunk/mm/readahead.c @@ -148,7 +148,13 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages, if (!pagevec_add(&lru_pvec, page)) __pagevec_lru_add(&lru_pvec); if (ret) { - put_pages_list(pages); + while (!list_empty(pages)) { + struct page *victim; + + victim = list_to_page(pages); + list_del(&victim->lru); + page_cache_release(victim); + } break; } } diff --git a/trunk/mm/shmem.c b/trunk/mm/shmem.c index c820b4f77b8d..4959535fc14c 100644 --- a/trunk/mm/shmem.c +++ b/trunk/mm/shmem.c @@ -177,7 +177,7 @@ static inline void shmem_unacct_blocks(unsigned long flags, long pages) static struct super_operations shmem_ops; static const struct address_space_operations shmem_aops; -static const struct file_operations shmem_file_operations; +static struct file_operations shmem_file_operations; static struct inode_operations shmem_inode_operations; static struct inode_operations shmem_dir_inode_operations; static struct inode_operations shmem_special_inode_operations; @@ -1943,7 +1943,7 @@ static int 
shmem_xattr_security_set(struct inode *inode, const char *name, return security_inode_setsecurity(inode, name, value, size, flags); } -static struct xattr_handler shmem_xattr_security_handler = { +struct xattr_handler shmem_xattr_security_handler = { .prefix = XATTR_SECURITY_PREFIX, .list = shmem_xattr_security_list, .get = shmem_xattr_security_get, @@ -2263,7 +2263,7 @@ static struct kmem_cache *shmem_inode_cachep; static struct inode *shmem_alloc_inode(struct super_block *sb) { struct shmem_inode_info *p; - p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); + p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, SLAB_KERNEL); if (!p) return NULL; return &p->vfs_inode; @@ -2319,7 +2319,7 @@ static const struct address_space_operations shmem_aops = { .migratepage = migrate_page, }; -static const struct file_operations shmem_file_operations = { +static struct file_operations shmem_file_operations = { .mmap = shmem_mmap, #ifdef CONFIG_TMPFS .llseek = generic_file_llseek, diff --git a/trunk/mm/slab.c b/trunk/mm/slab.c index 068cb4503c15..3c4a7e34eddc 100644 --- a/trunk/mm/slab.c +++ b/trunk/mm/slab.c @@ -103,12 +103,12 @@ #include #include #include -#include #include #include #include #include +#include #include #include #include @@ -313,7 +313,7 @@ static int drain_freelist(struct kmem_cache *cache, static void free_block(struct kmem_cache *cachep, void **objpp, int len, int node); static int enable_cpucache(struct kmem_cache *cachep); -static void cache_reap(struct work_struct *unused); +static void cache_reap(void *unused); /* * This function must be completely optimized away if a constant is passed to @@ -730,10 +730,7 @@ static inline void init_lock_keys(void) } #endif -/* - * 1. Guard access to the cache-chain. - * 2. Protect sanity of cpu_online_map against cpu hotplug events - */ +/* Guard access to the cache-chain. */ static DEFINE_MUTEX(cache_chain_mutex); static struct list_head cache_chain; @@ -756,7 +753,7 @@ int slab_is_available(void) return g_cpucache_up == FULL; } -static DEFINE_PER_CPU(struct delayed_work, reap_work); +static DEFINE_PER_CPU(struct work_struct, reap_work); static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) { @@ -869,22 +866,6 @@ static void __slab_error(const char *function, struct kmem_cache *cachep, dump_stack(); } -/* - * By default on NUMA we use alien caches to stage the freeing of - * objects allocated from other nodes. This causes massive memory - * inefficiencies when using fake NUMA setup to split memory into a - * large number of small nodes, so it can be disabled on the command - * line - */ - -static int use_alien_caches __read_mostly = 1; -static int __init noaliencache_setup(char *s) -{ - use_alien_caches = 0; - return 1; -} -__setup("noaliencache", noaliencache_setup); - #ifdef CONFIG_NUMA /* * Special reaping functions for NUMA systems called from cache_reap(). @@ -935,16 +916,16 @@ static void next_reap_node(void) */ static void __devinit start_cpu_timer(int cpu) { - struct delayed_work *reap_work = &per_cpu(reap_work, cpu); + struct work_struct *reap_work = &per_cpu(reap_work, cpu); /* * When this gets called from do_initcalls via cpucache_init(), * init_workqueues() has already run, so keventd will be setup * at that time. 
*/ - if (keventd_up() && reap_work->work.func == NULL) { + if (keventd_up() && reap_work->func == NULL) { init_reap_node(cpu); - INIT_DELAYED_WORK(reap_work, cache_reap); + INIT_WORK(reap_work, cache_reap, NULL); schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); } } @@ -1015,7 +996,7 @@ static inline void *alternate_node_alloc(struct kmem_cache *cachep, return NULL; } -static inline void *____cache_alloc_node(struct kmem_cache *cachep, +static inline void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) { return NULL; @@ -1023,7 +1004,7 @@ static inline void *____cache_alloc_node(struct kmem_cache *cachep, #else /* CONFIG_NUMA */ -static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int); +static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int); static void *alternate_node_alloc(struct kmem_cache *, gfp_t); static struct array_cache **alloc_alien_cache(int node, int limit) @@ -1133,7 +1114,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) * Make sure we are not freeing a object from another node to the array * cache on this cpu. */ - if (likely(slabp->nodeid == node) || unlikely(!use_alien_caches)) + if (likely(slabp->nodeid == node)) return 0; l3 = cachep->nodelists[node]; @@ -1211,7 +1192,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, list_for_each_entry(cachep, &cache_chain, next) { struct array_cache *nc; struct array_cache *shared; - struct array_cache **alien = NULL; + struct array_cache **alien; nc = alloc_arraycache(node, cachep->limit, cachep->batchcount); @@ -1223,11 +1204,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, if (!shared) goto bad; - if (use_alien_caches) { - alien = alloc_alien_cache(node, cachep->limit); - if (!alien) - goto bad; - } + alien = alloc_alien_cache(node, cachep->limit); + if (!alien) + goto bad; cachep->array[cpu] = nc; l3 = cachep->nodelists[node]; BUG_ON(!l3); @@ -1251,18 +1230,12 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, kfree(shared); free_alien_cache(alien); } + mutex_unlock(&cache_chain_mutex); break; case CPU_ONLINE: - mutex_unlock(&cache_chain_mutex); start_cpu_timer(cpu); break; #ifdef CONFIG_HOTPLUG_CPU - case CPU_DOWN_PREPARE: - mutex_lock(&cache_chain_mutex); - break; - case CPU_DOWN_FAILED: - mutex_unlock(&cache_chain_mutex); - break; case CPU_DEAD: /* * Even if all the cpus of a node are down, we don't free the @@ -1273,8 +1246,8 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, * gets destroyed at kmem_cache_destroy(). */ /* fall thru */ -#endif case CPU_UP_CANCELED: + mutex_lock(&cache_chain_mutex); list_for_each_entry(cachep, &cache_chain, next) { struct array_cache *nc; struct array_cache *shared; @@ -1335,9 +1308,11 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, } mutex_unlock(&cache_chain_mutex); break; +#endif } return NOTIFY_OK; bad: + mutex_unlock(&cache_chain_mutex); return NOTIFY_BAD; } @@ -1605,7 +1580,12 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) flags |= __GFP_COMP; #endif - flags |= cachep->gfpflags; + /* + * Under NUMA we want memory on the indicated node. We will handle + * the needed fallback ourselves since we want to serve from our + * per node object lists first for other nodes. 
+ */ + flags |= cachep->gfpflags | GFP_THISNODE; page = alloc_pages_node(nodeid, flags, cachep->gfporder); if (!page) @@ -2118,12 +2098,15 @@ kmem_cache_create (const char *name, size_t size, size_t align, } /* - * We use cache_chain_mutex to ensure a consistent view of - * cpu_online_map as well. Please see cpuup_callback + * Prevent CPUs from coming and going. + * lock_cpu_hotplug() nests outside cache_chain_mutex */ + lock_cpu_hotplug(); + mutex_lock(&cache_chain_mutex); list_for_each_entry(pc, &cache_chain, next) { + mm_segment_t old_fs = get_fs(); char tmp; int res; @@ -2132,7 +2115,9 @@ kmem_cache_create (const char *name, size_t size, size_t align, * destroy its slab cache and no-one else reuses the vmalloc * area of the module. Print a warning. */ - res = probe_kernel_address(pc->name, tmp); + set_fs(KERNEL_DS); + res = __get_user(tmp, pc->name); + set_fs(old_fs); if (res) { printk("SLAB: cache with size %d has lost its name\n", pc->buffer_size); @@ -2212,24 +2197,25 @@ kmem_cache_create (const char *name, size_t size, size_t align, if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER) ralign = BYTES_PER_WORD; - /* 2) arch mandated alignment */ + /* 2) arch mandated alignment: disables debug if necessary */ if (ralign < ARCH_SLAB_MINALIGN) { ralign = ARCH_SLAB_MINALIGN; + if (ralign > BYTES_PER_WORD) + flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); } - /* 3) caller mandated alignment */ + /* 3) caller mandated alignment: disables debug if necessary */ if (ralign < align) { ralign = align; + if (ralign > BYTES_PER_WORD) + flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); } - /* disable debug if necessary */ - if (ralign > BYTES_PER_WORD) - flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); /* * 4) Store it. */ align = ralign; /* Get cache's description obj. */ - cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL); + cachep = kmem_cache_zalloc(&cache_cache, SLAB_KERNEL); if (!cachep) goto oops; @@ -2340,6 +2326,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, panic("kmem_cache_create(): failed to create slab `%s'\n", name); mutex_unlock(&cache_chain_mutex); + unlock_cpu_hotplug(); return cachep; } EXPORT_SYMBOL(kmem_cache_create); @@ -2457,7 +2444,6 @@ static int drain_freelist(struct kmem_cache *cache, return nr_freed; } -/* Called with cache_chain_mutex held to protect against cpu hotplug */ static int __cache_shrink(struct kmem_cache *cachep) { int ret = 0, i = 0; @@ -2488,13 +2474,9 @@ static int __cache_shrink(struct kmem_cache *cachep) */ int kmem_cache_shrink(struct kmem_cache *cachep) { - int ret; BUG_ON(!cachep || in_interrupt()); - mutex_lock(&cache_chain_mutex); - ret = __cache_shrink(cachep); - mutex_unlock(&cache_chain_mutex); - return ret; + return __cache_shrink(cachep); } EXPORT_SYMBOL(kmem_cache_shrink); @@ -2518,16 +2500,23 @@ void kmem_cache_destroy(struct kmem_cache *cachep) { BUG_ON(!cachep || in_interrupt()); + /* Don't let CPUs to come and go */ + lock_cpu_hotplug(); + /* Find the cache in the chain of caches. 
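
The kmem_cache_create() comment above states the lock nesting rule only in prose; a minimal sketch of the convention exercised by both kmem_cache_create() and kmem_cache_destroy() in these hunks follows (the elided body is a placeholder):

	/* Sketch of the lock ordering on the '+' side of these hunks:
	 * lock_cpu_hotplug() is always the outer lock, cache_chain_mutex the inner. */
	lock_cpu_hotplug();
	mutex_lock(&cache_chain_mutex);
	/* ... walk or modify cache_chain ... */
	mutex_unlock(&cache_chain_mutex);
	unlock_cpu_hotplug();
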
*/ mutex_lock(&cache_chain_mutex); /* * the chain is never empty, cache_cache is never destroyed */ list_del(&cachep->next); + mutex_unlock(&cache_chain_mutex); + if (__cache_shrink(cachep)) { slab_error(cachep, "Can't free all objects"); + mutex_lock(&cache_chain_mutex); list_add(&cachep->next, &cache_chain); mutex_unlock(&cache_chain_mutex); + unlock_cpu_hotplug(); return; } @@ -2535,7 +2524,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep) synchronize_rcu(); __kmem_cache_destroy(cachep); - mutex_unlock(&cache_chain_mutex); + unlock_cpu_hotplug(); } EXPORT_SYMBOL(kmem_cache_destroy); @@ -2559,7 +2548,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, if (OFF_SLAB(cachep)) { /* Slab management obj is off-slab. */ slabp = kmem_cache_alloc_node(cachep->slabp_cache, - local_flags & ~GFP_THISNODE, nodeid); + local_flags, nodeid); if (!slabp) return NULL; } else { @@ -2629,7 +2618,7 @@ static void cache_init_objs(struct kmem_cache *cachep, static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) { - if (flags & GFP_DMA) + if (flags & SLAB_DMA) BUG_ON(!(cachep->gfpflags & GFP_DMA)); else BUG_ON(cachep->gfpflags & GFP_DMA); @@ -2700,10 +2689,10 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, * Grow (by 1) the number of slabs within a cache. This is called by * kmem_cache_alloc() when there are no active objs left in a cache. */ -static int cache_grow(struct kmem_cache *cachep, - gfp_t flags, int nodeid, void *objp) +static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid) { struct slab *slabp; + void *objp; size_t offset; gfp_t local_flags; unsigned long ctor_flags; @@ -2713,12 +2702,12 @@ static int cache_grow(struct kmem_cache *cachep, * Be lazy and only check for valid flags here, keeping it out of the * critical path in kmem_cache_alloc(). */ - BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK | __GFP_NO_GROW)); - if (flags & __GFP_NO_GROW) + BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW)); + if (flags & SLAB_NO_GROW) return 0; ctor_flags = SLAB_CTOR_CONSTRUCTOR; - local_flags = (flags & GFP_LEVEL_MASK); + local_flags = (flags & SLAB_LEVEL_MASK); if (!(local_flags & __GFP_WAIT)) /* * Not allowed to sleep. Need to tell a constructor about @@ -2755,14 +2744,12 @@ static int cache_grow(struct kmem_cache *cachep, * Get mem for the objs. Attempt to allocate a physical page from * 'nodeid'. */ - if (!objp) - objp = kmem_getpages(cachep, flags, nodeid); + objp = kmem_getpages(cachep, flags, nodeid); if (!objp) goto failed; /* Get slab management. */ - slabp = alloc_slabmgmt(cachep, objp, offset, - local_flags & ~GFP_THISNODE, nodeid); + slabp = alloc_slabmgmt(cachep, objp, offset, local_flags, nodeid); if (!slabp) goto opps1; @@ -3000,7 +2987,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) if (unlikely(!ac->avail)) { int x; - x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL); + x = cache_grow(cachep, flags, node); /* cache_grow can reenable interrupts, then ac could change. 
*/ ac = cpu_cache_get(cachep); @@ -3076,12 +3063,6 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, cachep->ctor(objp, cachep, ctor_flags); } -#if ARCH_SLAB_MINALIGN - if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) { - printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", - objp, ARCH_SLAB_MINALIGN); - } -#endif return objp; } #else @@ -3124,10 +3105,10 @@ static __always_inline void *__cache_alloc(struct kmem_cache *cachep, objp = ____cache_alloc(cachep, flags); /* * We may just have run out of memory on the local node. - * ____cache_alloc_node() knows how to locate memory on other nodes + * __cache_alloc_node() knows how to locate memory on other nodes */ if (NUMA_BUILD && !objp) - objp = ____cache_alloc_node(cachep, flags, numa_node_id()); + objp = __cache_alloc_node(cachep, flags, numa_node_id()); local_irq_restore(save_flags); objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); @@ -3154,17 +3135,15 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) else if (current->mempolicy) nid_alloc = slab_node(current->mempolicy); if (nid_alloc != nid_here) - return ____cache_alloc_node(cachep, flags, nid_alloc); + return __cache_alloc_node(cachep, flags, nid_alloc); return NULL; } /* * Fallback function if there was no memory available and no objects on a - * certain node and fall back is permitted. First we scan all the - * available nodelists for available objects. If that fails then we - * perform an allocation without specifying a node. This allows the page - * allocator to do its reclaim / fallback magic. We then insert the - * slab into the proper nodelist and then allocate from it. + * certain node and we are allowed to fall back. We mimick the behavior of + * the page allocator. We fall back according to a zonelist determined by + * the policy layer while obeying cpuset constraints. */ void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) { @@ -3172,51 +3151,15 @@ void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) ->node_zonelists[gfp_zone(flags)]; struct zone **z; void *obj = NULL; - int nid; -retry: - /* - * Look through allowed nodes for objects available - * from existing per node queues. - */ for (z = zonelist->zones; *z && !obj; z++) { - nid = zone_to_nid(*z); - - if (cpuset_zone_allowed(*z, flags) && - cache->nodelists[nid] && - cache->nodelists[nid]->free_objects) - obj = ____cache_alloc_node(cache, - flags | GFP_THISNODE, nid); - } + int nid = zone_to_nid(*z); - if (!obj) { - /* - * This allocation will be performed within the constraints - * of the current cpuset / memory policy requirements. - * We may trigger various forms of reclaim on the allowed - * set and go into memory reserves if necessary. - */ - obj = kmem_getpages(cache, flags, -1); - if (obj) { - /* - * Insert into the appropriate per node queues - */ - nid = page_to_nid(virt_to_page(obj)); - if (cache_grow(cache, flags, nid, obj)) { - obj = ____cache_alloc_node(cache, - flags | GFP_THISNODE, nid); - if (!obj) - /* - * Another processor may allocate the - * objects in the slab since we are - * not holding any locks. 
- */ - goto retry; - } else { - kmem_freepages(cache, obj); - obj = NULL; - } - } + if (zone_idx(*z) <= ZONE_NORMAL && + cpuset_zone_allowed(*z, flags) && + cache->nodelists[nid]) + obj = __cache_alloc_node(cache, + flags | __GFP_THISNODE, nid); } return obj; } @@ -3224,7 +3167,7 @@ void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) /* * A interface to enable slab creation on nodeid */ -static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, +static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) { struct list_head *entry; @@ -3273,7 +3216,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, must_grow: spin_unlock(&l3->list_lock); - x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL); + x = cache_grow(cachep, flags, nodeid); if (x) goto retry; @@ -3491,59 +3434,35 @@ int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr) * @flags: See kmalloc(). * @nodeid: node number of the target node. * - * Identical to kmem_cache_alloc but it will allocate memory on the given - * node, which can improve the performance for cpu bound structures. - * - * Fallback to other node is possible if __GFP_THISNODE is not set. + * Identical to kmem_cache_alloc, except that this function is slow + * and can sleep. And it will allocate memory on the given node, which + * can improve the performance for cpu bound structures. + * New and improved: it will now make sure that the object gets + * put on the correct node list so that there is no false sharing. */ -static __always_inline void * -__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, - int nodeid, void *caller) +void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) { unsigned long save_flags; - void *ptr = NULL; + void *ptr; cache_alloc_debugcheck_before(cachep, flags); local_irq_save(save_flags); - if (unlikely(nodeid == -1)) - nodeid = numa_node_id(); - - if (likely(cachep->nodelists[nodeid])) { - if (nodeid == numa_node_id()) { - /* - * Use the locally cached objects if possible. - * However ____cache_alloc does not allow fallback - * to other nodes. It may fail while we still have - * objects on other nodes available. 
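
The kmem_cache_alloc_node() comment above describes the per-node behaviour only in prose; a hedged usage sketch follows, with struct foo and foo_cache as hypothetical names not taken from the patch:

	/* Hypothetical: place a per-node control structure on the node whose
	 * CPUs will mostly touch it.  foo_cache is an illustrative cache. */
	struct foo *f = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, nid);
	if (!f)
		return -ENOMEM;
	f->home_node = nid;
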
- */ - ptr = ____cache_alloc(cachep, flags); - } - if (!ptr) { - /* ___cache_alloc_node can fall back to other nodes */ - ptr = ____cache_alloc_node(cachep, flags, nodeid); - } - } else { - /* Node not bootstrapped yet */ - if (!(flags & __GFP_THISNODE)) - ptr = fallback_alloc(cachep, flags); - } - + if (nodeid == -1 || nodeid == numa_node_id() || + !cachep->nodelists[nodeid]) + ptr = ____cache_alloc(cachep, flags); + else + ptr = __cache_alloc_node(cachep, flags, nodeid); local_irq_restore(save_flags); - ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); - return ptr; -} + ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, + __builtin_return_address(0)); -void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) -{ - return __cache_alloc_node(cachep, flags, nodeid, - __builtin_return_address(0)); + return ptr; } EXPORT_SYMBOL(kmem_cache_alloc_node); -static __always_inline void * -__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller) +void *__kmalloc_node(size_t size, gfp_t flags, int node) { struct kmem_cache *cachep; @@ -3552,29 +3471,8 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller) return NULL; return kmem_cache_alloc_node(cachep, flags, node); } - -#ifdef CONFIG_DEBUG_SLAB -void *__kmalloc_node(size_t size, gfp_t flags, int node) -{ - return __do_kmalloc_node(size, flags, node, - __builtin_return_address(0)); -} EXPORT_SYMBOL(__kmalloc_node); - -void *__kmalloc_node_track_caller(size_t size, gfp_t flags, - int node, void *caller) -{ - return __do_kmalloc_node(size, flags, node, caller); -} -EXPORT_SYMBOL(__kmalloc_node_track_caller); -#else -void *__kmalloc_node(size_t size, gfp_t flags, int node) -{ - return __do_kmalloc_node(size, flags, node, NULL); -} -EXPORT_SYMBOL(__kmalloc_node); -#endif /* CONFIG_DEBUG_SLAB */ -#endif /* CONFIG_NUMA */ +#endif /** * __do_kmalloc - allocate memory @@ -3685,15 +3583,13 @@ static int alloc_kmemlist(struct kmem_cache *cachep) int node; struct kmem_list3 *l3; struct array_cache *new_shared; - struct array_cache **new_alien = NULL; + struct array_cache **new_alien; for_each_online_node(node) { - if (use_alien_caches) { - new_alien = alloc_alien_cache(node, cachep->limit); - if (!new_alien) - goto fail; - } + new_alien = alloc_alien_cache(node, cachep->limit); + if (!new_alien) + goto fail; new_shared = alloc_arraycache(node, cachep->shared*cachep->batchcount, @@ -3919,7 +3815,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, * If we cannot acquire the cache chain mutex then just give up - we'll try * again on the next iteration. 
*/ -static void cache_reap(struct work_struct *unused) +static void cache_reap(void *unused) { struct kmem_cache *searchp; struct kmem_list3 *l3; @@ -4142,7 +4038,7 @@ static int s_show(struct seq_file *m, void *p) * + further values on SMP and with statistics enabled */ -const struct seq_operations slabinfo_op = { +struct seq_operations slabinfo_op = { .start = s_start, .next = s_next, .stop = s_stop, @@ -4340,7 +4236,7 @@ static int leaks_show(struct seq_file *m, void *p) return 0; } -const struct seq_operations slabstats_op = { +struct seq_operations slabstats_op = { .start = leaks_start, .next = s_next, .stop = s_stop, diff --git a/trunk/mm/sparse.c b/trunk/mm/sparse.c index ac26eb0d73cd..b3c82ba30012 100644 --- a/trunk/mm/sparse.c +++ b/trunk/mm/sparse.c @@ -24,25 +24,6 @@ struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT] #endif EXPORT_SYMBOL(mem_section); -#ifdef NODE_NOT_IN_PAGE_FLAGS -/* - * If we did not store the node number in the page then we have to - * do a lookup in the section_to_node_table in order to find which - * node the page belongs to. - */ -#if MAX_NUMNODES <= 256 -static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned; -#else -static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned; -#endif - -int page_to_nid(struct page *page) -{ - return section_to_node_table[page_to_section(page)]; -} -EXPORT_SYMBOL(page_to_nid); -#endif - #ifdef CONFIG_SPARSEMEM_EXTREME static struct mem_section *sparse_index_alloc(int nid) { @@ -68,10 +49,6 @@ static int sparse_index_init(unsigned long section_nr, int nid) struct mem_section *section; int ret = 0; -#ifdef NODE_NOT_IN_PAGE_FLAGS - section_to_node_table[section_nr] = nid; -#endif - if (mem_section[root]) return -EEXIST; diff --git a/trunk/mm/swap.c b/trunk/mm/swap.c index 2ed7be39795e..2e0e871f542f 100644 --- a/trunk/mm/swap.c +++ b/trunk/mm/swap.c @@ -57,9 +57,9 @@ static void put_compound_page(struct page *page) { page = (struct page *)page_private(page); if (put_page_testzero(page)) { - compound_page_dtor *dtor; + void (*dtor)(struct page *page); - dtor = get_compound_page_dtor(page); + dtor = (void (*)(struct page *))page[1].lru.next; (*dtor)(page); } } @@ -216,7 +216,7 @@ void lru_add_drain(void) } #ifdef CONFIG_NUMA -static void lru_add_drain_per_cpu(struct work_struct *dummy) +static void lru_add_drain_per_cpu(void *dummy) { lru_add_drain(); } @@ -226,7 +226,7 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy) */ int lru_add_drain_all(void) { - return schedule_on_each_cpu(lru_add_drain_per_cpu); + return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL); } #else @@ -514,7 +514,5 @@ void __init swap_setup(void) * Right now other parts of the system means that we * _really_ don't want to cluster much more */ -#ifdef CONFIG_HOTPLUG_CPU hotcpu_notifier(cpu_swap_callback, 0); -#endif } diff --git a/trunk/mm/swapfile.c b/trunk/mm/swapfile.c index c5431072f422..a15def63f28f 100644 --- a/trunk/mm/swapfile.c +++ b/trunk/mm/swapfile.c @@ -427,48 +427,34 @@ void free_swap_and_cache(swp_entry_t entry) #ifdef CONFIG_SOFTWARE_SUSPEND /* - * Find the swap type that corresponds to given device (if any). + * Find the swap type that corresponds to given device (if any) * - * @offset - number of the PAGE_SIZE-sized block of the device, starting - * from 0, in which the swap header is expected to be located. - * - * This is needed for the suspend to disk (aka swsusp). + * This is needed for software suspend and is done in such a way that inode + * aliasing is allowed. 
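
swap_type_of() is described only by the comment above; a sketch of how suspend-to-disk code might use the single-argument form on the '+' side of this hunk follows. resume_dev and its device number are placeholders, and the error convention is only assumed to be a negative return.

	/* Hypothetical lookup of the swap area backing a resume device. */
	dev_t resume_dev = MKDEV(8, 3);		/* placeholder device number */
	int swap_type = swap_type_of(resume_dev);

	if (swap_type < 0)
		return swap_type;		/* no usable swap area on that device */
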
*/ -int swap_type_of(dev_t device, sector_t offset) +int swap_type_of(dev_t device) { - struct block_device *bdev = NULL; int i; - if (device) - bdev = bdget(device); - spin_lock(&swap_lock); for (i = 0; i < nr_swapfiles; i++) { - struct swap_info_struct *sis = swap_info + i; + struct inode *inode; - if (!(sis->flags & SWP_WRITEOK)) + if (!(swap_info[i].flags & SWP_WRITEOK)) continue; - if (!bdev) { + if (!device) { spin_unlock(&swap_lock); return i; } - if (bdev == sis->bdev) { - struct swap_extent *se; - - se = list_entry(sis->extent_list.next, - struct swap_extent, list); - if (se->start_block == offset) { - spin_unlock(&swap_lock); - bdput(bdev); - return i; - } + inode = swap_info[i].swap_file->f_dentry->d_inode; + if (S_ISBLK(inode->i_mode) && + device == MKDEV(imajor(inode), iminor(inode))) { + spin_unlock(&swap_lock); + return i; } } spin_unlock(&swap_lock); - if (bdev) - bdput(bdev); - return -ENODEV; } @@ -945,23 +931,6 @@ sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset) } } -#ifdef CONFIG_SOFTWARE_SUSPEND -/* - * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev - * corresponding to given index in swap_info (swap type). - */ -sector_t swapdev_block(int swap_type, pgoff_t offset) -{ - struct swap_info_struct *sis; - - if (swap_type >= nr_swapfiles) - return 0; - - sis = swap_info + swap_type; - return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset) : 0; -} -#endif /* CONFIG_SOFTWARE_SUSPEND */ - /* * Free all of a swapdev's extent information */ @@ -1305,13 +1274,10 @@ static void *swap_start(struct seq_file *swap, loff_t *pos) mutex_lock(&swapon_mutex); - if (!l) - return SEQ_START_TOKEN; - for (i = 0; i < nr_swapfiles; i++, ptr++) { if (!(ptr->flags & SWP_USED) || !ptr->swap_map) continue; - if (!--l) + if (!l--) return ptr; } @@ -1320,17 +1286,10 @@ static void *swap_start(struct seq_file *swap, loff_t *pos) static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) { - struct swap_info_struct *ptr; + struct swap_info_struct *ptr = v; struct swap_info_struct *endptr = swap_info + nr_swapfiles; - if (v == SEQ_START_TOKEN) - ptr = swap_info; - else { - ptr = v; - ptr++; - } - - for (; ptr < endptr; ptr++) { + for (++ptr; ptr < endptr; ptr++) { if (!(ptr->flags & SWP_USED) || !ptr->swap_map) continue; ++*pos; @@ -1351,10 +1310,8 @@ static int swap_show(struct seq_file *swap, void *v) struct file *file; int len; - if (ptr == SEQ_START_TOKEN) { - seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n"); - return 0; - } + if (v == swap_info) + seq_puts(swap, "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n"); file = ptr->swap_file; len = seq_path(swap, file->f_vfsmnt, file->f_dentry, " \t\n\\"); @@ -1368,7 +1325,7 @@ static int swap_show(struct seq_file *swap, void *v) return 0; } -static const struct seq_operations swaps_op = { +static struct seq_operations swaps_op = { .start = swap_start, .next = swap_next, .stop = swap_stop, @@ -1380,7 +1337,7 @@ static int swaps_open(struct inode *inode, struct file *file) return seq_open(file, &swaps_op); } -static const struct file_operations proc_swaps_operations = { +static struct file_operations proc_swaps_operations = { .open = swaps_open, .read = seq_read, .llseek = seq_lseek, @@ -1583,11 +1540,6 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags) error = -EINVAL; if (!maxpages) goto bad_swap; - if (swapfilesize && maxpages > swapfilesize) { - printk(KERN_WARNING - "Swap area shorter than signature indicates\n"); - goto bad_swap; - } if 
(swap_header->info.nr_badpages && S_ISREG(inode->i_mode)) goto bad_swap; if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) @@ -1615,6 +1567,12 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags) goto bad_swap; } + if (swapfilesize && maxpages > swapfilesize) { + printk(KERN_WARNING + "Swap area shorter than signature indicates\n"); + error = -EINVAL; + goto bad_swap; + } if (nr_good_pages) { p->swap_map[0] = SWAP_MAP_BAD; p->max = maxpages; diff --git a/trunk/mm/thrash.c b/trunk/mm/thrash.c index 9ef9071f99bc..f4c560b4a2b7 100644 --- a/trunk/mm/thrash.c +++ b/trunk/mm/thrash.c @@ -7,74 +7,100 @@ * * Simple token based thrashing protection, using the algorithm * described in: http://www.cs.wm.edu/~sjiang/token.pdf - * - * Sep 2006, Ashwin Chaugule - * Improved algorithm to pass token: - * Each task has a priority which is incremented if it contended - * for the token in an interval less than its previous attempt. - * If the token is acquired, that task's priority is boosted to prevent - * the token from bouncing around too often and to let the task make - * some progress in its execution. */ - #include #include #include #include static DEFINE_SPINLOCK(swap_token_lock); -struct mm_struct *swap_token_mm; -static unsigned int global_faults; +static unsigned long swap_token_timeout; +static unsigned long swap_token_check; +struct mm_struct * swap_token_mm = &init_mm; -void grab_swap_token(void) -{ - int current_interval; +#define SWAP_TOKEN_CHECK_INTERVAL (HZ * 2) +#define SWAP_TOKEN_TIMEOUT (300 * HZ) +/* + * Currently disabled; Needs further code to work at HZ * 300. + */ +unsigned long swap_token_default_timeout = SWAP_TOKEN_TIMEOUT; - global_faults++; +/* + * Take the token away if the process had no page faults + * in the last interval, or if it has held the token for + * too long. + */ +#define SWAP_TOKEN_ENOUGH_RSS 1 +#define SWAP_TOKEN_TIMED_OUT 2 +static int should_release_swap_token(struct mm_struct *mm) +{ + int ret = 0; + if (!mm->recent_pagein) + ret = SWAP_TOKEN_ENOUGH_RSS; + else if (time_after(jiffies, swap_token_timeout)) + ret = SWAP_TOKEN_TIMED_OUT; + mm->recent_pagein = 0; + return ret; +} - current_interval = global_faults - current->mm->faultstamp; +/* + * Try to grab the swapout protection token. We only try to + * grab it once every TOKEN_CHECK_INTERVAL, both to prevent + * SMP lock contention and to check that the process that held + * the token before is no longer thrashing. + */ +void grab_swap_token(void) +{ + struct mm_struct *mm; + int reason; - if (!spin_trylock(&swap_token_lock)) + /* We have the token. Let others know we still need it. */ + if (has_swap_token(current->mm)) { + current->mm->recent_pagein = 1; + if (unlikely(!swap_token_default_timeout)) + disable_swap_token(); return; - - /* First come first served */ - if (swap_token_mm == NULL) { - current->mm->token_priority = current->mm->token_priority + 2; - swap_token_mm = current->mm; - goto out; } - if (current->mm != swap_token_mm) { - if (current_interval < current->mm->last_interval) - current->mm->token_priority++; - else { - current->mm->token_priority--; - if (unlikely(current->mm->token_priority < 0)) - current->mm->token_priority = 0; + if (time_after(jiffies, swap_token_check)) { + + if (!swap_token_default_timeout) { + swap_token_check = jiffies + SWAP_TOKEN_CHECK_INTERVAL; + return; } - /* Check if we deserve the token */ - if (current->mm->token_priority > - swap_token_mm->token_priority) { - current->mm->token_priority += 2; + + /* ... 
or if we recently held the token. */ + if (time_before(jiffies, current->mm->swap_token_time)) + return; + + if (!spin_trylock(&swap_token_lock)) + return; + + swap_token_check = jiffies + SWAP_TOKEN_CHECK_INTERVAL; + + mm = swap_token_mm; + if ((reason = should_release_swap_token(mm))) { + unsigned long eligible = jiffies; + if (reason == SWAP_TOKEN_TIMED_OUT) { + eligible += swap_token_default_timeout; + } + mm->swap_token_time = eligible; + swap_token_timeout = jiffies + swap_token_default_timeout; swap_token_mm = current->mm; } - } else { - /* Token holder came in again! */ - current->mm->token_priority += 2; + spin_unlock(&swap_token_lock); } - -out: - current->mm->faultstamp = global_faults; - current->mm->last_interval = current_interval; - spin_unlock(&swap_token_lock); -return; + return; } /* Called on process exit. */ void __put_swap_token(struct mm_struct *mm) { spin_lock(&swap_token_lock); - if (likely(mm == swap_token_mm)) - swap_token_mm = NULL; + if (likely(mm == swap_token_mm)) { + mm->swap_token_time = jiffies + SWAP_TOKEN_CHECK_INTERVAL; + swap_token_mm = &init_mm; + swap_token_check = jiffies; + } spin_unlock(&swap_token_lock); } diff --git a/trunk/mm/vmscan.c b/trunk/mm/vmscan.c index 093f5fe6dd77..518540a4a2a6 100644 --- a/trunk/mm/vmscan.c +++ b/trunk/mm/vmscan.c @@ -36,7 +36,6 @@ #include #include #include -#include #include #include @@ -1173,12 +1172,11 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order) if (!zone_watermark_ok(zone, order, zone->pages_high, 0, 0)) { end_zone = i; - break; + goto scan; } } - if (i < 0) - goto out; - + goto out; +scan: for (i = 0; i <= end_zone; i++) { struct zone *zone = pgdat->node_zones + i; @@ -1261,9 +1259,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order) } if (!all_zones_ok) { cond_resched(); - - try_to_freeze(); - goto loop_again; } @@ -1513,6 +1508,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages) } #endif +#ifdef CONFIG_HOTPLUG_CPU /* It's optimal to keep kswapds on the same CPUs as their memory, but not required for correctness. So if the last cpu in a node goes away, we get changed to run anywhere: as the first one comes back, @@ -1533,6 +1529,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb, } return NOTIFY_OK; } +#endif /* CONFIG_HOTPLUG_CPU */ /* * This kswapd start function will be called by init and node-hot-add. diff --git a/trunk/mm/vmstat.c b/trunk/mm/vmstat.c index dc005a0c96ae..8614e8f6743b 100644 --- a/trunk/mm/vmstat.c +++ b/trunk/mm/vmstat.c @@ -430,7 +430,7 @@ static int frag_show(struct seq_file *m, void *arg) return 0; } -const struct seq_operations fragmentation_op = { +struct seq_operations fragmentation_op = { .start = frag_start, .next = frag_next, .stop = frag_stop, @@ -452,7 +452,7 @@ const struct seq_operations fragmentation_op = { #define TEXTS_FOR_ZONES(xx) xx "_dma", TEXT_FOR_DMA32(xx) xx "_normal", \ TEXT_FOR_HIGHMEM(xx) -static const char * const vmstat_text[] = { +static char *vmstat_text[] = { /* Zoned VM counters */ "nr_anon_pages", "nr_mapped", @@ -597,7 +597,7 @@ static int zoneinfo_show(struct seq_file *m, void *arg) return 0; } -const struct seq_operations zoneinfo_op = { +struct seq_operations zoneinfo_op = { .start = frag_start, /* iterate over all zones. The same as in * fragmentation. 
*/ .next = frag_next, @@ -660,7 +660,7 @@ static void vmstat_stop(struct seq_file *m, void *arg) m->private = NULL; } -const struct seq_operations vmstat_op = { +struct seq_operations vmstat_op = { .start = vmstat_start, .next = vmstat_next, .stop = vmstat_stop, @@ -679,13 +679,13 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb, void *hcpu) { switch (action) { - case CPU_UP_PREPARE: - case CPU_UP_CANCELED: - case CPU_DEAD: - refresh_zone_stat_thresholds(); - break; - default: - break; + case CPU_UP_PREPARE: + case CPU_UP_CANCELED: + case CPU_DEAD: + refresh_zone_stat_thresholds(); + break; + default: + break; } return NOTIFY_OK; } diff --git a/trunk/net/atm/lec.c b/trunk/net/atm/lec.c index 3fc0abeeaf34..5946ec63724f 100644 --- a/trunk/net/atm/lec.c +++ b/trunk/net/atm/lec.c @@ -1454,7 +1454,7 @@ static void lane2_associate_ind(struct net_device *dev, u8 *mac_addr, #define LEC_ARP_REFRESH_INTERVAL (3*HZ) -static void lec_arp_check_expire(struct work_struct *work); +static void lec_arp_check_expire(void *data); static void lec_arp_expire_arp(unsigned long data); /* @@ -1477,7 +1477,7 @@ static void lec_arp_init(struct lec_priv *priv) INIT_HLIST_HEAD(&priv->lec_no_forward); INIT_HLIST_HEAD(&priv->mcast_fwds); spin_lock_init(&priv->lec_arp_lock); - INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire); + INIT_WORK(&priv->lec_arp_work, lec_arp_check_expire, priv); schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL); } @@ -1875,11 +1875,10 @@ static void lec_arp_expire_vcc(unsigned long data) * to ESI_FORWARD_DIRECT. This causes the flush period to end * regardless of the progress of the flush protocol. */ -static void lec_arp_check_expire(struct work_struct *work) +static void lec_arp_check_expire(void *data) { unsigned long flags; - struct lec_priv *priv = - container_of(work, struct lec_priv, lec_arp_work.work); + struct lec_priv *priv = data; struct hlist_node *node, *next; struct lec_arp_table *entry; unsigned long now; diff --git a/trunk/net/atm/lec.h b/trunk/net/atm/lec.h index 99136babd535..24cc95f86741 100644 --- a/trunk/net/atm/lec.h +++ b/trunk/net/atm/lec.h @@ -92,7 +92,7 @@ struct lec_priv { spinlock_t lec_arp_lock; struct atm_vcc *mcast_vcc; /* Default Multicast Send VCC */ struct atm_vcc *lecd; - struct delayed_work lec_arp_work; /* C10 */ + struct work_struct lec_arp_work; /* C10 */ unsigned int maximum_unknown_frame_count; /* * Within the period of time defined by this variable, the client will send diff --git a/trunk/net/bluetooth/hci_sysfs.c b/trunk/net/bluetooth/hci_sysfs.c index d4c935692ccf..3eeeb7a86e75 100644 --- a/trunk/net/bluetooth/hci_sysfs.c +++ b/trunk/net/bluetooth/hci_sysfs.c @@ -237,9 +237,9 @@ static void bt_release(struct device *dev) kfree(data); } -static void add_conn(struct work_struct *work) +static void add_conn(void *data) { - struct hci_conn *conn = container_of(work, struct hci_conn, work); + struct hci_conn *conn = data; int i; if (device_register(&conn->dev) < 0) { @@ -272,14 +272,14 @@ void hci_conn_add_sysfs(struct hci_conn *conn) dev_set_drvdata(&conn->dev, conn); - INIT_WORK(&conn->work, add_conn); + INIT_WORK(&conn->work, add_conn, (void *) conn); schedule_work(&conn->work); } -static void del_conn(struct work_struct *work) +static void del_conn(void *data) { - struct hci_conn *conn = container_of(work, struct hci_conn, work); + struct hci_conn *conn = data; device_del(&conn->dev); } @@ -287,7 +287,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn) { BT_DBG("conn %p", conn); - 
INIT_WORK(&conn->work, del_conn); + INIT_WORK(&conn->work, del_conn, (void *) conn); schedule_work(&conn->work); } diff --git a/trunk/net/bridge/br_fdb.c b/trunk/net/bridge/br_fdb.c index 8ca448db7a0d..d9f04864d15d 100644 --- a/trunk/net/bridge/br_fdb.c +++ b/trunk/net/bridge/br_fdb.c @@ -23,7 +23,7 @@ #include #include "br_private.h" -static struct kmem_cache *br_fdb_cache __read_mostly; +static kmem_cache_t *br_fdb_cache __read_mostly; static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, const unsigned char *addr); diff --git a/trunk/net/bridge/br_if.c b/trunk/net/bridge/br_if.c index 55bb2634c088..f753c40c11d2 100644 --- a/trunk/net/bridge/br_if.c +++ b/trunk/net/bridge/br_if.c @@ -77,16 +77,12 @@ static int port_cost(struct net_device *dev) * Called from work queue to allow for calling functions that * might sleep (such as speed check), and to debounce. */ -static void port_carrier_check(struct work_struct *work) +static void port_carrier_check(void *arg) { + struct net_device *dev = arg; struct net_bridge_port *p; - struct net_device *dev; struct net_bridge *br; - dev = container_of(work, struct net_bridge_port, - carrier_check.work)->dev; - work_release(work); - rtnl_lock(); p = dev->br_port; if (!p) @@ -280,7 +276,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br, p->port_no = index; br_init_port(p); p->state = BR_STATE_DISABLED; - INIT_DELAYED_WORK_NAR(&p->carrier_check, port_carrier_check); + INIT_WORK(&p->carrier_check, port_carrier_check, dev); br_stp_port_timer_init(p); kobject_init(&p->kobj); diff --git a/trunk/net/bridge/br_private.h b/trunk/net/bridge/br_private.h index 3a534e94c7f3..74258d86f256 100644 --- a/trunk/net/bridge/br_private.h +++ b/trunk/net/bridge/br_private.h @@ -82,7 +82,7 @@ struct net_bridge_port struct timer_list hold_timer; struct timer_list message_age_timer; struct kobject kobj; - struct delayed_work carrier_check; + struct work_struct carrier_check; struct rcu_head rcu; }; diff --git a/trunk/net/core/dev.c b/trunk/net/core/dev.c index e660cb57e42a..59d058a3b504 100644 --- a/trunk/net/core/dev.c +++ b/trunk/net/core/dev.c @@ -3340,6 +3340,7 @@ void unregister_netdev(struct net_device *dev) EXPORT_SYMBOL(unregister_netdev); +#ifdef CONFIG_HOTPLUG_CPU static int dev_cpu_callback(struct notifier_block *nfb, unsigned long action, void *ocpu) @@ -3383,6 +3384,7 @@ static int dev_cpu_callback(struct notifier_block *nfb, return NOTIFY_OK; } +#endif /* CONFIG_HOTPLUG_CPU */ #ifdef CONFIG_NET_DMA /** diff --git a/trunk/net/core/dst.c b/trunk/net/core/dst.c index 836ec6606925..1a5e49da0e77 100644 --- a/trunk/net/core/dst.c +++ b/trunk/net/core/dst.c @@ -125,7 +125,7 @@ void * dst_alloc(struct dst_ops * ops) if (ops->gc()) return NULL; } - dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC); + dst = kmem_cache_alloc(ops->kmem_cachep, SLAB_ATOMIC); if (!dst) return NULL; memset(dst, 0, ops->entry_size); diff --git a/trunk/net/core/flow.c b/trunk/net/core/flow.c index d137f971f97d..b16d31ae5e54 100644 --- a/trunk/net/core/flow.c +++ b/trunk/net/core/flow.c @@ -44,7 +44,7 @@ static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL }; #define flow_table(cpu) (per_cpu(flow_tables, cpu)) -static struct kmem_cache *flow_cachep __read_mostly; +static kmem_cache_t *flow_cachep __read_mostly; static int flow_lwm, flow_hwm; @@ -211,7 +211,7 @@ void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir, if (flow_count(cpu) > flow_hwm) flow_cache_shrink(cpu); - fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC); + fle = 
kmem_cache_alloc(flow_cachep, SLAB_ATOMIC); if (fle) { fle->next = *head; *head = fle; @@ -340,6 +340,7 @@ static void __devinit flow_cache_cpu_prepare(int cpu) tasklet_init(tasklet, flow_cache_flush_tasklet, 0); } +#ifdef CONFIG_HOTPLUG_CPU static int flow_cache_cpu(struct notifier_block *nfb, unsigned long action, void *hcpu) @@ -348,6 +349,7 @@ static int flow_cache_cpu(struct notifier_block *nfb, __flow_cache_shrink((unsigned long)hcpu, 0); return NOTIFY_OK; } +#endif /* CONFIG_HOTPLUG_CPU */ static int __init flow_cache_init(void) { diff --git a/trunk/net/core/link_watch.c b/trunk/net/core/link_watch.c index 549a2ce951b0..4b36114744c5 100644 --- a/trunk/net/core/link_watch.c +++ b/trunk/net/core/link_watch.c @@ -34,8 +34,8 @@ enum lw_bits { static unsigned long linkwatch_flags; static unsigned long linkwatch_nextevent; -static void linkwatch_event(struct work_struct *dummy); -static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event); +static void linkwatch_event(void *dummy); +static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL); static LIST_HEAD(lweventlist); static DEFINE_SPINLOCK(lweventlist_lock); @@ -127,7 +127,7 @@ void linkwatch_run_queue(void) } -static void linkwatch_event(struct work_struct *dummy) +static void linkwatch_event(void *dummy) { /* Limit the number of linkwatch events to one * per second so that a runaway driver does not @@ -171,9 +171,10 @@ void linkwatch_fire_event(struct net_device *dev) unsigned long delay = linkwatch_nextevent - jiffies; /* If we wrap around we'll delay it by at most HZ. */ - if (delay > HZ) - delay = 0; - schedule_delayed_work(&linkwatch_work, delay); + if (!delay || delay > HZ) + schedule_work(&linkwatch_work); + else + schedule_delayed_work(&linkwatch_work, delay); } } } diff --git a/trunk/net/core/neighbour.c b/trunk/net/core/neighbour.c index 0ab1987b9348..ba509a4a8e92 100644 --- a/trunk/net/core/neighbour.c +++ b/trunk/net/core/neighbour.c @@ -251,7 +251,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl) goto out_entries; } - n = kmem_cache_alloc(tbl->kmem_cachep, GFP_ATOMIC); + n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC); if (!n) goto out_entries; diff --git a/trunk/net/core/netpoll.c b/trunk/net/core/netpoll.c index b3c559b9ac35..3c58846fcaa5 100644 --- a/trunk/net/core/netpoll.c +++ b/trunk/net/core/netpoll.c @@ -50,10 +50,9 @@ static atomic_t trapped; static void zap_completion_queue(void); static void arp_reply(struct sk_buff *skb); -static void queue_process(struct work_struct *work) +static void queue_process(void *p) { - struct netpoll_info *npinfo = - container_of(work, struct netpoll_info, tx_work.work); + struct netpoll_info *npinfo = p; struct sk_buff *skb; while ((skb = skb_dequeue(&npinfo->txq))) { @@ -73,6 +72,8 @@ static void queue_process(struct work_struct *work) schedule_delayed_work(&npinfo->tx_work, HZ/10); return; } + + netif_tx_unlock_bh(dev); } } @@ -262,7 +263,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) if (status != NETDEV_TX_OK) { skb_queue_tail(&npinfo->txq, skb); - schedule_delayed_work(&npinfo->tx_work,0); + schedule_work(&npinfo->tx_work); } } @@ -627,7 +628,7 @@ int netpoll_setup(struct netpoll *np) spin_lock_init(&npinfo->rx_lock); skb_queue_head_init(&npinfo->arp_tx); skb_queue_head_init(&npinfo->txq); - INIT_DELAYED_WORK(&npinfo->tx_work, queue_process); + INIT_WORK(&npinfo->tx_work, queue_process, npinfo); atomic_set(&npinfo->refcnt, 1); } else { diff --git a/trunk/net/core/skbuff.c b/trunk/net/core/skbuff.c index 
de7801d589e7..8e1c385e5ba9 100644 --- a/trunk/net/core/skbuff.c +++ b/trunk/net/core/skbuff.c @@ -68,8 +68,8 @@ #include "kmap_skb.h" -static struct kmem_cache *skbuff_head_cache __read_mostly; -static struct kmem_cache *skbuff_fclone_cache __read_mostly; +static kmem_cache_t *skbuff_head_cache __read_mostly; +static kmem_cache_t *skbuff_fclone_cache __read_mostly; /* * Keep out-of-line to prevent kernel bloat. @@ -132,7 +132,6 @@ EXPORT_SYMBOL(skb_truesize_bug); * @gfp_mask: allocation mask * @fclone: allocate from fclone cache instead of head cache * and allocate a cloned (child) skb - * @node: numa node to allocate memory on * * Allocate a new &sk_buff. The returned buffer has no headroom and a * tail room of size bytes. The object has a reference count of one. @@ -142,9 +141,9 @@ EXPORT_SYMBOL(skb_truesize_bug); * %GFP_ATOMIC. */ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, - int fclone, int node) + int fclone) { - struct kmem_cache *cache; + kmem_cache_t *cache; struct skb_shared_info *shinfo; struct sk_buff *skb; u8 *data; @@ -152,14 +151,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, cache = fclone ? skbuff_fclone_cache : skbuff_head_cache; /* Get the HEAD */ - skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); + skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA); if (!skb) goto out; /* Get the DATA. Size must match skb_add_mtu(). */ size = SKB_DATA_ALIGN(size); - data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info), - gfp_mask, node); + data = kmalloc_track_caller(size + sizeof(struct skb_shared_info), + gfp_mask); if (!data) goto nodata; @@ -211,7 +210,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, * Buffers may only be allocated from interrupts using a @gfp_mask of * %GFP_ATOMIC. */ -struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp, +struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, unsigned int size, gfp_t gfp_mask) { @@ -268,10 +267,9 @@ struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp, struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length, gfp_t gfp_mask) { - int node = dev->class_dev.dev ? 
dev_to_node(dev->class_dev.dev) : -1; struct sk_buff *skb; - skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node); + skb = alloc_skb(length + NET_SKB_PAD, gfp_mask); if (likely(skb)) { skb_reserve(skb, NET_SKB_PAD); skb->dev = dev; diff --git a/trunk/net/core/sock.c b/trunk/net/core/sock.c index 0ed5b4f0bc40..419c7d3289c7 100644 --- a/trunk/net/core/sock.c +++ b/trunk/net/core/sock.c @@ -810,11 +810,24 @@ int sock_getsockopt(struct socket *sock, int level, int optname, */ static void inline sock_lock_init(struct sock *sk) { - sock_lock_init_class_and_name(sk, - af_family_slock_key_strings[sk->sk_family], - af_family_slock_keys + sk->sk_family, - af_family_key_strings[sk->sk_family], - af_family_keys + sk->sk_family); + spin_lock_init(&sk->sk_lock.slock); + sk->sk_lock.owner = NULL; + init_waitqueue_head(&sk->sk_lock.wq); + /* + * Make sure we are not reinitializing a held lock: + */ + debug_check_no_locks_freed((void *)&sk->sk_lock, sizeof(sk->sk_lock)); + + /* + * Mark both the sk_lock and the sk_lock.slock as a + * per-address-family lock class: + */ + lockdep_set_class_and_name(&sk->sk_lock.slock, + af_family_slock_keys + sk->sk_family, + af_family_slock_key_strings[sk->sk_family]); + lockdep_init_map(&sk->sk_lock.dep_map, + af_family_key_strings[sk->sk_family], + af_family_keys + sk->sk_family, 0); } /** @@ -828,7 +841,7 @@ struct sock *sk_alloc(int family, gfp_t priority, struct proto *prot, int zero_it) { struct sock *sk = NULL; - struct kmem_cache *slab = prot->slab; + kmem_cache_t *slab = prot->slab; if (slab != NULL) sk = kmem_cache_alloc(slab, priority); diff --git a/trunk/net/dccp/ackvec.c b/trunk/net/dccp/ackvec.c index 1f4727ddbdbf..bdf1bb7a82c0 100644 --- a/trunk/net/dccp/ackvec.c +++ b/trunk/net/dccp/ackvec.c @@ -21,8 +21,8 @@ #include -static struct kmem_cache *dccp_ackvec_slab; -static struct kmem_cache *dccp_ackvec_record_slab; +static kmem_cache_t *dccp_ackvec_slab; +static kmem_cache_t *dccp_ackvec_record_slab; static struct dccp_ackvec_record *dccp_ackvec_record_new(void) { diff --git a/trunk/net/dccp/ccid.c b/trunk/net/dccp/ccid.c index d8cf92f09e68..ff05e59043cd 100644 --- a/trunk/net/dccp/ccid.c +++ b/trunk/net/dccp/ccid.c @@ -55,9 +55,9 @@ static inline void ccids_read_unlock(void) #define ccids_read_unlock() do { } while(0) #endif -static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...) +static kmem_cache_t *ccid_kmem_cache_create(int obj_size, const char *fmt,...) { - struct kmem_cache *slab; + kmem_cache_t *slab; char slab_name_fmt[32], *slab_name; va_list args; @@ -75,7 +75,7 @@ static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,. 
return slab; } -static void ccid_kmem_cache_destroy(struct kmem_cache *slab) +static void ccid_kmem_cache_destroy(kmem_cache_t *slab) { if (slab != NULL) { const char *name = kmem_cache_name(slab); diff --git a/trunk/net/dccp/ccid.h b/trunk/net/dccp/ccid.h index bcc2d12ae81c..c7c29514dce8 100644 --- a/trunk/net/dccp/ccid.h +++ b/trunk/net/dccp/ccid.h @@ -27,9 +27,9 @@ struct ccid_operations { unsigned char ccid_id; const char *ccid_name; struct module *ccid_owner; - struct kmem_cache *ccid_hc_rx_slab; + kmem_cache_t *ccid_hc_rx_slab; __u32 ccid_hc_rx_obj_size; - struct kmem_cache *ccid_hc_tx_slab; + kmem_cache_t *ccid_hc_tx_slab; __u32 ccid_hc_tx_obj_size; int (*ccid_hc_rx_init)(struct ccid *ccid, struct sock *sk); int (*ccid_hc_tx_init)(struct ccid *ccid, struct sock *sk); diff --git a/trunk/net/dccp/ccids/ccid3.c b/trunk/net/dccp/ccids/ccid3.c index 66a27b9688ca..cf8c07b2704f 100644 --- a/trunk/net/dccp/ccids/ccid3.c +++ b/trunk/net/dccp/ccids/ccid3.c @@ -295,7 +295,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist); if (new_packet == NULL || new_packet->dccphtx_sent) { new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist, - GFP_ATOMIC); + SLAB_ATOMIC); if (unlikely(new_packet == NULL)) { DCCP_WARN("%s, sk=%p, not enough mem to add to history," @@ -889,7 +889,7 @@ static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss) /* new loss event detected */ /* calculate last interval length */ seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss); - entry = dccp_li_hist_entry_new(ccid3_li_hist, GFP_ATOMIC); + entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC); if (entry == NULL) { DCCP_BUG("out of memory - can not allocate entry"); @@ -1011,7 +1011,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) } packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp, - skb, GFP_ATOMIC); + skb, SLAB_ATOMIC); if (unlikely(packet == NULL)) { DCCP_WARN("%s, sk=%p, Not enough mem to add rx packet " "to history, consider it lost!\n", dccp_role(sk), sk); diff --git a/trunk/net/dccp/ccids/lib/loss_interval.c b/trunk/net/dccp/ccids/lib/loss_interval.c index 0a0baef16b3e..48b9b93f8acb 100644 --- a/trunk/net/dccp/ccids/lib/loss_interval.c +++ b/trunk/net/dccp/ccids/lib/loss_interval.c @@ -125,7 +125,7 @@ int dccp_li_hist_interval_new(struct dccp_li_hist *hist, int i; for (i = 0; i < DCCP_LI_HIST_IVAL_F_LENGTH; i++) { - entry = dccp_li_hist_entry_new(hist, GFP_ATOMIC); + entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC); if (entry == NULL) { dccp_li_hist_purge(hist, list); DCCP_BUG("loss interval list entry is NULL"); diff --git a/trunk/net/dccp/ccids/lib/loss_interval.h b/trunk/net/dccp/ccids/lib/loss_interval.h index eb257014dd74..0ae85f0340b2 100644 --- a/trunk/net/dccp/ccids/lib/loss_interval.h +++ b/trunk/net/dccp/ccids/lib/loss_interval.h @@ -20,7 +20,7 @@ #define DCCP_LI_HIST_IVAL_F_LENGTH 8 struct dccp_li_hist { - struct kmem_cache *dccplih_slab; + kmem_cache_t *dccplih_slab; }; extern struct dccp_li_hist *dccp_li_hist_new(const char *name); diff --git a/trunk/net/dccp/ccids/lib/packet_history.h b/trunk/net/dccp/ccids/lib/packet_history.h index 9a8bcf224aa7..067cf1c85a37 100644 --- a/trunk/net/dccp/ccids/lib/packet_history.h +++ b/trunk/net/dccp/ccids/lib/packet_history.h @@ -68,14 +68,14 @@ struct dccp_rx_hist_entry { }; struct dccp_tx_hist { - struct kmem_cache *dccptxh_slab; + kmem_cache_t *dccptxh_slab; }; extern struct dccp_tx_hist 
*dccp_tx_hist_new(const char *name); extern void dccp_tx_hist_delete(struct dccp_tx_hist *hist); struct dccp_rx_hist { - struct kmem_cache *dccprxh_slab; + kmem_cache_t *dccprxh_slab; }; extern struct dccp_rx_hist *dccp_rx_hist_new(const char *name); diff --git a/trunk/net/dccp/minisocks.c b/trunk/net/dccp/minisocks.c index 4c9e26775f72..7b52f2a03eef 100644 --- a/trunk/net/dccp/minisocks.c +++ b/trunk/net/dccp/minisocks.c @@ -32,7 +32,8 @@ struct inet_timewait_death_row dccp_death_row = { .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, (unsigned long)&dccp_death_row), .twkill_work = __WORK_INITIALIZER(dccp_death_row.twkill_work, - inet_twdr_twkill_work), + inet_twdr_twkill_work, + &dccp_death_row), /* Short-time timewait calendar */ .twcal_hand = -1, diff --git a/trunk/net/decnet/dn_table.c b/trunk/net/decnet/dn_table.c index 13b2421991ba..bdbc3f431668 100644 --- a/trunk/net/decnet/dn_table.c +++ b/trunk/net/decnet/dn_table.c @@ -79,7 +79,7 @@ for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_n static struct hlist_head dn_fib_table_hash[DN_FIB_TABLE_HASHSZ]; static DEFINE_RWLOCK(dn_fib_tables_lock); -static struct kmem_cache *dn_hash_kmem __read_mostly; +static kmem_cache_t *dn_hash_kmem __read_mostly; static int dn_fib_hash_zombies; static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz) @@ -590,7 +590,7 @@ static int dn_fib_table_insert(struct dn_fib_table *tb, struct rtmsg *r, struct replace: err = -ENOBUFS; - new_f = kmem_cache_alloc(dn_hash_kmem, GFP_KERNEL); + new_f = kmem_cache_alloc(dn_hash_kmem, SLAB_KERNEL); if (new_f == NULL) goto out; diff --git a/trunk/net/ieee80211/softmac/ieee80211softmac_assoc.c b/trunk/net/ieee80211/softmac/ieee80211softmac_assoc.c index 08386c102954..cf51c87a971d 100644 --- a/trunk/net/ieee80211/softmac/ieee80211softmac_assoc.c +++ b/trunk/net/ieee80211/softmac/ieee80211softmac_assoc.c @@ -58,11 +58,9 @@ ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211soft } void -ieee80211softmac_assoc_timeout(struct work_struct *work) +ieee80211softmac_assoc_timeout(void *d) { - struct ieee80211softmac_device *mac = - container_of(work, struct ieee80211softmac_device, - associnfo.timeout.work); + struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d; struct ieee80211softmac_network *n; mutex_lock(&mac->associnfo.mutex); @@ -188,11 +186,9 @@ ieee80211softmac_assoc_notify_auth(struct net_device *dev, int event_type, void /* This function is called to handle userspace requests (asynchronously) */ void -ieee80211softmac_assoc_work(struct work_struct *work) +ieee80211softmac_assoc_work(void *d) { - struct ieee80211softmac_device *mac = - container_of(work, struct ieee80211softmac_device, - associnfo.work.work); + struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d; struct ieee80211softmac_network *found = NULL; struct ieee80211_network *net = NULL, *best = NULL; int bssvalid; @@ -416,7 +412,7 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev, network->authenticated = 0; /* we don't want to do this more than once ... 
*/ network->auth_desynced_once = 1; - schedule_delayed_work(&mac->associnfo.work, 0); + schedule_work(&mac->associnfo.work); break; } default: @@ -450,7 +446,7 @@ ieee80211softmac_handle_disassoc(struct net_device * dev, ieee80211softmac_disassoc(mac); /* try to reassociate */ - schedule_delayed_work(&mac->associnfo.work, 0); + schedule_work(&mac->associnfo.work); return 0; } @@ -470,7 +466,7 @@ ieee80211softmac_handle_reassoc_req(struct net_device * dev, dprintkl(KERN_INFO PFX "reassoc request from unknown network\n"); return 0; } - schedule_delayed_work(&mac->associnfo.work, 0); + schedule_work(&mac->associnfo.work); return 0; } diff --git a/trunk/net/ieee80211/softmac/ieee80211softmac_auth.c b/trunk/net/ieee80211/softmac/ieee80211softmac_auth.c index 6012705aa4f8..0612015f1c78 100644 --- a/trunk/net/ieee80211/softmac/ieee80211softmac_auth.c +++ b/trunk/net/ieee80211/softmac/ieee80211softmac_auth.c @@ -26,7 +26,7 @@ #include "ieee80211softmac_priv.h" -static void ieee80211softmac_auth_queue(struct work_struct *work); +static void ieee80211softmac_auth_queue(void *data); /* Queues an auth request to the desired AP */ int @@ -54,14 +54,14 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac, auth->mac = mac; auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT; auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST; - INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue); + INIT_WORK(&auth->work, &ieee80211softmac_auth_queue, (void *)auth); /* Lock (for list) */ spin_lock_irqsave(&mac->lock, flags); /* add to list */ list_add_tail(&auth->list, &mac->auth_queue); - schedule_delayed_work(&auth->work, 0); + schedule_work(&auth->work); spin_unlock_irqrestore(&mac->lock, flags); return 0; @@ -70,15 +70,14 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac, /* Sends an auth request to the desired AP and handles timeouts */ static void -ieee80211softmac_auth_queue(struct work_struct *work) +ieee80211softmac_auth_queue(void *data) { struct ieee80211softmac_device *mac; struct ieee80211softmac_auth_queue_item *auth; struct ieee80211softmac_network *net; unsigned long flags; - auth = container_of(work, struct ieee80211softmac_auth_queue_item, - work.work); + auth = (struct ieee80211softmac_auth_queue_item *)data; net = auth->net; mac = auth->mac; @@ -119,11 +118,9 @@ ieee80211softmac_auth_queue(struct work_struct *work) /* Sends a response to an auth challenge (for shared key auth). */ static void -ieee80211softmac_auth_challenge_response(struct work_struct *work) +ieee80211softmac_auth_challenge_response(void *_aq) { - struct ieee80211softmac_auth_queue_item *aq = - container_of(work, struct ieee80211softmac_auth_queue_item, - work.work); + struct ieee80211softmac_auth_queue_item *aq = _aq; /* Send our response */ ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state); @@ -237,8 +234,8 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) * we have obviously already sent the initial auth * request. 
*/ cancel_delayed_work(&aq->work); - INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response); - schedule_delayed_work(&aq->work, 0); + INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq); + schedule_work(&aq->work); spin_unlock_irqrestore(&mac->lock, flags); return 0; case IEEE80211SOFTMAC_AUTH_SHARED_PASS: @@ -401,6 +398,6 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de ieee80211softmac_deauth_from_net(mac, net); /* let's try to re-associate */ - schedule_delayed_work(&mac->associnfo.work, 0); + schedule_work(&mac->associnfo.work); return 0; } diff --git a/trunk/net/ieee80211/softmac/ieee80211softmac_event.c b/trunk/net/ieee80211/softmac/ieee80211softmac_event.c index b9015656cfb3..f34fa2ef666b 100644 --- a/trunk/net/ieee80211/softmac/ieee80211softmac_event.c +++ b/trunk/net/ieee80211/softmac/ieee80211softmac_event.c @@ -73,12 +73,10 @@ static char *event_descriptions[IEEE80211SOFTMAC_EVENT_LAST+1] = { static void -ieee80211softmac_notify_callback(struct work_struct *work) +ieee80211softmac_notify_callback(void *d) { - struct ieee80211softmac_event *pevent = - container_of(work, struct ieee80211softmac_event, work.work); - struct ieee80211softmac_event event = *pevent; - kfree(pevent); + struct ieee80211softmac_event event = *(struct ieee80211softmac_event*) d; + kfree(d); event.fun(event.mac->dev, event.event_type, event.context); } @@ -101,7 +99,7 @@ ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac, return -ENOMEM; eventptr->event_type = event; - INIT_DELAYED_WORK(&eventptr->work, ieee80211softmac_notify_callback); + INIT_WORK(&eventptr->work, ieee80211softmac_notify_callback, eventptr); eventptr->fun = fun; eventptr->context = context; eventptr->mac = mac; @@ -172,7 +170,7 @@ ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int eve /* User may have subscribed to ANY event, so * we tell them which event triggered it. 
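
Many of the softmac, netpoll, bluetooth and bridge hunks in this section follow the same pattern, converting between the two workqueue callback styles; a neutral sketch of both forms is given below, with my_ctx and the handler names as hypothetical examples rather than code from the patch.

	struct my_ctx {
		struct work_struct work;
		int payload;
	};

	/* Three-argument INIT_WORK style: the handler receives the data
	 * pointer that was passed to INIT_WORK(). */
	static void my_handler_data(void *data)
	{
		struct my_ctx *ctx = data;
		ctx->payload++;
	}
	/* INIT_WORK(&ctx->work, my_handler_data, ctx); */

	/* work_struct-argument style: the handler recovers its container. */
	static void my_handler_work(struct work_struct *work)
	{
		struct my_ctx *ctx = container_of(work, struct my_ctx, work);
		ctx->payload++;
	}
	/* INIT_WORK(&ctx->work, my_handler_work); */
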
*/ eventptr->event_type = event; - schedule_delayed_work(&eventptr->work, 0); + schedule_work(&eventptr->work); } } } diff --git a/trunk/net/ieee80211/softmac/ieee80211softmac_module.c b/trunk/net/ieee80211/softmac/ieee80211softmac_module.c index 256207b71dc9..33aff4f4a471 100644 --- a/trunk/net/ieee80211/softmac/ieee80211softmac_module.c +++ b/trunk/net/ieee80211/softmac/ieee80211softmac_module.c @@ -58,8 +58,8 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv) INIT_LIST_HEAD(&softmac->events); mutex_init(&softmac->associnfo.mutex); - INIT_DELAYED_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work); - INIT_DELAYED_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout); + INIT_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work, softmac); + INIT_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout, softmac); softmac->start_scan = ieee80211softmac_start_scan_implementation; softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation; softmac->stop_scan = ieee80211softmac_stop_scan_implementation; diff --git a/trunk/net/ieee80211/softmac/ieee80211softmac_priv.h b/trunk/net/ieee80211/softmac/ieee80211softmac_priv.h index c0dbe070e548..0642e090b8a7 100644 --- a/trunk/net/ieee80211/softmac/ieee80211softmac_priv.h +++ b/trunk/net/ieee80211/softmac/ieee80211softmac_priv.h @@ -78,7 +78,7 @@ /* private definitions and prototypes */ /*** prototypes from _scan.c */ -void ieee80211softmac_scan(struct work_struct *work); +void ieee80211softmac_scan(void *sm); /* for internal use if scanning is needed */ int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac); void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac); @@ -149,7 +149,7 @@ int ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *au int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth); /*** prototypes from _assoc.c */ -void ieee80211softmac_assoc_work(struct work_struct *work); +void ieee80211softmac_assoc_work(void *d); int ieee80211softmac_handle_assoc_response(struct net_device * dev, struct ieee80211_assoc_response * resp, struct ieee80211_network * network); @@ -157,7 +157,7 @@ int ieee80211softmac_handle_disassoc(struct net_device * dev, struct ieee80211_disassoc * disassoc); int ieee80211softmac_handle_reassoc_req(struct net_device * dev, struct ieee80211_reassoc_request * reassoc); -void ieee80211softmac_assoc_timeout(struct work_struct *work); +void ieee80211softmac_assoc_timeout(void *d); void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason); void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac); @@ -207,7 +207,7 @@ struct ieee80211softmac_auth_queue_item { struct ieee80211softmac_device *mac; /* SoftMAC device */ u8 retry; /* Retry limit */ u8 state; /* Auth State */ - struct delayed_work work; /* Work queue */ + struct work_struct work; /* Work queue */ }; /* scanning information */ @@ -219,8 +219,7 @@ struct ieee80211softmac_scaninfo { stop:1; u8 skip_flags; struct completion finished; - struct delayed_work softmac_scan; - struct ieee80211softmac_device *mac; + struct work_struct softmac_scan; }; /* private event struct */ @@ -228,7 +227,7 @@ struct ieee80211softmac_event { struct list_head list; int event_type; void *event_context; - struct delayed_work work; + struct work_struct work; notify_function_ptr fun; void *context; struct ieee80211softmac_device *mac; diff --git a/trunk/net/ieee80211/softmac/ieee80211softmac_scan.c 
b/trunk/net/ieee80211/softmac/ieee80211softmac_scan.c index 0c85d6c24cdb..5507feab32de 100644 --- a/trunk/net/ieee80211/softmac/ieee80211softmac_scan.c +++ b/trunk/net/ieee80211/softmac/ieee80211softmac_scan.c @@ -90,14 +90,12 @@ ieee80211softmac_wait_for_scan(struct ieee80211softmac_device *sm) /* internal scanning implementation follows */ -void ieee80211softmac_scan(struct work_struct *work) +void ieee80211softmac_scan(void *d) { int invalid_channel; u8 current_channel_idx; - struct ieee80211softmac_scaninfo *si = - container_of(work, struct ieee80211softmac_scaninfo, - softmac_scan.work); - struct ieee80211softmac_device *sm = si->mac; + struct ieee80211softmac_device *sm = (struct ieee80211softmac_device *)d; + struct ieee80211softmac_scaninfo *si = sm->scaninfo; unsigned long flags; while (!(si->stop) && (si->current_channel_idx < si->number_channels)) { @@ -148,8 +146,7 @@ static inline struct ieee80211softmac_scaninfo *allocate_scaninfo(struct ieee802 struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC); if (unlikely(!info)) return NULL; - INIT_DELAYED_WORK(&info->softmac_scan, ieee80211softmac_scan); - info->mac = mac; + INIT_WORK(&info->softmac_scan, ieee80211softmac_scan, mac); init_completion(&info->finished); return info; } @@ -190,7 +187,7 @@ int ieee80211softmac_start_scan_implementation(struct net_device *dev) sm->scaninfo->started = 1; sm->scaninfo->stop = 0; INIT_COMPLETION(sm->scaninfo->finished); - schedule_delayed_work(&sm->scaninfo->softmac_scan, 0); + schedule_work(&sm->scaninfo->softmac_scan); spin_unlock_irqrestore(&sm->lock, flags); return 0; } diff --git a/trunk/net/ieee80211/softmac/ieee80211softmac_wx.c b/trunk/net/ieee80211/softmac/ieee80211softmac_wx.c index 2ffaebd21c53..23068a830f7d 100644 --- a/trunk/net/ieee80211/softmac/ieee80211softmac_wx.c +++ b/trunk/net/ieee80211/softmac/ieee80211softmac_wx.c @@ -122,7 +122,7 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev, sm->associnfo.associating = 1; /* queue lower level code to do work (if necessary) */ - schedule_delayed_work(&sm->associnfo.work, 0); + schedule_work(&sm->associnfo.work); out: mutex_unlock(&sm->associnfo.mutex); @@ -356,7 +356,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev, /* force reassociation */ mac->associnfo.bssvalid = 0; if (mac->associnfo.associated) - schedule_delayed_work(&mac->associnfo.work, 0); + schedule_work(&mac->associnfo.work); } else if (is_zero_ether_addr(data->ap_addr.sa_data)) { /* the bssid we have is no longer fixed */ mac->associnfo.bssfixed = 0; @@ -373,7 +373,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev, /* tell the other code that this bssid should be used no matter what */ mac->associnfo.bssfixed = 1; /* queue associate if new bssid or (old one again and not associated) */ - schedule_delayed_work(&mac->associnfo.work, 0); + schedule_work(&mac->associnfo.work); } out: diff --git a/trunk/net/ipv4/fib_hash.c b/trunk/net/ipv4/fib_hash.c index 648f47c1c399..107bb6cbb0b3 100644 --- a/trunk/net/ipv4/fib_hash.c +++ b/trunk/net/ipv4/fib_hash.c @@ -45,8 +45,8 @@ #include "fib_lookup.h" -static struct kmem_cache *fn_hash_kmem __read_mostly; -static struct kmem_cache *fn_alias_kmem __read_mostly; +static kmem_cache_t *fn_hash_kmem __read_mostly; +static kmem_cache_t *fn_alias_kmem __read_mostly; struct fib_node { struct hlist_node fn_hash; @@ -485,13 +485,13 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg) goto out; err = -ENOBUFS; - new_fa = 
kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); + new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL); if (new_fa == NULL) goto out; new_f = NULL; if (!f) { - new_f = kmem_cache_alloc(fn_hash_kmem, GFP_KERNEL); + new_f = kmem_cache_alloc(fn_hash_kmem, SLAB_KERNEL); if (new_f == NULL) goto out_free_new_fa; diff --git a/trunk/net/ipv4/fib_trie.c b/trunk/net/ipv4/fib_trie.c index cfb249cc0a58..d17990ec724f 100644 --- a/trunk/net/ipv4/fib_trie.c +++ b/trunk/net/ipv4/fib_trie.c @@ -172,7 +172,7 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn); static struct tnode *halve(struct trie *t, struct tnode *tn); static void tnode_free(struct tnode *tn); -static struct kmem_cache *fn_alias_kmem __read_mostly; +static kmem_cache_t *fn_alias_kmem __read_mostly; static struct trie *trie_local = NULL, *trie_main = NULL; @@ -1187,7 +1187,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg) u8 state; err = -ENOBUFS; - new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); + new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL); if (new_fa == NULL) goto out; @@ -1232,7 +1232,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg) goto out; err = -ENOBUFS; - new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); + new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL); if (new_fa == NULL) goto out; diff --git a/trunk/net/ipv4/inet_hashtables.c b/trunk/net/ipv4/inet_hashtables.c index 8c79c8a4ea5c..244c4f445c7d 100644 --- a/trunk/net/ipv4/inet_hashtables.c +++ b/trunk/net/ipv4/inet_hashtables.c @@ -27,11 +27,11 @@ * Allocate and initialize a new local port bind bucket. * The bindhash mutex for snum's hash chain must be held here. */ -struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep, +struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep, struct inet_bind_hashbucket *head, const unsigned short snum) { - struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC); + struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, SLAB_ATOMIC); if (tb != NULL) { tb->port = snum; @@ -45,7 +45,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep, /* * Caller must hold hashbucket lock for this tb with local BH disabled */ -void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb) +void inet_bind_bucket_destroy(kmem_cache_t *cachep, struct inet_bind_bucket *tb) { if (hlist_empty(&tb->owners)) { __hlist_del(&tb->node); diff --git a/trunk/net/ipv4/inet_timewait_sock.c b/trunk/net/ipv4/inet_timewait_sock.c index e28330aa4139..cdd805344c61 100644 --- a/trunk/net/ipv4/inet_timewait_sock.c +++ b/trunk/net/ipv4/inet_timewait_sock.c @@ -91,7 +91,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat { struct inet_timewait_sock *tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab, - GFP_ATOMIC); + SLAB_ATOMIC); if (tw != NULL) { const struct inet_sock *inet = inet_sk(sk); @@ -197,10 +197,9 @@ EXPORT_SYMBOL_GPL(inet_twdr_hangman); extern void twkill_slots_invalid(void); -void inet_twdr_twkill_work(struct work_struct *work) +void inet_twdr_twkill_work(void *data) { - struct inet_timewait_death_row *twdr = - container_of(work, struct inet_timewait_death_row, twkill_work); + struct inet_timewait_death_row *twdr = data; int i; if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8)) diff --git a/trunk/net/ipv4/inetpeer.c b/trunk/net/ipv4/inetpeer.c index 711eb6d0285a..f072f3875af8 100644 --- a/trunk/net/ipv4/inetpeer.c +++ 
b/trunk/net/ipv4/inetpeer.c @@ -73,7 +73,7 @@ /* Exported for inet_getid inline function. */ DEFINE_SPINLOCK(inet_peer_idlock); -static struct kmem_cache *peer_cachep __read_mostly; +static kmem_cache_t *peer_cachep __read_mostly; #define node_height(x) x->avl_height static struct inet_peer peer_fake_node = { diff --git a/trunk/net/ipv4/ipmr.c b/trunk/net/ipv4/ipmr.c index ecb5422ea237..efcf45ecc818 100644 --- a/trunk/net/ipv4/ipmr.c +++ b/trunk/net/ipv4/ipmr.c @@ -105,7 +105,7 @@ static DEFINE_SPINLOCK(mfc_unres_lock); In this case data path is free of exclusive locks at all. */ -static struct kmem_cache *mrt_cachep __read_mostly; +static kmem_cache_t *mrt_cachep __read_mostly; static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local); static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert); diff --git a/trunk/net/ipv4/ipvs/ip_vs_conn.c b/trunk/net/ipv4/ipvs/ip_vs_conn.c index 8086787a2c51..8832eb517d52 100644 --- a/trunk/net/ipv4/ipvs/ip_vs_conn.c +++ b/trunk/net/ipv4/ipvs/ip_vs_conn.c @@ -44,7 +44,7 @@ static struct list_head *ip_vs_conn_tab; /* SLAB cache for IPVS connections */ -static struct kmem_cache *ip_vs_conn_cachep __read_mostly; +static kmem_cache_t *ip_vs_conn_cachep __read_mostly; /* counter for current IPVS connections */ static atomic_t ip_vs_conn_count = ATOMIC_INIT(0); diff --git a/trunk/net/ipv4/ipvs/ip_vs_ctl.c b/trunk/net/ipv4/ipvs/ip_vs_ctl.c index 9b933381ebbe..f261616e4602 100644 --- a/trunk/net/ipv4/ipvs/ip_vs_ctl.c +++ b/trunk/net/ipv4/ipvs/ip_vs_ctl.c @@ -221,10 +221,10 @@ static void update_defense_level(void) * Timer for checking the defense */ #define DEFENSE_TIMER_PERIOD 1*HZ -static void defense_work_handler(struct work_struct *work); -static DECLARE_DELAYED_WORK(defense_work, defense_work_handler); +static void defense_work_handler(void *data); +static DECLARE_WORK(defense_work, defense_work_handler, NULL); -static void defense_work_handler(struct work_struct *work) +static void defense_work_handler(void *data) { update_defense_level(); if (atomic_read(&ip_vs_dropentry)) diff --git a/trunk/net/ipv4/netfilter/ip_conntrack_core.c b/trunk/net/ipv4/netfilter/ip_conntrack_core.c index 8556a4f4f60a..f4b0e68a16d2 100644 --- a/trunk/net/ipv4/netfilter/ip_conntrack_core.c +++ b/trunk/net/ipv4/netfilter/ip_conntrack_core.c @@ -65,8 +65,8 @@ static LIST_HEAD(helpers); unsigned int ip_conntrack_htable_size __read_mostly = 0; int ip_conntrack_max __read_mostly; struct list_head *ip_conntrack_hash __read_mostly; -static struct kmem_cache *ip_conntrack_cachep __read_mostly; -static struct kmem_cache *ip_conntrack_expect_cachep __read_mostly; +static kmem_cache_t *ip_conntrack_cachep __read_mostly; +static kmem_cache_t *ip_conntrack_expect_cachep __read_mostly; struct ip_conntrack ip_conntrack_untracked; unsigned int ip_ct_log_invalid __read_mostly; static LIST_HEAD(unconfirmed); diff --git a/trunk/net/ipv4/tcp_minisocks.c b/trunk/net/ipv4/tcp_minisocks.c index 4a3889dd1943..6dddf59c1fb9 100644 --- a/trunk/net/ipv4/tcp_minisocks.c +++ b/trunk/net/ipv4/tcp_minisocks.c @@ -45,7 +45,8 @@ struct inet_timewait_death_row tcp_death_row = { .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, (unsigned long)&tcp_death_row), .twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work, - inet_twdr_twkill_work), + inet_twdr_twkill_work, + &tcp_death_row), /* Short-time timewait calendar */ .twcal_hand = -1, diff --git a/trunk/net/ipv6/af_inet6.c b/trunk/net/ipv6/af_inet6.c index e5cd83b2205d..87c8f54872b7 100644 --- 
a/trunk/net/ipv6/af_inet6.c +++ b/trunk/net/ipv6/af_inet6.c @@ -720,8 +720,10 @@ snmp6_mib_free(void *ptr[2]) { if (ptr == NULL) return; - free_percpu(ptr[0]); - free_percpu(ptr[1]); + if (ptr[0]) + free_percpu(ptr[0]); + if (ptr[1]) + free_percpu(ptr[1]); ptr[0] = ptr[1] = NULL; } diff --git a/trunk/net/ipv6/ip6_fib.c b/trunk/net/ipv6/ip6_fib.c index 96d8310ae9c8..bf526115e518 100644 --- a/trunk/net/ipv6/ip6_fib.c +++ b/trunk/net/ipv6/ip6_fib.c @@ -50,7 +50,7 @@ struct rt6_statistics rt6_stats; -static struct kmem_cache * fib6_node_kmem __read_mostly; +static kmem_cache_t * fib6_node_kmem __read_mostly; enum fib_walk_state_t { @@ -150,7 +150,7 @@ static __inline__ struct fib6_node * node_alloc(void) { struct fib6_node *fn; - if ((fn = kmem_cache_alloc(fib6_node_kmem, GFP_ATOMIC)) != NULL) + if ((fn = kmem_cache_alloc(fib6_node_kmem, SLAB_ATOMIC)) != NULL) memset(fn, 0, sizeof(struct fib6_node)); return fn; diff --git a/trunk/net/ipv6/xfrm6_tunnel.c b/trunk/net/ipv6/xfrm6_tunnel.c index 12e426b9aacd..01a5c52a2be3 100644 --- a/trunk/net/ipv6/xfrm6_tunnel.c +++ b/trunk/net/ipv6/xfrm6_tunnel.c @@ -50,7 +50,7 @@ static u32 xfrm6_tunnel_spi; #define XFRM6_TUNNEL_SPI_MIN 1 #define XFRM6_TUNNEL_SPI_MAX 0xffffffff -static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly; +static kmem_cache_t *xfrm6_tunnel_spi_kmem __read_mostly; #define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256 #define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256 @@ -180,7 +180,7 @@ try_next_2:; spi = 0; goto out; alloc_spi: - x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC); + x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC); if (!x6spi) goto out; diff --git a/trunk/net/irda/ircomm/ircomm_tty.c b/trunk/net/irda/ircomm/ircomm_tty.c index 262bda808d96..d50a02030ad7 100644 --- a/trunk/net/irda/ircomm/ircomm_tty.c +++ b/trunk/net/irda/ircomm/ircomm_tty.c @@ -61,7 +61,7 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty); static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch); static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout); static void ircomm_tty_hangup(struct tty_struct *tty); -static void ircomm_tty_do_softint(struct work_struct *work); +static void ircomm_tty_do_softint(void *private_); static void ircomm_tty_shutdown(struct ircomm_tty_cb *self); static void ircomm_tty_stop(struct tty_struct *tty); @@ -389,7 +389,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) self->flow = FLOW_STOP; self->line = line; - INIT_WORK(&self->tqueue, ircomm_tty_do_softint); + INIT_WORK(&self->tqueue, ircomm_tty_do_softint, self); self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED; self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED; self->close_delay = 5*HZ/10; @@ -594,16 +594,15 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty) } /* - * Function ircomm_tty_do_softint (work) + * Function ircomm_tty_do_softint (private_) * * We use this routine to give the write wakeup to the user at at a * safe time (as fast as possible after write have completed). This * can be compared to the Tx interrupt. 
*/ -static void ircomm_tty_do_softint(struct work_struct *work) +static void ircomm_tty_do_softint(void *private_) { - struct ircomm_tty_cb *self = - container_of(work, struct ircomm_tty_cb, tqueue); + struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) private_; struct tty_struct *tty; unsigned long flags; struct sk_buff *skb, *ctrl_skb; diff --git a/trunk/net/netfilter/nf_conntrack_core.c b/trunk/net/netfilter/nf_conntrack_core.c index a9638ff52a72..eaa0f8a1adb6 100644 --- a/trunk/net/netfilter/nf_conntrack_core.c +++ b/trunk/net/netfilter/nf_conntrack_core.c @@ -108,7 +108,7 @@ static struct { size_t size; /* slab cache pointer */ - struct kmem_cache *cachep; + kmem_cache_t *cachep; /* allocated slab cache + modules which uses this slab cache */ int use; @@ -147,7 +147,7 @@ int nf_conntrack_register_cache(u_int32_t features, const char *name, { int ret = 0; char *cache_name; - struct kmem_cache *cachep; + kmem_cache_t *cachep; DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n", features, name, size); @@ -226,7 +226,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_register_cache); /* FIXME: In the current, only nf_conntrack_cleanup() can call this function. */ void nf_conntrack_unregister_cache(u_int32_t features) { - struct kmem_cache *cachep; + kmem_cache_t *cachep; char *name; /* diff --git a/trunk/net/netfilter/nf_conntrack_expect.c b/trunk/net/netfilter/nf_conntrack_expect.c index c20f901fa177..588d37937046 100644 --- a/trunk/net/netfilter/nf_conntrack_expect.c +++ b/trunk/net/netfilter/nf_conntrack_expect.c @@ -29,7 +29,7 @@ LIST_HEAD(nf_conntrack_expect_list); EXPORT_SYMBOL_GPL(nf_conntrack_expect_list); -struct kmem_cache *nf_conntrack_expect_cachep __read_mostly; +kmem_cache_t *nf_conntrack_expect_cachep __read_mostly; static unsigned int nf_conntrack_expect_next_id; /* nf_conntrack_expect helper functions */ diff --git a/trunk/net/netfilter/xt_hashlimit.c b/trunk/net/netfilter/xt_hashlimit.c index a5a6e192ac2d..a98de0b54d65 100644 --- a/trunk/net/netfilter/xt_hashlimit.c +++ b/trunk/net/netfilter/xt_hashlimit.c @@ -92,7 +92,7 @@ struct xt_hashlimit_htable { static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */ static DEFINE_MUTEX(hlimit_mutex); /* additional checkentry protection */ static HLIST_HEAD(hashlimit_htables); -static struct kmem_cache *hashlimit_cachep __read_mostly; +static kmem_cache_t *hashlimit_cachep __read_mostly; static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b) { diff --git a/trunk/net/rxrpc/krxiod.c b/trunk/net/rxrpc/krxiod.c index 49effd92144e..dada34a77b21 100644 --- a/trunk/net/rxrpc/krxiod.c +++ b/trunk/net/rxrpc/krxiod.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/net/rxrpc/krxsecd.c b/trunk/net/rxrpc/krxsecd.c index 3ab0f77409f4..cea4eb5e2497 100644 --- a/trunk/net/rxrpc/krxsecd.c +++ b/trunk/net/rxrpc/krxsecd.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include "internal.h" diff --git a/trunk/net/rxrpc/krxtimod.c b/trunk/net/rxrpc/krxtimod.c index 9a9b6132dba4..3e7466900bd4 100644 --- a/trunk/net/rxrpc/krxtimod.c +++ b/trunk/net/rxrpc/krxtimod.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/net/sctp/associola.c b/trunk/net/sctp/associola.c index ad0057db0f91..39471d3b31b9 100644 --- a/trunk/net/sctp/associola.c +++ b/trunk/net/sctp/associola.c @@ -61,7 +61,7 @@ #include /* Forward declarations for internal functions. 
*/ -static void sctp_assoc_bh_rcv(struct work_struct *work); +static void sctp_assoc_bh_rcv(struct sctp_association *asoc); /* 1st Level Abstractions. */ @@ -269,7 +269,9 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a /* Create an input queue. */ sctp_inq_init(&asoc->base.inqueue); - sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv); + sctp_inq_set_th_handler(&asoc->base.inqueue, + (void (*)(void *))sctp_assoc_bh_rcv, + asoc); /* Create an output queue. */ sctp_outq_init(asoc, &asoc->outqueue); @@ -944,11 +946,8 @@ struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc, } /* Do delayed input processing. This is scheduled by sctp_rcv(). */ -static void sctp_assoc_bh_rcv(struct work_struct *work) +static void sctp_assoc_bh_rcv(struct sctp_association *asoc) { - struct sctp_association *asoc = - container_of(work, struct sctp_association, - base.inqueue.immediate); struct sctp_endpoint *ep; struct sctp_chunk *chunk; struct sock *sk; diff --git a/trunk/net/sctp/endpointola.c b/trunk/net/sctp/endpointola.c index 129756908da4..33a42e90c32f 100644 --- a/trunk/net/sctp/endpointola.c +++ b/trunk/net/sctp/endpointola.c @@ -61,7 +61,7 @@ #include /* Forward declarations for internal helpers. */ -static void sctp_endpoint_bh_rcv(struct work_struct *work); +static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep); /* * Initialize the base fields of the endpoint structure. @@ -89,7 +89,8 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, sctp_inq_init(&ep->base.inqueue); /* Set its top-half handler */ - sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv); + sctp_inq_set_th_handler(&ep->base.inqueue, + (void (*)(void *))sctp_endpoint_bh_rcv, ep); /* Initialize the bind addr area */ sctp_bind_addr_init(&ep->base.bind_addr, 0); @@ -317,11 +318,8 @@ int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep, /* Do delayed input processing. This is scheduled by sctp_rcv(). * This may be called on BH or task time. */ -static void sctp_endpoint_bh_rcv(struct work_struct *work) +static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep) { - struct sctp_endpoint *ep = - container_of(work, struct sctp_endpoint, - base.inqueue.immediate); struct sctp_association *asoc; struct sock *sk; struct sctp_transport *transport; diff --git a/trunk/net/sctp/inqueue.c b/trunk/net/sctp/inqueue.c index 71b07466e880..cf6deed7e849 100644 --- a/trunk/net/sctp/inqueue.c +++ b/trunk/net/sctp/inqueue.c @@ -54,7 +54,7 @@ void sctp_inq_init(struct sctp_inq *queue) queue->in_progress = NULL; /* Create a task for delivering data. */ - INIT_WORK(&queue->immediate, NULL); + INIT_WORK(&queue->immediate, NULL, NULL); queue->malloced = 0; } @@ -97,7 +97,7 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk) * on the BH related data structures. */ list_add_tail(&chunk->list, &q->in_chunk_list); - q->immediate.func(&q->immediate); + q->immediate.func(q->immediate.data); } /* Extract a chunk from an SCTP inqueue. @@ -205,8 +205,9 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) * The intent is that this routine will pull stuff out of the * inqueue and process it. 
*/ -void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback) +void sctp_inq_set_th_handler(struct sctp_inq *q, + void (*callback)(void *), void *arg) { - INIT_WORK(&q->immediate, callback); + INIT_WORK(&q->immediate, callback, arg); } diff --git a/trunk/net/sctp/protocol.c b/trunk/net/sctp/protocol.c index f2ba8615895b..11f3b549f4a4 100644 --- a/trunk/net/sctp/protocol.c +++ b/trunk/net/sctp/protocol.c @@ -79,8 +79,8 @@ static struct sctp_pf *sctp_pf_inet_specific; static struct sctp_af *sctp_af_v4_specific; static struct sctp_af *sctp_af_v6_specific; -struct kmem_cache *sctp_chunk_cachep __read_mostly; -struct kmem_cache *sctp_bucket_cachep __read_mostly; +kmem_cache_t *sctp_chunk_cachep __read_mostly; +kmem_cache_t *sctp_bucket_cachep __read_mostly; /* Return the address of the control sock. */ struct sock *sctp_get_ctl_sock(void) diff --git a/trunk/net/sctp/sm_make_chunk.c b/trunk/net/sctp/sm_make_chunk.c index 30927d3a597f..04954e5f6846 100644 --- a/trunk/net/sctp/sm_make_chunk.c +++ b/trunk/net/sctp/sm_make_chunk.c @@ -65,7 +65,7 @@ #include #include -extern struct kmem_cache *sctp_chunk_cachep; +extern kmem_cache_t *sctp_chunk_cachep; SCTP_STATIC struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc, @@ -979,7 +979,7 @@ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb, { struct sctp_chunk *retval; - retval = kmem_cache_alloc(sctp_chunk_cachep, GFP_ATOMIC); + retval = kmem_cache_alloc(sctp_chunk_cachep, SLAB_ATOMIC); if (!retval) goto nodata; diff --git a/trunk/net/sctp/socket.c b/trunk/net/sctp/socket.c index 1e8132b8c4d9..02b27145b279 100644 --- a/trunk/net/sctp/socket.c +++ b/trunk/net/sctp/socket.c @@ -107,7 +107,7 @@ static void sctp_sock_migrate(struct sock *, struct sock *, struct sctp_association *, sctp_socket_type_t); static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG; -extern struct kmem_cache *sctp_bucket_cachep; +extern kmem_cache_t *sctp_bucket_cachep; /* Get the sndbuf space available at the time on the association. 
*/ static inline int sctp_wspace(struct sctp_association *asoc) @@ -4989,7 +4989,7 @@ static struct sctp_bind_bucket *sctp_bucket_create( { struct sctp_bind_bucket *pp; - pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC); + pp = kmem_cache_alloc(sctp_bucket_cachep, SLAB_ATOMIC); SCTP_DBG_OBJCNT_INC(bind_bucket); if (pp) { pp->port = snum; diff --git a/trunk/net/socket.c b/trunk/net/socket.c index 29ea1de43ecb..e8db54702a69 100644 --- a/trunk/net/socket.c +++ b/trunk/net/socket.c @@ -230,13 +230,13 @@ int move_addr_to_user(void *kaddr, int klen, void __user *uaddr, #define SOCKFS_MAGIC 0x534F434B -static struct kmem_cache *sock_inode_cachep __read_mostly; +static kmem_cache_t *sock_inode_cachep __read_mostly; static struct inode *sock_alloc_inode(struct super_block *sb) { struct socket_alloc *ei; - ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL); + ei = kmem_cache_alloc(sock_inode_cachep, SLAB_KERNEL); if (!ei) return NULL; init_waitqueue_head(&ei->socket.wait); @@ -257,7 +257,7 @@ static void sock_destroy_inode(struct inode *inode) container_of(inode, struct socket_alloc, vfs_inode)); } -static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) +static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) { struct socket_alloc *ei = (struct socket_alloc *)foo; @@ -305,14 +305,7 @@ static struct file_system_type sock_fs_type = { static int sockfs_delete_dentry(struct dentry *dentry) { - /* - * At creation time, we pretended this dentry was hashed - * (by clearing DCACHE_UNHASHED bit in d_flags) - * At delete time, we restore the truth : not hashed. - * (so that dput() can proceed correctly) - */ - dentry->d_flags |= DCACHE_UNHASHED; - return 0; + return 1; } static struct dentry_operations sockfs_dentry_operations = { .d_delete = sockfs_delete_dentry, @@ -360,20 +353,14 @@ static int sock_attach_fd(struct socket *sock, struct file *file) this.len = sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino); this.name = name; - this.hash = 0; + this.hash = SOCK_INODE(sock)->i_ino; file->f_dentry = d_alloc(sock_mnt->mnt_sb->s_root, &this); if (unlikely(!file->f_dentry)) return -ENOMEM; file->f_dentry->d_op = &sockfs_dentry_operations; - /* - * We dont want to push this dentry into global dentry hash table. 
- * We pretend dentry is already hashed, by unsetting DCACHE_UNHASHED - * This permits a working /proc/$pid/fd/XXX on sockets - */ - file->f_dentry->d_flags &= ~DCACHE_UNHASHED; - d_instantiate(file->f_dentry, SOCK_INODE(sock)); + d_add(file->f_dentry, SOCK_INODE(sock)); file->f_vfsmnt = mntget(sock_mnt); file->f_mapping = file->f_dentry->d_inode->i_mapping; diff --git a/trunk/net/sunrpc/cache.c b/trunk/net/sunrpc/cache.c index d96fd466a9a4..00cb388ece03 100644 --- a/trunk/net/sunrpc/cache.c +++ b/trunk/net/sunrpc/cache.c @@ -284,8 +284,8 @@ static struct file_operations cache_file_operations; static struct file_operations content_file_operations; static struct file_operations cache_flush_operations; -static void do_cache_clean(struct work_struct *work); -static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean); +static void do_cache_clean(void *data); +static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL); void cache_register(struct cache_detail *cd) { @@ -337,7 +337,7 @@ void cache_register(struct cache_detail *cd) spin_unlock(&cache_list_lock); /* start the cleaning process */ - schedule_delayed_work(&cache_cleaner, 0); + schedule_work(&cache_cleaner); } int cache_unregister(struct cache_detail *cd) @@ -461,7 +461,7 @@ static int cache_clean(void) /* * We want to regularly clean the cache, so we need to schedule some work ... */ -static void do_cache_clean(struct work_struct *work) +static void do_cache_clean(void *data) { int delay = 5; if (cache_clean() == -1) diff --git a/trunk/net/sunrpc/rpc_pipe.c b/trunk/net/sunrpc/rpc_pipe.c index 19703aa9659e..9a0b41a97f90 100644 --- a/trunk/net/sunrpc/rpc_pipe.c +++ b/trunk/net/sunrpc/rpc_pipe.c @@ -33,7 +33,7 @@ static int rpc_mount_count; static struct file_system_type rpc_pipe_fs_type; -static struct kmem_cache *rpc_inode_cachep __read_mostly; +static kmem_cache_t *rpc_inode_cachep __read_mostly; #define RPC_UPCALL_TIMEOUT (30*HZ) @@ -54,11 +54,10 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, } static void -rpc_timeout_upcall_queue(struct work_struct *work) +rpc_timeout_upcall_queue(void *data) { LIST_HEAD(free_list); - struct rpc_inode *rpci = - container_of(work, struct rpc_inode, queue_timeout.work); + struct rpc_inode *rpci = (struct rpc_inode *)data; struct inode *inode = &rpci->vfs_inode; void (*destroy_msg)(struct rpc_pipe_msg *); @@ -143,7 +142,7 @@ static struct inode * rpc_alloc_inode(struct super_block *sb) { struct rpc_inode *rpci; - rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL); + rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, SLAB_KERNEL); if (!rpci) return NULL; return &rpci->vfs_inode; @@ -824,7 +823,7 @@ static struct file_system_type rpc_pipe_fs_type = { }; static void -init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) { struct rpc_inode *rpci = (struct rpc_inode *) foo; @@ -838,8 +837,7 @@ init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) INIT_LIST_HEAD(&rpci->pipe); rpci->pipelen = 0; init_waitqueue_head(&rpci->waitq); - INIT_DELAYED_WORK(&rpci->queue_timeout, - rpc_timeout_upcall_queue); + INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci); rpci->ops = NULL; } } diff --git a/trunk/net/sunrpc/sched.c b/trunk/net/sunrpc/sched.c index 225e6510b523..a1ab4eed41f4 100644 --- a/trunk/net/sunrpc/sched.c +++ b/trunk/net/sunrpc/sched.c @@ -34,14 +34,14 @@ static int rpc_task_id; #define RPC_BUFFER_MAXSIZE (2048) #define 
RPC_BUFFER_POOLSIZE (8) #define RPC_TASK_POOLSIZE (8) -static struct kmem_cache *rpc_task_slabp __read_mostly; -static struct kmem_cache *rpc_buffer_slabp __read_mostly; +static kmem_cache_t *rpc_task_slabp __read_mostly; +static kmem_cache_t *rpc_buffer_slabp __read_mostly; static mempool_t *rpc_task_mempool __read_mostly; static mempool_t *rpc_buffer_mempool __read_mostly; static void __rpc_default_timer(struct rpc_task *task); static void rpciod_killall(void); -static void rpc_async_schedule(struct work_struct *); +static void rpc_async_schedule(void *); /* * RPC tasks sit here while waiting for conditions to improve. @@ -305,7 +305,7 @@ static void rpc_make_runnable(struct rpc_task *task) if (RPC_IS_ASYNC(task)) { int status; - INIT_WORK(&task->u.tk_work, rpc_async_schedule); + INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task); status = queue_work(task->tk_workqueue, &task->u.tk_work); if (status < 0) { printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status); @@ -695,9 +695,9 @@ rpc_execute(struct rpc_task *task) return __rpc_execute(task); } -static void rpc_async_schedule(struct work_struct *work) +static void rpc_async_schedule(void *arg) { - __rpc_execute(container_of(work, struct rpc_task, u.tk_work)); + __rpc_execute((struct rpc_task *)arg); } /** diff --git a/trunk/net/sunrpc/svcauth.c b/trunk/net/sunrpc/svcauth.c index c7bb5f7f21a5..ee9bb1522d5e 100644 --- a/trunk/net/sunrpc/svcauth.c +++ b/trunk/net/sunrpc/svcauth.c @@ -119,8 +119,7 @@ EXPORT_SYMBOL(svc_auth_unregister); #define DN_HASHMASK (DN_HASHMAX-1) static struct hlist_head auth_domain_table[DN_HASHMAX]; -static spinlock_t auth_domain_lock = - __SPIN_LOCK_UNLOCKED(auth_domain_lock); +static spinlock_t auth_domain_lock = SPIN_LOCK_UNLOCKED; void auth_domain_put(struct auth_domain *dom) { diff --git a/trunk/net/sunrpc/svcsock.c b/trunk/net/sunrpc/svcsock.c index 99f54fb6d669..64ca1f61dd94 100644 --- a/trunk/net/sunrpc/svcsock.c +++ b/trunk/net/sunrpc/svcsock.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include @@ -85,35 +84,6 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req); */ static int svc_conn_age_period = 6*60; -#ifdef CONFIG_DEBUG_LOCK_ALLOC -static struct lock_class_key svc_key[2]; -static struct lock_class_key svc_slock_key[2]; - -static inline void svc_reclassify_socket(struct socket *sock) -{ - struct sock *sk = sock->sk; - BUG_ON(sk->sk_lock.owner != NULL); - switch (sk->sk_family) { - case AF_INET: - sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD", - &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]); - break; - - case AF_INET6: - sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD", - &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]); - break; - - default: - BUG(); - } -} -#else -static inline void svc_reclassify_socket(struct socket *sock) -{ -} -#endif - /* * Queue up an idle server thread. Must have pool->sp_lock held. 
* Note: this is really a stack rather than a queue, so that we only @@ -1586,8 +1556,6 @@ svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin) if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0) return error; - svc_reclassify_socket(sock); - if (type == SOCK_STREAM) sock->sk->sk_reuse = 1; /* allow address reuse */ error = kernel_bind(sock, (struct sockaddr *) sin, diff --git a/trunk/net/sunrpc/xprt.c b/trunk/net/sunrpc/xprt.c index 4f9a5d9791fb..80857470dc11 100644 --- a/trunk/net/sunrpc/xprt.c +++ b/trunk/net/sunrpc/xprt.c @@ -479,10 +479,9 @@ int xprt_adjust_timeout(struct rpc_rqst *req) return status; } -static void xprt_autoclose(struct work_struct *work) +static void xprt_autoclose(void *args) { - struct rpc_xprt *xprt = - container_of(work, struct rpc_xprt, task_cleanup); + struct rpc_xprt *xprt = (struct rpc_xprt *)args; xprt_disconnect(xprt); xprt->ops->close(xprt); @@ -933,7 +932,7 @@ struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t si INIT_LIST_HEAD(&xprt->free); INIT_LIST_HEAD(&xprt->recv); - INIT_WORK(&xprt->task_cleanup, xprt_autoclose); + INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt); init_timer(&xprt->timer); xprt->timer.function = xprt_init_autodisconnect; xprt->timer.data = (unsigned long) xprt; diff --git a/trunk/net/sunrpc/xprtsock.c b/trunk/net/sunrpc/xprtsock.c index 2fc4a3123261..757fc91ef25d 100644 --- a/trunk/net/sunrpc/xprtsock.c +++ b/trunk/net/sunrpc/xprtsock.c @@ -1058,45 +1058,15 @@ static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) return err; } -#ifdef CONFIG_DEBUG_LOCK_ALLOC -static struct lock_class_key xs_key[2]; -static struct lock_class_key xs_slock_key[2]; - -static inline void xs_reclassify_socket(struct socket *sock) -{ - struct sock *sk = sock->sk; - BUG_ON(sk->sk_lock.owner != NULL); - switch (sk->sk_family) { - case AF_INET: - sock_lock_init_class_and_name(sk, "slock-AF_INET-NFS", - &xs_slock_key[0], "sk_lock-AF_INET-NFS", &xs_key[0]); - break; - - case AF_INET6: - sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFS", - &xs_slock_key[1], "sk_lock-AF_INET6-NFS", &xs_key[1]); - break; - - default: - BUG(); - } -} -#else -static inline void xs_reclassify_socket(struct socket *sock) -{ -} -#endif - /** * xs_udp_connect_worker - set up a UDP socket - * @work: RPC transport to connect + * @args: RPC transport to connect * * Invoked by a work queue tasklet. */ -static void xs_udp_connect_worker(struct work_struct *work) +static void xs_udp_connect_worker(void *args) { - struct rpc_xprt *xprt = - container_of(work, struct rpc_xprt, connect_worker.work); + struct rpc_xprt *xprt = (struct rpc_xprt *) args; struct socket *sock = xprt->sock; int err, status = -EIO; @@ -1110,7 +1080,6 @@ static void xs_udp_connect_worker(struct work_struct *work) dprintk("RPC: can't create UDP transport socket (%d).\n", -err); goto out; } - xs_reclassify_socket(sock); if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { sock_release(sock); @@ -1175,14 +1144,13 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) /** * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint - * @work: RPC transport to connect + * @args: RPC transport to connect * * Invoked by a work queue tasklet. 
*/ -static void xs_tcp_connect_worker(struct work_struct *work) +static void xs_tcp_connect_worker(void *args) { - struct rpc_xprt *xprt = - container_of(work, struct rpc_xprt, connect_worker.work); + struct rpc_xprt *xprt = (struct rpc_xprt *)args; struct socket *sock = xprt->sock; int err, status = -EIO; @@ -1195,7 +1163,6 @@ static void xs_tcp_connect_worker(struct work_struct *work) dprintk("RPC: can't create TCP transport socket (%d).\n", -err); goto out; } - xs_reclassify_socket(sock); if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { sock_release(sock); @@ -1295,7 +1262,7 @@ static void xs_connect(struct rpc_task *task) xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; } else { dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); - schedule_delayed_work(&xprt->connect_worker, 0); + schedule_work(&xprt->connect_worker); /* flush_scheduled_work can sleep... */ if (!RPC_IS_ASYNC(task)) @@ -1408,7 +1375,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) /* XXX: header size can vary due to auth type, IPv6, etc. */ xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); - INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker); + INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt); xprt->bind_timeout = XS_BIND_TO; xprt->connect_timeout = XS_UDP_CONN_TO; xprt->reestablish_timeout = XS_UDP_REEST_TO; @@ -1453,7 +1420,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; - INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker); + INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt); xprt->bind_timeout = XS_BIND_TO; xprt->connect_timeout = XS_TCP_CONN_TO; xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; diff --git a/trunk/net/tipc/handler.c b/trunk/net/tipc/handler.c index eb80778d6d9c..ae6ddf00a1aa 100644 --- a/trunk/net/tipc/handler.c +++ b/trunk/net/tipc/handler.c @@ -42,7 +42,7 @@ struct queue_item { unsigned long data; }; -static struct kmem_cache *tipc_queue_item_cache; +static kmem_cache_t *tipc_queue_item_cache; static struct list_head signal_queue_head; static DEFINE_SPINLOCK(qitem_lock); static int handler_enabled = 0; diff --git a/trunk/net/xfrm/xfrm_input.c b/trunk/net/xfrm/xfrm_input.c index 414f89070380..e8198a2c785d 100644 --- a/trunk/net/xfrm/xfrm_input.c +++ b/trunk/net/xfrm/xfrm_input.c @@ -12,7 +12,7 @@ #include #include -static struct kmem_cache *secpath_cachep __read_mostly; +static kmem_cache_t *secpath_cachep __read_mostly; void __secpath_destroy(struct sec_path *sp) { @@ -27,7 +27,7 @@ struct sec_path *secpath_dup(struct sec_path *src) { struct sec_path *sp; - sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC); + sp = kmem_cache_alloc(secpath_cachep, SLAB_ATOMIC); if (!sp) return NULL; diff --git a/trunk/net/xfrm/xfrm_policy.c b/trunk/net/xfrm/xfrm_policy.c index 3f3f563eb4ab..64d3938f74c4 100644 --- a/trunk/net/xfrm/xfrm_policy.c +++ b/trunk/net/xfrm/xfrm_policy.c @@ -39,7 +39,7 @@ EXPORT_SYMBOL(xfrm_policy_count); static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO]; -static struct kmem_cache *xfrm_dst_cache __read_mostly; +static kmem_cache_t *xfrm_dst_cache __read_mostly; static struct work_struct xfrm_policy_gc_work; static HLIST_HEAD(xfrm_policy_gc_list); @@ -392,7 +392,7 @@ static void xfrm_policy_gc_kill(struct xfrm_policy *policy) xfrm_pol_put(policy); } -static void xfrm_policy_gc_task(struct work_struct *work) +static void 
xfrm_policy_gc_task(void *data) { struct xfrm_policy *policy; struct hlist_node *entry, *tmp; @@ -580,7 +580,7 @@ static inline int xfrm_byidx_should_resize(int total) static DEFINE_MUTEX(hash_resize_mutex); -static void xfrm_hash_resize(struct work_struct *__unused) +static void xfrm_hash_resize(void *__unused) { int dir, total; @@ -597,7 +597,7 @@ static void xfrm_hash_resize(struct work_struct *__unused) mutex_unlock(&hash_resize_mutex); } -static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize); +static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL); /* Generate new index... KAME seems to generate them ordered by cost * of an absolute inpredictability of ordering of rules. This will not pass. */ @@ -2116,7 +2116,7 @@ static void __init xfrm_policy_init(void) panic("XFRM: failed to allocate bydst hash\n"); } - INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task); + INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL); register_netdevice_notifier(&xfrm_dev_notifier); } diff --git a/trunk/net/xfrm/xfrm_state.c b/trunk/net/xfrm/xfrm_state.c index da54a64ccfa3..864962bbda90 100644 --- a/trunk/net/xfrm/xfrm_state.c +++ b/trunk/net/xfrm/xfrm_state.c @@ -115,7 +115,7 @@ static unsigned long xfrm_hash_new_size(void) static DEFINE_MUTEX(hash_resize_mutex); -static void xfrm_hash_resize(struct work_struct *__unused) +static void xfrm_hash_resize(void *__unused) { struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi; unsigned long nsize, osize; @@ -168,7 +168,7 @@ static void xfrm_hash_resize(struct work_struct *__unused) mutex_unlock(&hash_resize_mutex); } -static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize); +static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL); DECLARE_WAIT_QUEUE_HEAD(km_waitq); EXPORT_SYMBOL(km_waitq); @@ -207,7 +207,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x) kfree(x); } -static void xfrm_state_gc_task(struct work_struct *data) +static void xfrm_state_gc_task(void *data) { struct xfrm_state *x; struct hlist_node *entry, *tmp; @@ -1568,6 +1568,6 @@ void __init xfrm_state_init(void) panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes."); xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1); - INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task); + INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL); } diff --git a/trunk/scripts/kconfig/qconf.cc b/trunk/scripts/kconfig/qconf.cc index f5628c57640b..338bdea96541 100644 --- a/trunk/scripts/kconfig/qconf.cc +++ b/trunk/scripts/kconfig/qconf.cc @@ -798,7 +798,7 @@ void ConfigList::contextMenuEvent(QContextMenuEvent *e) QAction *action; headerPopup = new QPopupMenu(this); - action = new QAction(NULL, "Show Name", 0, this); + action = new QAction("Show Name", 0, this); action->setToggleAction(TRUE); connect(action, SIGNAL(toggled(bool)), parent(), SLOT(setShowName(bool))); @@ -806,7 +806,7 @@ void ConfigList::contextMenuEvent(QContextMenuEvent *e) action, SLOT(setOn(bool))); action->setOn(showName); action->addTo(headerPopup); - action = new QAction(NULL, "Show Range", 0, this); + action = new QAction("Show Range", 0, this); action->setToggleAction(TRUE); connect(action, SIGNAL(toggled(bool)), parent(), SLOT(setShowRange(bool))); @@ -814,7 +814,7 @@ void ConfigList::contextMenuEvent(QContextMenuEvent *e) action, SLOT(setOn(bool))); action->setOn(showRange); action->addTo(headerPopup); - action = new QAction(NULL, "Show Data", 0, this); + action = new QAction("Show Data", 0, this); action->setToggleAction(TRUE); connect(action, SIGNAL(toggled(bool)), parent(), SLOT(setShowData(bool))); 
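The workqueue hunks above (sunrpc, xfrm, and the earlier ieee80211softmac and sctp files) all revert the same conversion: handlers go back from taking a struct work_struct pointer, from which the owning object is recovered with container_of(), to taking an opaque void * stored in the work item at INIT_WORK()/DECLARE_WORK() time. Deferred work follows suit: struct delayed_work, INIT_DELAYED_WORK() and schedule_delayed_work(&w, 0) in the "-" lines collapse back to plain struct work_struct, three-argument INIT_WORK() and schedule_work() in the "+" lines, and the trailing NULL added to DECLARE_WORK() is the old API's data argument. A minimal sketch of the two styles follows; the names (my_ctx, my_handler_*) are made up for illustration, and the two halves target different kernel versions, so they are not meant to build together.

#include <linux/workqueue.h>

/* Hypothetical object that owns a work item (illustration only). */
struct my_ctx {
	int value;
	struct work_struct work;
};

/* Style removed by the "-" lines: the handler receives the work item
 * itself and recovers the owning object with container_of(). */
static void my_handler_new(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, work);

	printk(KERN_DEBUG "value=%d\n", ctx->value);
}
/*	INIT_WORK(&ctx->work, my_handler_new);
 *	schedule_work(&ctx->work);
 */

/* Style restored by the "+" lines: the context pointer is stored in the
 * work item at init time and handed back to the handler as void *. */
static void my_handler_old(void *data)
{
	struct my_ctx *ctx = data;

	printk(KERN_DEBUG "value=%d\n", ctx->value);
}
/*	INIT_WORK(&ctx->work, my_handler_old, ctx);
 *	schedule_work(&ctx->work);
 */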
@@ -1161,7 +1161,7 @@ void ConfigInfoView::expr_print_help(void *data, struct symbol *sym, const char QPopupMenu* ConfigInfoView::createPopupMenu(const QPoint& pos) { QPopupMenu* popup = Parent::createPopupMenu(pos); - QAction* action = new QAction(NULL,"Show Debug Info", 0, popup); + QAction* action = new QAction("Show Debug Info", 0, popup); action->setToggleAction(TRUE); connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool))); connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool))); diff --git a/trunk/scripts/kernel-doc b/trunk/scripts/kernel-doc index df3b272f7ce6..187f5de4612c 100755 --- a/trunk/scripts/kernel-doc +++ b/trunk/scripts/kernel-doc @@ -1430,7 +1430,7 @@ sub create_parameterlist($$$) { # corresponding data structures "correctly". Catch it later in # output_* subs. push_parameter($arg, "", $file); - } elsif ($arg =~ m/\(.*\*/) { + } elsif ($arg =~ m/\(/) { # pointer-to-function $arg =~ tr/#/,/; $arg =~ m/[^\(]+\(\*([^\)]+)\)/; diff --git a/trunk/scripts/ver_linux b/trunk/scripts/ver_linux index 72876dfadc8a..84999f69773d 100755 --- a/trunk/scripts/ver_linux +++ b/trunk/scripts/ver_linux @@ -48,8 +48,6 @@ fsck.reiser4 -V 2>&1 | grep ^fsck.reiser4 | awk \ xfs_db -V 2>&1 | grep version | awk \ 'NR==1{print "xfsprogs ", $3}' -pccardctl -V 2>&1| grep pcmciautils | awk '{print "pcmciautils ", $2}' - cardmgr -V 2>&1| grep version | awk \ 'NR==1{print "pcmcia-cs ", $3}' @@ -89,16 +87,10 @@ loadkeys -h 2>&1 | awk \ loadkeys -V 2>&1 | awk \ '(NR==1 && ($2 ~ /console-tools/)) {print "Console-tools ", $3}' -oprofiled --version 2>&1 | awk \ -'(NR==1 && ($2 == "oprofile")) {print "oprofile ", $3}' - expr --v 2>&1 | awk 'NR==1{print "Sh-utils ", $NF}' udevinfo -V 2>&1 | grep version | awk '{print "udev ", $3}' -iwconfig --version 2>&1 | awk \ -'(NR==1 && ($3 == "version")) {print "wireless-tools ",$4}' - if [ -e /proc/modules ]; then X=`cat /proc/modules | sed -e "s/ .*$//"` echo "Modules Loaded "$X diff --git a/trunk/security/keys/key.c b/trunk/security/keys/key.c index ac9326c5f1da..80de8c3e9cc3 100644 --- a/trunk/security/keys/key.c +++ b/trunk/security/keys/key.c @@ -20,7 +20,7 @@ #include #include "internal.h" -static struct kmem_cache *key_jar; +static kmem_cache_t *key_jar; struct rb_root key_serial_tree; /* tree of keys indexed by serial */ DEFINE_SPINLOCK(key_serial_lock); @@ -30,8 +30,8 @@ DEFINE_SPINLOCK(key_user_lock); static LIST_HEAD(key_types_list); static DECLARE_RWSEM(key_types_sem); -static void key_cleanup(struct work_struct *work); -static DECLARE_WORK(key_cleanup_task, key_cleanup); +static void key_cleanup(void *data); +static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL); /* we serialise key instantiation and link */ DECLARE_RWSEM(key_construction_sem); @@ -285,14 +285,16 @@ struct key *key_alloc(struct key_type *type, const char *desc, } /* allocate and initialise the key and its description */ - key = kmem_cache_alloc(key_jar, GFP_KERNEL); + key = kmem_cache_alloc(key_jar, SLAB_KERNEL); if (!key) goto no_memory_2; if (desc) { - key->description = kmemdup(desc, desclen, GFP_KERNEL); + key->description = kmalloc(desclen, GFP_KERNEL); if (!key->description) goto no_memory_3; + + memcpy(key->description, desc, desclen); } atomic_set(&key->usage, 1); @@ -550,7 +552,7 @@ EXPORT_SYMBOL(key_negate_and_link); * do cleaning up in process context so that we don't have to disable * interrupts all over the place */ -static void key_cleanup(struct work_struct *work) +static void key_cleanup(void *data) { struct rb_node *_n; struct key *key; diff 
--git a/trunk/security/keys/keyring.c b/trunk/security/keys/keyring.c index ad45ce73964b..e8d02acc51e7 100644 --- a/trunk/security/keys/keyring.c +++ b/trunk/security/keys/keyring.c @@ -706,10 +706,12 @@ int __key_link(struct key *keyring, struct key *key) BUG_ON(size > PAGE_SIZE); ret = -ENOMEM; - nklist = kmemdup(klist, size, GFP_KERNEL); + nklist = kmalloc(size, GFP_KERNEL); if (!nklist) goto error2; + memcpy(nklist, klist, size); + /* replace matched key */ atomic_inc(&key->usage); nklist->keys[loop] = key; diff --git a/trunk/security/keys/process_keys.c b/trunk/security/keys/process_keys.c index b6f86808475a..32150cf7c37f 100644 --- a/trunk/security/keys/process_keys.c +++ b/trunk/security/keys/process_keys.c @@ -27,7 +27,7 @@ static DEFINE_MUTEX(key_session_mutex); struct key_user root_key_user = { .usage = ATOMIC_INIT(3), .consq = LIST_HEAD_INIT(root_key_user.consq), - .lock = __SPIN_LOCK_UNLOCKED(root_key_user.lock), + .lock = SPIN_LOCK_UNLOCKED, .nkeys = ATOMIC_INIT(2), .nikeys = ATOMIC_INIT(2), .uid = 0, diff --git a/trunk/security/selinux/avc.c b/trunk/security/selinux/avc.c index e7c0b5e2066b..e73ac1ab7cfd 100644 --- a/trunk/security/selinux/avc.c +++ b/trunk/security/selinux/avc.c @@ -124,7 +124,7 @@ DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats) = { 0 }; static struct avc_cache avc_cache; static struct avc_callback_node *avc_callbacks; -static struct kmem_cache *avc_node_cachep; +static kmem_cache_t *avc_node_cachep; static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass) { @@ -332,7 +332,7 @@ static struct avc_node *avc_alloc_node(void) { struct avc_node *node; - node = kmem_cache_alloc(avc_node_cachep, GFP_ATOMIC); + node = kmem_cache_alloc(avc_node_cachep, SLAB_ATOMIC); if (!node) goto out; diff --git a/trunk/security/selinux/hooks.c b/trunk/security/selinux/hooks.c index 44e9cd470543..78f98fe084eb 100644 --- a/trunk/security/selinux/hooks.c +++ b/trunk/security/selinux/hooks.c @@ -124,7 +124,7 @@ static struct security_operations *secondary_ops = NULL; static LIST_HEAD(superblock_security_head); static DEFINE_SPINLOCK(sb_security_lock); -static struct kmem_cache *sel_inode_cache; +static kmem_cache_t *sel_inode_cache; /* Return security context for a given sid or just the context length if the buffer is null or length is 0 */ @@ -181,7 +181,7 @@ static int inode_alloc_security(struct inode *inode) struct task_security_struct *tsec = current->security; struct inode_security_struct *isec; - isec = kmem_cache_alloc(sel_inode_cache, GFP_KERNEL); + isec = kmem_cache_alloc(sel_inode_cache, SLAB_KERNEL); if (!isec) return -ENOMEM; diff --git a/trunk/security/selinux/ss/avtab.c b/trunk/security/selinux/ss/avtab.c index ebb993c5c244..d049c7acbc8b 100644 --- a/trunk/security/selinux/ss/avtab.c +++ b/trunk/security/selinux/ss/avtab.c @@ -28,7 +28,7 @@ (keyp->source_type << 9)) & \ AVTAB_HASH_MASK) -static struct kmem_cache *avtab_node_cachep; +static kmem_cache_t *avtab_node_cachep; static struct avtab_node* avtab_insert_node(struct avtab *h, int hvalue, @@ -36,7 +36,7 @@ avtab_insert_node(struct avtab *h, int hvalue, struct avtab_key *key, struct avtab_datum *datum) { struct avtab_node * newnode; - newnode = kmem_cache_alloc(avtab_node_cachep, GFP_KERNEL); + newnode = kmem_cache_alloc(avtab_node_cachep, SLAB_KERNEL); if (newnode == NULL) return NULL; memset(newnode, 0, sizeof(struct avtab_node)); diff --git a/trunk/sound/aoa/aoa-gpio.h b/trunk/sound/aoa/aoa-gpio.h index ee64f5de8966..3a61f3115573 100644 --- a/trunk/sound/aoa/aoa-gpio.h +++ b/trunk/sound/aoa/aoa-gpio.h 
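The slab hunks scattered through the files above (fib_hash, fib_trie, inet_hashtables, netfilter, sctp, sunrpc, xfrm, security/keys, selinux) are likewise one rename applied everywhere: struct kmem_cache reverts to the older kmem_cache_t typedef, the gfp_t flags GFP_KERNEL/GFP_ATOMIC revert to the legacy SLAB_KERNEL/SLAB_ATOMIC aliases, and key.c/keyring.c trade kmemdup() for an open-coded kmalloc() plus memcpy(). The spinlock hunks in svcauth.c and process_keys.c follow the same spirit, swapping the lockdep-aware __SPIN_LOCK_UNLOCKED(name) initializer back to the plain SPIN_LOCK_UNLOCKED constant. A hedged side-by-side sketch, using a made-up cache and payload (my_cachep, struct my_obj) rather than anything from the patched files, is below.

#include <linux/slab.h>
#include <linux/string.h>

struct my_obj { int v; };		/* made-up payload */

/* Spelling removed by the "-" lines (2.6.20-era API): */
static struct kmem_cache *my_cachep;

static struct my_obj *my_alloc(void)
{
	return kmem_cache_alloc(my_cachep, GFP_ATOMIC);
}

static void *my_dup(const void *src, size_t len)
{
	return kmemdup(src, len, GFP_KERNEL);	/* kmalloc + memcpy helper */
}

/* Spelling restored by the "+" lines (pre-2.6.20), same semantics: */
static kmem_cache_t *my_cachep_old;

static struct my_obj *my_alloc_old(void)
{
	return kmem_cache_alloc(my_cachep_old, SLAB_ATOMIC);
}

static void *my_dup_old(const void *src, size_t len)
{
	void *p = kmalloc(len, GFP_KERNEL);

	if (p)
		memcpy(p, src, len);
	return p;
}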
@@ -59,10 +59,10 @@ struct gpio_methods {
 };
 
 struct gpio_notification {
-	struct delayed_work work;
 	notify_func_t notify;
 	void *data;
 	void *gpio_private;
+	struct work_struct work;
 	struct mutex mutex;
 };
 
diff --git a/trunk/sound/aoa/core/snd-aoa-gpio-feature.c b/trunk/sound/aoa/core/snd-aoa-gpio-feature.c
index 2b03bc798bcb..40eb47eccf9a 100644
--- a/trunk/sound/aoa/core/snd-aoa-gpio-feature.c
+++ b/trunk/sound/aoa/core/snd-aoa-gpio-feature.c
@@ -195,10 +195,9 @@ static void ftr_gpio_all_amps_restore(struct gpio_runtime *rt)
 	ftr_gpio_set_lineout(rt, (s>>2)&1);
 }
 
-static void ftr_handle_notify(struct work_struct *work)
+static void ftr_handle_notify(void *data)
 {
-	struct gpio_notification *notif =
-		container_of(work, struct gpio_notification, work.work);
+	struct gpio_notification *notif = data;
 
 	mutex_lock(&notif->mutex);
 	if (notif->notify)
@@ -254,9 +253,12 @@ static void ftr_gpio_init(struct gpio_runtime *rt)
 	ftr_gpio_all_amps_off(rt);
 	rt->implementation_private = 0;
-	INIT_DELAYED_WORK(&rt->headphone_notify.work, ftr_handle_notify);
-	INIT_DELAYED_WORK(&rt->line_in_notify.work, ftr_handle_notify);
-	INIT_DELAYED_WORK(&rt->line_out_notify.work, ftr_handle_notify);
+	INIT_WORK(&rt->headphone_notify.work, ftr_handle_notify,
+		  &rt->headphone_notify);
+	INIT_WORK(&rt->line_in_notify.work, ftr_handle_notify,
+		  &rt->line_in_notify);
+	INIT_WORK(&rt->line_out_notify.work, ftr_handle_notify,
+		  &rt->line_out_notify);
 	mutex_init(&rt->headphone_notify.mutex);
 	mutex_init(&rt->line_in_notify.mutex);
 	mutex_init(&rt->line_out_notify.mutex);
@@ -285,7 +287,7 @@ static irqreturn_t ftr_handle_notify_irq(int xx, void *data)
 {
 	struct gpio_notification *notif = data;
 
-	schedule_delayed_work(&notif->work, 0);
+	schedule_work(&notif->work);
 
 	return IRQ_HANDLED;
 }
diff --git a/trunk/sound/aoa/core/snd-aoa-gpio-pmf.c b/trunk/sound/aoa/core/snd-aoa-gpio-pmf.c
index 5ca2220eac7d..2836c3218391 100644
--- a/trunk/sound/aoa/core/snd-aoa-gpio-pmf.c
+++ b/trunk/sound/aoa/core/snd-aoa-gpio-pmf.c
@@ -69,10 +69,9 @@ static void pmf_gpio_all_amps_restore(struct gpio_runtime *rt)
 	pmf_gpio_set_lineout(rt, (s>>2)&1);
 }
 
-static void pmf_handle_notify(struct work_struct *work)
+static void pmf_handle_notify(void *data)
 {
-	struct gpio_notification *notif =
-		container_of(work, struct gpio_notification, work.work);
+	struct gpio_notification *notif = data;
 
 	mutex_lock(&notif->mutex);
 	if (notif->notify)
@@ -84,9 +83,12 @@ static void pmf_gpio_init(struct gpio_runtime *rt)
 {
 	pmf_gpio_all_amps_off(rt);
 	rt->implementation_private = 0;
-	INIT_DELAYED_WORK(&rt->headphone_notify.work, pmf_handle_notify);
-	INIT_DELAYED_WORK(&rt->line_in_notify.work, pmf_handle_notify);
-	INIT_DELAYED_WORK(&rt->line_out_notify.work, pmf_handle_notify);
+	INIT_WORK(&rt->headphone_notify.work, pmf_handle_notify,
+		  &rt->headphone_notify);
+	INIT_WORK(&rt->line_in_notify.work, pmf_handle_notify,
+		  &rt->line_in_notify);
+	INIT_WORK(&rt->line_out_notify.work, pmf_handle_notify,
+		  &rt->line_out_notify);
 	mutex_init(&rt->headphone_notify.mutex);
 	mutex_init(&rt->line_in_notify.mutex);
 	mutex_init(&rt->line_out_notify.mutex);
@@ -127,7 +129,7 @@ static void pmf_handle_notify_irq(void *data)
 {
 	struct gpio_notification *notif = data;
 
-	schedule_delayed_work(&notif->work, 0);
+	schedule_work(&notif->work);
 }
 
 static int pmf_set_notify(struct gpio_runtime *rt,
diff --git a/trunk/sound/arm/sa11xx-uda1341.c b/trunk/sound/arm/sa11xx-uda1341.c
index c7e1b2646193..c79a9afd0955 100644
--- a/trunk/sound/arm/sa11xx-uda1341.c
+++ b/trunk/sound/arm/sa11xx-uda1341.c
@@ -125,7 +125,7 @@ struct audio_stream {
 #else
 	dma_regs_t *dma_regs;	/* points to our DMA registers */
 #endif
-	unsigned int active:1;	/* we are using this stream for transfer now */
+	int active:1;		/* we are using this stream for transfer now */
 	int period;		/* current transfer period */
 	int periods;		/* current count of periods registerd in the DMA engine */
 	int tx_spin;		/* are we recoding - flag used to do DMA trans. for sync */
diff --git a/trunk/sound/core/pcm_native.c b/trunk/sound/core/pcm_native.c
index 6ea67b16c676..66e24b5da469 100644
--- a/trunk/sound/core/pcm_native.c
+++ b/trunk/sound/core/pcm_native.c
@@ -3027,7 +3027,7 @@ static struct page * snd_pcm_mmap_status_nopage(struct vm_area_struct *area,
 	struct page * page;
 
 	if (substream == NULL)
-		return NOPAGE_SIGBUS;
+		return NOPAGE_OOM;
 	runtime = substream->runtime;
 	page = virt_to_page(runtime->status);
 	get_page(page);
@@ -3070,7 +3070,7 @@ static struct page * snd_pcm_mmap_control_nopage(struct vm_area_struct *area,
 	struct page * page;
 
 	if (substream == NULL)
-		return NOPAGE_SIGBUS;
+		return NOPAGE_OOM;
 	runtime = substream->runtime;
 	page = virt_to_page(runtime->control);
 	get_page(page);
@@ -3131,18 +3131,18 @@ static struct page *snd_pcm_mmap_data_nopage(struct vm_area_struct *area,
 	size_t dma_bytes;
 
 	if (substream == NULL)
-		return NOPAGE_SIGBUS;
+		return NOPAGE_OOM;
 	runtime = substream->runtime;
 	offset = area->vm_pgoff << PAGE_SHIFT;
 	offset += address - area->vm_start;
-	snd_assert((offset % PAGE_SIZE) == 0, return NOPAGE_SIGBUS);
+	snd_assert((offset % PAGE_SIZE) == 0, return NOPAGE_OOM);
 	dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
 	if (offset > dma_bytes - PAGE_SIZE)
 		return NOPAGE_SIGBUS;
 	if (substream->ops->page) {
 		page = substream->ops->page(substream, offset);
 		if (! page)
-			return NOPAGE_OOM; /* XXX: is this really due to OOM? */
+			return NOPAGE_OOM;
 	} else {
 		vaddr = runtime->dma_area + offset;
 		page = virt_to_page(vaddr);
diff --git a/trunk/sound/i2c/other/ak4114.c b/trunk/sound/i2c/other/ak4114.c
index d2f2c5078e65..12ffffc9e814 100644
--- a/trunk/sound/i2c/other/ak4114.c
+++ b/trunk/sound/i2c/other/ak4114.c
@@ -35,7 +35,7 @@ MODULE_LICENSE("GPL");
 
 #define AK4114_ADDR	0x00	/* fixed address */
 
-static void ak4114_stats(struct work_struct *work);
+static void ak4114_stats(void *);
 
 static void reg_write(struct ak4114 *ak4114, unsigned char reg, unsigned char val)
 {
@@ -158,7 +158,7 @@ void snd_ak4114_reinit(struct ak4114 *chip)
 	reg_write(chip, AK4114_REG_PWRDN, old | AK4114_RST | AK4114_PWN);
 	/* bring up statistics / event queing */
 	chip->init = 0;
-	INIT_DELAYED_WORK(&chip->work, ak4114_stats);
+	INIT_WORK(&chip->work, ak4114_stats, chip);
 	queue_delayed_work(chip->workqueue, &chip->work, HZ / 10);
 }
 
@@ -561,9 +561,9 @@ int snd_ak4114_check_rate_and_errors(struct ak4114 *ak4114, unsigned int flags)
 	return res;
 }
 
-static void ak4114_stats(struct work_struct *work)
+static void ak4114_stats(void *data)
 {
-	struct ak4114 *chip = container_of(work, struct ak4114, work.work);
+	struct ak4114 *chip = (struct ak4114 *)data;
 
 	if (chip->init)
 		return;
diff --git a/trunk/sound/oss/Kconfig b/trunk/sound/oss/Kconfig
index a0588c21324a..cc2b9ab7f4e5 100644
--- a/trunk/sound/oss/Kconfig
+++ b/trunk/sound/oss/Kconfig
@@ -5,6 +5,20 @@
 #
 # Prompt user for primary drivers.
 
+config OSS_OBSOLETE_DRIVER
+	bool "Obsolete OSS drivers"
+	depends on SOUND_PRIME
+	help
+	  This option enables support for obsolete OSS drivers that
+	  are scheduled for removal in the near future since there
+	  are ALSA drivers for the same hardware.
+
+	  Please contact Adrian Bunk if you had to
+	  say Y here because your soundcard is not properly supported
+	  by ALSA.
+
+	  If unsure, say N.
+
 config SOUND_BT878
 	tristate "BT878 audio dma"
 	depends on SOUND_PRIME && PCI
@@ -21,6 +35,40 @@ config SOUND_BT878
 	  To compile this driver as a module, choose M here: the module
 	  will be called btaudio.
 
+config SOUND_EMU10K1
+	tristate "Creative SBLive! (EMU10K1)"
+	depends on SOUND_PRIME && PCI && OSS_OBSOLETE_DRIVER
+	---help---
+	  Say Y or M if you have a PCI sound card using the EMU10K1 chipset,
+	  such as the Creative SBLive!, SB PCI512 or Emu-APS.
+
+	  For more information on this driver and the degree of support for
+	  the different card models please check:
+
+
+	  It is now possible to load dsp microcode patches into the EMU10K1
+	  chip. These patches are used to implement real time sound
+	  processing effects which include for example: signal routing,
+	  bass/treble control, AC3 passthrough, ...
+	  Userspace tools to create new patches and load/unload them can be
+	  found in the emu-tools package at the above URL.
+
+config MIDI_EMU10K1
+	bool "Creative SBLive! MIDI (EXPERIMENTAL)"
+	depends on SOUND_EMU10K1 && EXPERIMENTAL && ISA_DMA_API
+	help
+	  Say Y if you want to be able to use the OSS /dev/sequencer
+	  interface. This code is still experimental.
+
+config SOUND_FUSION
+	tristate "Crystal SoundFusion (CS4280/461x)"
+	depends on SOUND_PRIME && PCI && OSS_OBSOLETE_DRIVER
+	help
+	  This module drives the Crystal SoundFusion devices (CS4280/46xx
+	  series) when wired as native sound drivers with AC97 codecs. If
+	  this driver does not work try the CS4232 driver.
+
 config SOUND_BCM_CS4297A
 	tristate "Crystal Sound CS4297a (for Swarm)"
 	depends on SOUND_PRIME && SIBYTE_SWARM
@@ -400,6 +448,47 @@ config SOUND_DMAP
 
 	  Say Y unless you have 16MB or more RAM or a PCI sound card.
 
+config SOUND_AD1816
+	tristate "AD1816(A) based cards (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && SOUND_OSS && OSS_OBSOLETE_DRIVER
+	help
+	  Say M here if you have a sound card based on the Analog Devices
+	  AD1816(A) chip.
+
+	  If you compile the driver into the kernel, you have to add
+	  "ad1816=,,," to the kernel command line.
+
+config SOUND_AD1889
+	tristate "AD1889 based cards (AD1819 codec) (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && SOUND_OSS && PCI && OSS_OBSOLETE_DRIVER
+	help
+	  Say M here if you have a sound card based on the Analog Devices
+	  AD1889 chip.
+
+config SOUND_ADLIB
+	tristate "Adlib Cards"
+	depends on SOUND_OSS && OSS_OBSOLETE_DRIVER
+	help
+	  Includes ASB 64 4D. Information on programming AdLib cards is
+	  available at .
+
+config SOUND_ACI_MIXER
+	tristate "ACI mixer (miroSOUND PCM1-pro/PCM12/PCM20)"
+	depends on SOUND_OSS && OSS_OBSOLETE_DRIVER
+	---help---
+	  ACI (Audio Command Interface) is a protocol used to communicate with
+	  the microcontroller on some sound cards produced by miro and
+	  Cardinal Technologies. The main function of the ACI is to control
+	  the mixer and to get a product identification.
+
+	  This VoxWare ACI driver currently supports the ACI functions on the
+	  miroSOUND PCM1-pro, PCM12 and PCM20 radio. On the PCM20 radio, ACI
+	  also controls the radio tuner. This is supported in the video4linux
+	  miropcm20 driver (say M or Y here and go back to "Multimedia
+	  devices" -> "Radio Adapters").
+
+	  This driver is also available as a module and will be called aci.
+
 config SOUND_CS4232
 	tristate "Crystal CS4232 based (PnP) cards"
 	depends on SOUND_OSS
@@ -505,6 +594,18 @@ config SOUND_MPU401
 	  If you compile the driver into the kernel, you have to add
 	  "mpu401=," to the kernel command line.
 
+config SOUND_NM256
+	tristate "NM256AV/NM256ZX audio support"
+	depends on SOUND_OSS && OSS_OBSOLETE_DRIVER
+	help
+	  Say M here to include audio support for the NeoMagic 256AV/256ZX
+	  chipsets. These are the audio chipsets found in the Sony
+	  Z505S/SX/DX, some Sony F-series, and the Dell Latitude CPi and CPt
+	  laptops. It includes support for an AC97-compatible mixer and an
+	  apparently proprietary sound engine.
+
+	  See for further information.
+
 config SOUND_PAS
 	tristate "ProAudioSpectrum 16 support"
 	depends on SOUND_OSS
@@ -613,6 +714,20 @@ config SOUND_YM3812
 
 	  If unsure, say Y.
 
+config SOUND_OPL3SA2
+	tristate "Yamaha OPL3-SA2 and SA3 based PnP cards"
+	depends on SOUND_OSS && OSS_OBSOLETE_DRIVER
+	help
+	  Say Y or M if you have a card based on one of these Yamaha sound
+	  chipsets or the "SAx", which is actually a SA3. Read
+	  for more information on
+	  configuring these cards.
+
+	  If you compile the driver into the kernel and do not also
+	  configure in the optional ISA PnP support, you will have to add
+	  "opl3sa2=,,,,," to the kernel
+	  command line.
+
 config SOUND_UART6850
 	tristate "6850 UART support"
 	depends on SOUND_OSS
diff --git a/trunk/sound/oss/btaudio.c b/trunk/sound/oss/btaudio.c
index ad7210a00dc0..6ad384114239 100644
--- a/trunk/sound/oss/btaudio.c
+++ b/trunk/sound/oss/btaudio.c
@@ -1020,7 +1020,6 @@ static int __devinit btaudio_probe(struct pci_dev *pci_dev,
 fail2:
 	free_irq(bta->irq,bta);
 fail1:
-	iounmap(bta->mmio);
	kfree(bta);
 fail0:
	release_mem_region(pci_resource_start(pci_dev,0),
@@ -1052,7 +1051,6 @@ static void __devexit btaudio_remove(struct pci_dev *pci_dev)
 	free_irq(bta->irq,bta);
 	release_mem_region(pci_resource_start(pci_dev,0),
 			   pci_resource_len(pci_dev,0));
-	iounmap(bta->mmio);
 
 	/* remove from linked list */
 	if (bta == btaudios) {
diff --git a/trunk/sound/oss/emu10k1/audio.c b/trunk/sound/oss/emu10k1/audio.c
index 49f902f35c28..86dd23974e05 100644
--- a/trunk/sound/oss/emu10k1/audio.c
+++ b/trunk/sound/oss/emu10k1/audio.c
@@ -111,15 +111,9 @@ static ssize_t emu10k1_audio_read(struct file *file, char __user *buffer, size_t
 
 		if ((bytestocopy >= wiinst->buffer.fragment_size)
 		     || (bytestocopy >= count)) {
-			int rc;
-
 			bytestocopy = min_t(u32, bytestocopy, count);
 
-			rc = emu10k1_wavein_xferdata(wiinst,
-						     (u8 __user *)buffer,
-						     &bytestocopy);
-			if (rc)
-				return rc;
+			emu10k1_wavein_xferdata(wiinst, (u8 __user *)buffer, &bytestocopy);
 
 			count -= bytestocopy;
 			buffer += bytestocopy;
diff --git a/trunk/sound/oss/emu10k1/cardwi.c b/trunk/sound/oss/emu10k1/cardwi.c
index 060d1be94d33..8bbf44b881b4 100644
--- a/trunk/sound/oss/emu10k1/cardwi.c
+++ b/trunk/sound/oss/emu10k1/cardwi.c
@@ -304,12 +304,11 @@ void emu10k1_wavein_getxfersize(struct wiinst *wiinst, u32 * size)
 	}
 }
 
-static int copy_block(u8 __user *dst, u8 * src, u32 str, u32 len, u8 cov)
+static void copy_block(u8 __user *dst, u8 * src, u32 str, u32 len, u8 cov)
 {
-	if (cov == 1) {
-		if (__copy_to_user(dst, src + str, len))
-			return -EFAULT;
-	} else {
+	if (cov == 1)
+		__copy_to_user(dst, src + str, len);
+	else {
 		u8 byte;
 		u32 i;
 
@@ -317,26 +316,22 @@ static int copy_block(u8 __user *dst, u8 * src, u32 str, u32 len, u8 cov)
 
 		for (i = 0; i < len; i++) {
 			byte = src[2 * i] ^ 0x80;
-			if (__copy_to_user(dst + i, &byte, 1))
-				return -EFAULT;
+			__copy_to_user(dst + i, &byte, 1);
 		}
 	}
-
-	return 0;
 }
 
-int emu10k1_wavein_xferdata(struct wiinst *wiinst, u8 __user *data, u32 * size)
+void emu10k1_wavein_xferdata(struct wiinst *wiinst, u8 __user *data, u32 * size)
 {
 	struct wavein_buffer *buffer = &wiinst->buffer;
 	u32 sizetocopy, sizetocopy_now, start;
 	unsigned long flags;
-	int ret;
 
 	sizetocopy = min_t(u32, buffer->size, *size);
 	*size = sizetocopy;
 
 	if (!sizetocopy)
-		return 0;
+		return;
 
 	spin_lock_irqsave(&wiinst->lock, flags);
 	start = buffer->pos;
@@ -350,17 +345,11 @@ int emu10k1_wavein_xferdata(struct wiinst *wiinst, u8 __user *data, u32 * size)
 
 	if (sizetocopy > sizetocopy_now) {
 		sizetocopy -= sizetocopy_now;
-		ret = copy_block(data, buffer->addr, start, sizetocopy_now,
-				 buffer->cov);
-		if (ret == 0)
-			ret = copy_block(data + sizetocopy_now, buffer->addr, 0,
-					 sizetocopy, buffer->cov);
+		copy_block(data, buffer->addr, start, sizetocopy_now, buffer->cov);
+		copy_block(data + sizetocopy_now, buffer->addr, 0, sizetocopy, buffer->cov);
 	} else {
-		ret = copy_block(data, buffer->addr, start, sizetocopy,
-				 buffer->cov);
+		copy_block(data, buffer->addr, start, sizetocopy, buffer->cov);
 	}
-
-	return ret;
 }
 
 void emu10k1_wavein_update(struct emu10k1_card *card, struct wiinst *wiinst)
diff --git a/trunk/sound/oss/emu10k1/cardwi.h b/trunk/sound/oss/emu10k1/cardwi.h
index e82029b46ad1..15cfb9b35596 100644
--- a/trunk/sound/oss/emu10k1/cardwi.h
+++ b/trunk/sound/oss/emu10k1/cardwi.h
@@ -83,7 +83,7 @@ void emu10k1_wavein_close(struct emu10k1_wavedevice *);
 void emu10k1_wavein_start(struct emu10k1_wavedevice *);
 void emu10k1_wavein_stop(struct emu10k1_wavedevice *);
 void emu10k1_wavein_getxfersize(struct wiinst *, u32 *);
-int emu10k1_wavein_xferdata(struct wiinst *, u8 __user *, u32 *);
+void emu10k1_wavein_xferdata(struct wiinst *, u8 __user *, u32 *);
 int emu10k1_wavein_setformat(struct emu10k1_wavedevice *, struct wave_format *);
 void emu10k1_wavein_update(struct emu10k1_card *, struct wiinst *);
 
diff --git a/trunk/sound/oss/emu10k1/passthrough.c b/trunk/sound/oss/emu10k1/passthrough.c
index 6d21d4368dec..4e3baca7d41f 100644
--- a/trunk/sound/oss/emu10k1/passthrough.c
+++ b/trunk/sound/oss/emu10k1/passthrough.c
@@ -162,15 +162,12 @@ ssize_t emu10k1_pt_write(struct file *file, const char __user *buffer, size_t co
 		DPD(3, "prepend size %d, prepending %d bytes\n", pt->prepend_size, needed);
 
 		if (count < needed) {
-			if (copy_from_user(pt->buf + pt->prepend_size,
-					   buffer, count))
-				return -EFAULT;
+			copy_from_user(pt->buf + pt->prepend_size, buffer, count);
 			pt->prepend_size += count;
 			DPD(3, "prepend size now %d\n", pt->prepend_size);
 			return count;
 		}
-		if (copy_from_user(pt->buf + pt->prepend_size, buffer, needed))
-			return -EFAULT;
+		copy_from_user(pt->buf + pt->prepend_size, buffer, needed);
 		r = pt_putblock(wave_dev, (u16 *) pt->buf, nonblock);
 		if (r)
 			return r;
@@ -181,8 +178,7 @@ ssize_t emu10k1_pt_write(struct file *file, const char __user *buffer, size_t co
 	blocks_copied = 0;
 	while (blocks > 0) {
 		u16 __user *bufptr = (u16 __user *) buffer + (bytes_copied/2);
-		if (copy_from_user(pt->buf, bufptr, PT_BLOCKSIZE))
-			return -EFAULT;
+		copy_from_user(pt->buf, bufptr, PT_BLOCKSIZE);
 		r = pt_putblock(wave_dev, (u16 *)pt->buf, nonblock);
 		if (r) {
 			if (bytes_copied)
@@ -197,8 +193,7 @@ ssize_t emu10k1_pt_write(struct file *file, const char __user *buffer, size_t co
 	i = count - bytes_copied;
 	if (i) {
 		pt->prepend_size = i;
-		if (copy_from_user(pt->buf, buffer + bytes_copied, i))
-			return -EFAULT;
+		copy_from_user(pt->buf, buffer + bytes_copied, i);
 		bytes_copied += i;
 		DPD(3, "filling prepend buffer with %d bytes", i);
 	}
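The emu10k1 hunks above move between checked and unchecked use of the userspace copy helpers. For reference, the usual pattern for propagating a failed copy from a write() handler is sketched below; it is illustrative only and not taken from this tree, and the names my_write and my_buf are hypothetical.

/* Minimal sketch: copy_from_user() returns the number of bytes it could
 * NOT copy, so any non-zero result is reported to userspace as -EFAULT. */
#include <linux/fs.h>
#include <linux/uaccess.h>

static char my_buf[256];	/* hypothetical driver buffer */

static ssize_t my_write(struct file *file, const char __user *buffer,
			size_t count, loff_t *ppos)
{
	if (count > sizeof(my_buf))
		count = sizeof(my_buf);

	if (copy_from_user(my_buf, buffer, count))
		return -EFAULT;	/* some bytes were not copied */

	return count;		/* number of bytes accepted */
}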
diff --git a/trunk/sound/oss/via82cxxx_audio.c b/trunk/sound/oss/via82cxxx_audio.c
index c96cc8c68b3b..17837d4b5ed3 100644
--- a/trunk/sound/oss/via82cxxx_audio.c
+++ b/trunk/sound/oss/via82cxxx_audio.c
@@ -2120,8 +2120,8 @@ static struct page * via_mm_nopage (struct vm_area_struct * vma,
 		return NOPAGE_SIGBUS; /* Disallow mremap */
 	}
 	if (!card) {
-		DPRINTK ("EXIT, returning NOPAGE_SIGBUS\n");
-		return NOPAGE_SIGBUS; /* Nothing allocated */
+		DPRINTK ("EXIT, returning NOPAGE_OOM\n");
+		return NOPAGE_OOM; /* Nothing allocated */
 	}
 
 	pgoff = vma->vm_pgoff + ((address - vma->vm_start) >> PAGE_SHIFT);
diff --git a/trunk/sound/pci/ac97/ac97_codec.c b/trunk/sound/pci/ac97/ac97_codec.c
index 7abcb10b2754..6577b2325357 100644
--- a/trunk/sound/pci/ac97/ac97_codec.c
+++ b/trunk/sound/pci/ac97/ac97_codec.c
@@ -1927,10 +1927,9 @@ static int snd_ac97_dev_disconnect(struct snd_device *device)
 static struct snd_ac97_build_ops null_build_ops;
 
 #ifdef CONFIG_SND_AC97_POWER_SAVE
-static void do_update_power(struct work_struct *work)
+static void do_update_power(void *data)
 {
-	update_power_regs(
-		container_of(work, struct snd_ac97, power_work.work));
+	update_power_regs(data);
 }
 #endif
 
@@ -1990,7 +1989,7 @@ int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template,
 	mutex_init(&ac97->page_mutex);
 #ifdef CONFIG_SND_AC97_POWER_SAVE
 	ac97->power_workq = create_workqueue("ac97");
-	INIT_DELAYED_WORK(&ac97->power_work, do_update_power);
+	INIT_WORK(&ac97->power_work, do_update_power, ac97);
 #endif
 
 #ifdef CONFIG_PCI
diff --git a/trunk/sound/pci/hda/hda_codec.c b/trunk/sound/pci/hda/hda_codec.c
index 71482c15a852..9c3d7ac08068 100644
--- a/trunk/sound/pci/hda/hda_codec.c
+++ b/trunk/sound/pci/hda/hda_codec.c
@@ -272,11 +272,10 @@ EXPORT_SYMBOL(snd_hda_queue_unsol_event);
 /*
  * process queueud unsolicited events
  */
-static void process_unsol_events(struct work_struct *work)
+static void process_unsol_events(void *data)
 {
-	struct hda_bus_unsolicited *unsol =
-		container_of(work, struct hda_bus_unsolicited, work);
-	struct hda_bus *bus = unsol->bus;
+	struct hda_bus *bus = data;
+	struct hda_bus_unsolicited *unsol = bus->unsol;
 	struct hda_codec *codec;
 	unsigned int rp, caddr, res;
 
@@ -315,8 +314,7 @@ static int init_unsol_queue(struct hda_bus *bus)
 		kfree(unsol);
 		return -ENOMEM;
 	}
-	INIT_WORK(&unsol->work, process_unsol_events);
-	unsol->bus = bus;
+	INIT_WORK(&unsol->work, process_unsol_events, bus);
 	bus->unsol = unsol;
 	return 0;
 }
diff --git a/trunk/sound/pci/hda/hda_local.h b/trunk/sound/pci/hda/hda_local.h
index 9ca1baf860bd..f9416c36396e 100644
--- a/trunk/sound/pci/hda/hda_local.h
+++ b/trunk/sound/pci/hda/hda_local.h
@@ -206,7 +206,6 @@ struct hda_bus_unsolicited {
 	/* workqueue */
 	struct workqueue_struct *workq;
 	struct work_struct work;
-	struct hda_bus *bus;
 };
 
 /*
diff --git a/trunk/sound/ppc/tumbler.c b/trunk/sound/ppc/tumbler.c
index 8f074c7936e6..2fbe1d183fce 100644
--- a/trunk/sound/ppc/tumbler.c
+++ b/trunk/sound/ppc/tumbler.c
@@ -942,11 +942,10 @@ static void check_mute(struct snd_pmac *chip, struct pmac_gpio *gp, int val, int
 }
 
 static struct work_struct device_change;
-static struct snd_pmac *device_change_chip;
 
-static void device_change_handler(struct work_struct *work)
+static void device_change_handler(void *self)
 {
-	struct snd_pmac *chip = device_change_chip;
+	struct snd_pmac *chip = self;
 	struct pmac_tumbler *mix;
 	int headphone, lineout;
 
@@ -1418,8 +1417,7 @@ int __init snd_pmac_tumbler_init(struct snd_pmac *chip)
 	chip->resume = tumbler_resume;
 #endif
 
-	INIT_WORK(&device_change, device_change_handler);
-	device_change_chip = chip;
+	INIT_WORK(&device_change, device_change_handler, (void *)chip);
 
 #ifdef PMAC_SUPPORT_AUTOMUTE
 	if ((mix->headphone_irq >=0 || mix->lineout_irq >= 0)
diff --git a/trunk/sound/usb/usx2y/usX2Yhwdep.c b/trunk/sound/usb/usx2y/usX2Yhwdep.c
index b76b3dd9df25..4b52d18dcd53 100644
--- a/trunk/sound/usb/usx2y/usX2Yhwdep.c
+++ b/trunk/sound/usb/usx2y/usX2Yhwdep.c
@@ -48,7 +48,7 @@ static struct page * snd_us428ctls_vm_nopage(struct vm_area_struct *area, unsign
 
 	offset = area->vm_pgoff << PAGE_SHIFT;
 	offset += address - area->vm_start;
-	snd_assert((offset % PAGE_SIZE) == 0, return NOPAGE_SIGBUS);
+	snd_assert((offset % PAGE_SIZE) == 0, return NOPAGE_OOM);
 	vaddr = (char*)((struct usX2Ydev *)area->vm_private_data)->us428ctls_sharedmem + offset;
 	page = virt_to_page(vaddr);
 	get_page(page);
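Most hunks in this series swap between the two workqueue calling conventions: the "-" lines use the newer handlers that take a struct work_struct * and recover their context with container_of(), while the "+" lines restore the older (pre-2.6.20) convention in which INIT_WORK() carries an explicit data pointer and the handler takes a void *. A minimal sketch of that older convention follows; my_dev, my_handler and my_dev_setup are hypothetical names, not symbols from these drivers.

/* Minimal sketch of the pre-2.6.20 workqueue convention used on the
 * "+" side of these hunks: INIT_WORK(work, func, data) registers a
 * void-pointer callback, and schedule_work() runs it later with that
 * same data pointer. */
#include <linux/workqueue.h>

struct my_dev {
	struct work_struct work;	/* deferred-work handle */
	int events;
};

static void my_handler(void *data)
{
	struct my_dev *dev = data;	/* data pointer passed at INIT_WORK time */

	dev->events++;
}

static void my_dev_setup(struct my_dev *dev)
{
	/* old style: handler plus explicit context pointer */
	INIT_WORK(&dev->work, my_handler, dev);
}

static void my_dev_kick(struct my_dev *dev)
{
	schedule_work(&dev->work);	/* my_handler(dev) runs from keventd */
}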